more work on the simple inductive section
authorChristian Urban <urbanc@in.tum.de>
Thu, 26 Mar 2009 19:00:51 +0000
changeset 210 db8e302f44c8
parent 209 17b1512f51af
child 211 d5accbc67e1b
more work on the simple inductive section
ProgTutorial/Base.thy
ProgTutorial/FirstSteps.thy
ProgTutorial/Package/Ind_Code.thy
ProgTutorial/Package/Ind_General_Scheme.thy
ProgTutorial/Tactical.thy
ProgTutorial/document/root.tex
progtutorial.pdf
--- a/ProgTutorial/Base.thy	Wed Mar 25 15:09:04 2009 +0100
+++ b/ProgTutorial/Base.thy	Thu Mar 26 19:00:51 2009 +0000
@@ -8,11 +8,28 @@
 (* to have a special tag for text enclosed in ML *)
 ML {*
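+
+(* propagate the ML environment of a generic context into the contexts stored
+   in a local theory (inherit_env), respectively in a proof state
+   (inherit_env_prf); used below so that ML bindings made inside the custom
+   "ML" and "ML_prf" commands remain visible to later commands *)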
 
+fun inherit_env (context as Context.Proof lthy) =
+      Context.Proof (LocalTheory.map_contexts (ML_Context.inherit_env context) lthy)
+  | inherit_env context = context;
+
+fun inherit_env_prf prf = Proof.map_contexts
+  (Context.proof_map (ML_Context.inherit_env (Context.Proof (Proof.context_of prf)))) prf
+
 val _ =
   OuterSyntax.command "ML" "eval ML text within theory"
     (OuterKeyword.tag "TutorialML" OuterKeyword.thy_decl)
     (OuterParse.ML_source >> (fn (txt, pos) =>
-      Toplevel.generic_theory (ML_Context.exec (fn () => ML_Context.eval true pos txt))));
+      Toplevel.generic_theory
+        (ML_Context.exec (fn () => ML_Context.eval true pos txt) #> inherit_env)));
+
+
+val _ =
+  OuterSyntax.command "ML_prf" "ML text within proof" 
+    (OuterKeyword.tag "TutorialML" OuterKeyword.prf_decl)
+    (OuterParse.ML_source >> (fn (txt, pos) =>
+      Toplevel.proof (Proof.map_context (Context.proof_map
+        (ML_Context.exec (fn () => ML_Context.eval true pos txt))) #> inherit_env_prf)))
+
 *}
 (*
 ML {*
--- a/ProgTutorial/FirstSteps.thy	Wed Mar 25 15:09:04 2009 +0100
+++ b/ProgTutorial/FirstSteps.thy	Thu Mar 26 19:00:51 2009 +0000
@@ -1558,6 +1558,10 @@
 section {* Pretty-Printing (TBD) *}
 
 text {*
+  Isabelle has a pretty sophisticated pretty-printing module. 
+*}
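+
+text {*
+  For example, assuming only the basic combinators @{ML Pretty.str}, @{ML Pretty.brk} and 
+  @{ML Pretty.block}, together with @{ML Pretty.string_of} for turning a pretty-printing
+  document back into a string, a tiny document can be assembled and printed as 
+  follows (more combinators of this module are listed below):
+*}
+
+ML{*let
+  (* a block of two strings separated by a breakable space *)
+  val doc = Pretty.block [Pretty.str "Hello,", Pretty.brk 1, Pretty.str "world!"]
+in
+  (* render the document and print it *)
+  warning (Pretty.string_of doc)
+end*}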
+
+text {*
   @{ML Pretty.big_list},
   @{ML Pretty.brk},
   @{ML Pretty.block},
--- a/ProgTutorial/Package/Ind_Code.thy	Wed Mar 25 15:09:04 2009 +0100
+++ b/ProgTutorial/Package/Ind_Code.thy	Thu Mar 26 19:00:51 2009 +0000
@@ -1,104 +1,8 @@
 theory Ind_Code
-imports "../Base" "../FirstSteps" Simple_Inductive_Package Ind_Prelims
+imports "../Base" "../FirstSteps" Ind_General_Scheme 
 begin
 
-datatype trm =
-  Var "string"
-| App "trm" "trm"
-| Lam "string" "trm"
-
-simple_inductive 
-  fresh :: "string \<Rightarrow> trm \<Rightarrow> bool" ("_ \<sharp> _" [100,100] 100)
-where
-  "a\<noteq>b \<Longrightarrow> a\<sharp>Var b"
-| "\<lbrakk>a\<sharp>t; a\<sharp>s\<rbrakk> \<Longrightarrow> a\<sharp>App t s"
-| "a\<sharp>Lam a t"
-| "\<lbrakk>a\<noteq>b; a\<sharp>t\<rbrakk> \<Longrightarrow> a\<sharp>Lam b t"
-
-section {* Code *}
-
-text {*
-  
-  @{text [display] "rule ::= \<And>xs. As \<Longrightarrow> (\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>* \<Longrightarrow> pred ts"}
-
-  @{text [display] "orule ::= \<forall>xs. As \<longrightarrow> (\<forall>ys. Bs \<longrightarrow> pred ss)\<^isup>* \<longrightarrow> pred ts"}
-
-  @{text [display] "def ::= pred \<equiv> \<lambda>zs. \<forall>preds. orules \<longrightarrow> pred zs"}
-  
-  @{text [display] "ind ::= \<And>zs. pred zs \<Longrightarrow> rules[preds::=Ps] \<Longrightarrow> P zs"}
-
-  @{text [display] "oind ::= \<forall>zs. pred zs \<longrightarrow> orules[preds::=Ps] \<longrightarrow> P zs"}
-
-  \underline{Induction proof}
-  
-  After ``objectivication'' we have 
-   @{text "pred zs"} and @{text "orules[preds::=Ps]"}; and have to show
-  @{text "P zs"}. Expanding @{text "pred zs"} gives @{text "\<forall>preds. orules \<longrightarrow> pred zs"}.
-  Instantiating the @{text "preds"} with @{text "Ps"} gives
-  @{text "orules[preds::=Ps] \<longrightarrow> P zs"}. So we can conclude with @{text "P zs"}.
-
-  \underline{Intro proof}
-
-  Assume we want to prove the $i$th intro rule. 
-
-  We have to show @{text "\<forall>xs. As \<longrightarrow> (\<forall>ys. Bs \<longrightarrow> pred ss)\<^isup>* \<longrightarrow> pred ts"};
-  expanding the defs, gives 
-  
-  @{text [display]
-  "\<forall>xs. As \<longrightarrow> (\<forall>ys. Bs \<longrightarrow> (\<forall>preds. orules \<longrightarrow> pred ss))\<^isup>* \<longrightarrow>  (\<forall>preds. orules \<longrightarrow> pred ts"}
-  
-  By applying as many allI and impI as possible, we have
-  
-  @{text "As"}, @{text "(\<forall>ys. Bs \<longrightarrow> (\<forall>preds. orules \<longrightarrow> pred ss))\<^isup>*"},
-  @{text "orules"}; and have to show @{text "pred ts"}
-
-  the $i$th @{text "orule"} is of the 
-  form @{text "\<forall>xs. As \<longrightarrow> (\<forall>ys. Bs \<longrightarrow> pred ss)\<^isup>* \<longrightarrow> pred ts"}.
-  
-  So we apply the $i$th @{text "orule"}, but we have to show the @{text "As"} (by assumption)
-  and all @{text "(\<forall>ys. Bs \<longrightarrow> pred ss)\<^isup>*"}. For the latter we use the assumptions
-  @{text "(\<forall>ys. Bs \<longrightarrow> (\<forall>preds. orules \<longrightarrow> pred ss))\<^isup>*"} and @{text "orules"}.
-
-
-  \begin{center}
-  ****************************
-  \end{center}
-*}
-
-
-text {*
-  For building testcases let us give some shorthands for the definitions of @{text "even/odd"} and
-  @{text "fresh"}. (FIXME put in a figure)
-*}
-  
-ML{*val eo_defs = [@{thm even_def}, @{thm odd_def}]
-val eo_rules =  
-  [@{prop "even 0"},
-   @{prop "\<And>n. odd n \<Longrightarrow> even (Suc n)"},
-   @{prop "\<And>n. even n \<Longrightarrow> odd (Suc n)"}]
-val eo_orules =  
-  [@{prop "even 0"},
-   @{prop "\<forall>n. odd n \<longrightarrow> even (Suc n)"},
-   @{prop "\<forall>n. even n \<longrightarrow> odd (Suc n)"}]
-val eo_preds =  [@{term "even::nat\<Rightarrow>bool"}, @{term "odd::nat\<Rightarrow>bool"}] 
-val eo_prednames = [@{binding "even"}, @{binding "odd"}]
-val eo_syns = [NoSyn, NoSyn] 
-val eo_arg_tyss = [[@{typ "nat"}], [@{typ "nat"}]] *}
-
-
-ML{*val fresh_defs = [@{thm fresh_def}]
-val fresh_rules =  
-  [@{prop "\<And>a b. a\<noteq>b \<Longrightarrow> a\<sharp>Var b"},
-   @{prop "\<And>a s t. a\<sharp>t \<Longrightarrow> a\<sharp>s \<Longrightarrow> a\<sharp>App t s"},
-   @{prop "\<And>a t. a\<sharp>Lam a t"},
-   @{prop "\<And>a b t. a\<noteq>b \<Longrightarrow> a\<sharp>t \<Longrightarrow> a\<sharp>Lam b t"}]
-val fresh_orules =  
-  [@{prop "\<forall>a b. a\<noteq>b \<longrightarrow> a\<sharp>Var b"},
-   @{prop "\<forall>a s t. a\<sharp>t \<longrightarrow> a\<sharp>s \<longrightarrow> a\<sharp>App t s"},
-   @{prop "\<forall>a t. a\<sharp>Lam a t"},
-   @{prop "\<forall>a b t. a\<noteq>b \<longrightarrow> a\<sharp>t \<longrightarrow> a\<sharp>Lam b t"}]
-val fresh_preds =  [@{term "fresh::string\<Rightarrow>trm\<Rightarrow>bool"}] *}
-
+section {* The Gory Details *} 
 
 subsection {* Definitions *}
 
@@ -113,7 +17,7 @@
   annotation and a term representing the right-hand side of the definition.
 *}
 
-ML %linenosgray{*fun make_defs ((predname, syn), trm) lthy =
+ML %linenosgray{*fun make_defn ((predname, syn), trm) lthy =
 let 
   val arg = ((predname, syn), (Attrib.empty_binding, trm))
   val ((_, (_ , thm)), lthy') = LocalTheory.define Thm.internalK arg lthy
@@ -124,23 +28,24 @@
 text {*
   It returns the definition (as a theorem) and the local theory in which this definition has 
   been made. In Line 4, @{ML internalK in Thm} is a flag attached to the 
-  theorem (others possibilities are the flags @{ML definitionK in Thm} and @{ML axiomK in Thm}). 
+  theorem (other possible flags are @{ML definitionK in Thm} and @{ML axiomK in Thm}). 
   These flags just classify theorems and have no significant meaning, except 
   for tools that, for example, find theorems in the theorem database. We also
-  use @{ML empty_binding in Attrib} in Line 3, since the definition does 
-  not need to have any theorem attributes. A testcase for this function is
+  use @{ML empty_binding in Attrib} in Line 3, since for our inductive predicates 
+  the definitions do not need to have any theorem attributes. A testcase for this 
+  function is
 *}
 
 local_setup %gray {* fn lthy =>
 let
   val arg = ((@{binding "MyTrue"}, NoSyn), @{term True})
-  val (def, lthy') = make_defs arg lthy 
+  val (def, lthy') = make_defn arg lthy 
 in
   warning (str_of_thm_no_vars lthy' def); lthy'
 end *}
 
 text {*
-  which makes the definition @{prop "MyTrue \<equiv> True"} and then prints it out. 
+  which introduces the definition @{prop "MyTrue \<equiv> True"} and then prints it out. 
   Since we are testing the function inside \isacommand{local\_setup}, i.e., make
   changes to the ambient theory, we can query the definition with the usual
   command \isacommand{thm}:
@@ -160,13 +65,13 @@
   "preds"}.
 
 
-  The first function constructs the term for one particular predicate, say
-  @{text "pred"}. The number of arguments of this predicate is
+  The first function constructs the term for one particular predicate (the
+  argument @{text "pred"} in the code belo). The number of arguments of this predicate is
   determined by the number of argument types given in @{text "arg_tys"}. 
   The other arguments are all the @{text "preds"} and the @{text "orules"}.
 *}
 
-ML %linenosgray{*fun defs_aux lthy orules preds (pred, arg_tys) =
+ML %linenosgray{*fun defn_aux lthy orules preds (pred, arg_tys) =
 let 
   fun mk_all x P = HOLogic.all_const (fastype_of x) $ lambda x P
 
@@ -202,34 +107,52 @@
 let
   val pred = @{term "even::nat\<Rightarrow>bool"}
   val arg_tys = [@{typ "nat"}]
-  val def = defs_aux lthy eo_orules eo_preds (pred, arg_tys)
+  val def = defn_aux lthy eo_orules eo_preds (pred, arg_tys)
 in
   warning (Syntax.string_of_term lthy def); lthy
 end *}
 
 text {*
-  The testcase  calls @{ML defs_aux} for the predicate @{text "even"} and prints
+  The testcase  calls @{ML defn_aux} for the predicate @{text "even"} and prints
   out the generated definition. So we obtain as printout 
 
   @{text [display] 
 "\<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n)) 
                          \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> even z"}
 
+  If we try out the function for the definition of freshness
+*}
+
+local_setup %gray{* fn lthy =>
+ (warning (Syntax.string_of_term lthy
+    (defn_aux lthy fresh_orules [fresh_pred] (fresh_pred, fresh_arg_tys)));
+  lthy) *}
+
+text {*
+  we obtain
+
+  @{term [display] 
+"\<lambda>z za. \<forall>fresh. (\<forall>a b. \<not> a = b \<longrightarrow> fresh a (Var b)) \<longrightarrow>
+               (\<forall>a s t. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)) \<longrightarrow>
+                (\<forall>a t. fresh a (Lam a t)) \<longrightarrow>
+                (\<forall>a b t. \<not> a = b \<longrightarrow> fresh a t \<longrightarrow> fresh a (Lam b t)) \<longrightarrow> fresh z za"}
+
+
   The second function for the definitions has to just iterate the function
-  @{ML defs_aux} over all predicates. The argument @{text "preds"} is again
+  @{ML defn_aux} over all predicates. The argument @{text "preds"} is again
   the list of predicates as @{ML_type term}s; the argument @{text
   "prednames"} is the list of names of the predicates; @{text syns} are the
   syntax annotations for each predicate; @{text "arg_tyss"} is
   the list of argument-type-lists for each predicate.
 *}
 
-ML %linenosgray{*fun definitions rules preds prednames syns arg_typss lthy =
+ML %linenosgray{*fun defns rules preds prednames syns arg_typss lthy =
 let
   val thy = ProofContext.theory_of lthy
   val orules = map (ObjectLogic.atomize_term thy) rules
-  val defs = map (defs_aux lthy orules preds) (preds ~~ arg_typss) 
+  val defs = map (defn_aux lthy orules preds) (preds ~~ arg_typss) 
 in
-  fold_map make_defs (prednames ~~ syns ~~ defs) lthy
+  fold_map make_defn (prednames ~~ syns ~~ defs) lthy
 end*}
 
 text {*
@@ -239,7 +162,7 @@
   meta-connectives). To do this transformation we have to obtain the theory
   behind the local theory (Line 3); with this theory we can use the function
   @{ML ObjectLogic.atomize_term} to make the transformation (Line 4). The call
-  to @{ML defs_aux} in Line 5 produces all right-hand sides of the
+  to @{ML defn_aux} in Line 5 produces all right-hand sides of the
   definitions. The actual definitions are then made in Line 7.  The result
   of the function is a list of theorems and a local theory. A testcase for 
   this function is 
@@ -248,7 +171,7 @@
 local_setup %gray {* fn lthy =>
 let
   val (defs, lthy') = 
-    definitions eo_rules eo_preds eo_prednames eo_syns eo_arg_tyss lthy
+    defns eo_rules eo_preds eo_prednames eo_syns eo_arg_tyss lthy
 in
   warning (str_of_thms_no_vars lthy' defs); lthy
 end *}
@@ -258,32 +181,30 @@
   the @{text even}-@{text odd} example. The definitions we obtain
   are:
 
-  \begin{isabelle}
-  @{text [break]
-"> even \<equiv> \<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n)) 
->                                 \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> even z,
-> odd \<equiv> \<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n)) 
->                                \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> odd z"}
-  \end{isabelle}
+  @{text [display, break]
+"even \<equiv> \<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n))  
+                                \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> even z,
+odd \<equiv> \<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n)) 
+                               \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> odd z"}
 
   Note that in the testcase we return the local theory @{text lthy} 
   (not the modified @{text lthy'}). As a result the test case has no effect
-  on the ambient theory. The reason is that if we make again the
-  definition, we pollute the name space with two versions of @{text "even"} 
+  on the ambient theory. The reason is that if we introduced the
+  definition again, we would pollute the name space with two versions of @{text "even"} 
   and @{text "odd"}.
 
-  This completes the code for making the definitions. Next we deal with
+  This completes the code for introducing the definitions. Next we deal with
   the induction principles. 
 *}
 
-subsection {* Introduction Rules *}
+subsection {* Induction Principles *}
 
 text {*
   Recall that the proof of the induction principle 
   for @{text "even"} was:
 *}
 
-lemma manual_ind_prin: 
+lemma manual_ind_prin_even: 
 assumes prem: "even z"
 shows "P 0 \<Longrightarrow> (\<And>m. Q m \<Longrightarrow> P (Suc m)) \<Longrightarrow> (\<And>m. P m \<Longrightarrow> Q (Suc m)) \<Longrightarrow> P z"
 apply(atomize (full))
@@ -338,7 +259,7 @@
   be implemented as follows:
 *}
 
-ML %linenosgray{*fun induction_tac defs prem insts =
+ML %linenosgray{*fun ind_tac defs prem insts =
   EVERY1 [ObjectLogic.full_atomize_tac,
           cut_facts_tac prem,
           K (rewrite_goals_tac defs),
@@ -346,42 +267,47 @@
           assume_tac]*}
 
 text {*
-  We have to give it as arguments the definitions, the premise 
-  (for example @{text "even n"}) and the instantiations. Compare this with the 
-  manual proof given for the lemma @{thm [source] manual_ind_prin}: 
-  as you can see there is almost a one-to-one correspondence between the \isacommand{apply}-script 
-  and the @{ML induction_tac}. A testcase for this tactic is the function
-*}
-
-ML{*fun test_tac prem = 
-let
-  val insts = [@{cterm "P::nat\<Rightarrow>bool"}, @{cterm "Q::nat\<Rightarrow>bool"}]
-in 
-  induction_tac eo_defs prem insts 
-end*}
-
-text {*
-  which indeed proves the induction principle: 
+  We have to give it as arguments the definitions, the premise (this premise
+  is @{text "even n"} in lemma @{thm [source] manual_ind_prin_even}) and the
+  instantiations. Compare this tactic with the manual for the lemma @{thm
+  [source] manual_ind_prin_even}: as you can see there is almost a one-to-one
+  correspondence between the \isacommand{apply}-script and the @{ML
+  ind_tac}. Two testcases for this tactic are:
 *}
 
-lemma automatic_ind_prin:
+lemma automatic_ind_prin_even:
 assumes prem: "even z"
 shows "P 0 \<Longrightarrow> (\<And>m. Q m \<Longrightarrow> P (Suc m)) \<Longrightarrow> (\<And>m. P m \<Longrightarrow> Q (Suc m)) \<Longrightarrow> P z"
-apply(tactic {* test_tac @{thms prem} *})
-done
+by (tactic {* ind_tac eo_defs @{thms prem} 
+                    [@{cterm "P::nat\<Rightarrow>bool"}, @{cterm "Q::nat\<Rightarrow>bool"}] *})
+
+lemma automatic_ind_prin_fresh:
+assumes prem: "fresh z za" 
+shows "(\<And>a b. a \<noteq> b \<Longrightarrow> P a (Var b)) \<Longrightarrow> 
+        (\<And>a t s. \<lbrakk>P a t; P a s\<rbrakk> \<Longrightarrow> P a (App t s)) \<Longrightarrow>
+        (\<And>a t. P a (Lam a t)) \<Longrightarrow> 
+        (\<And>a b t. \<lbrakk>a \<noteq> b; P a t\<rbrakk> \<Longrightarrow> P a (Lam b t)) \<Longrightarrow> P z za"
+by (tactic {* ind_tac @{thms fresh_def} @{thms prem} 
+                    [@{cterm "P::string\<Rightarrow>trm\<Rightarrow>bool"}] *})
+
 
 text {*
-  This gives the theorem:
+  Let us have a closer look at the first proved theorem:
 
   \begin{isabelle}
-  \isacommand{thm}~@{thm [source] automatic_ind_prin}\\
-  @{text "> "}~@{thm automatic_ind_prin}
+  \isacommand{thm}~@{thm [source] automatic_ind_prin_even}\\
+  @{text "> "}~@{thm automatic_ind_prin_even}
   \end{isabelle}
 
-  While the tactic for the induction principle is relatively simple, 
-  it is a bit harder to construct the goals from the introduction 
-  rules the user provides. In general we have to construct for each predicate 
-  @{text "pred"} a goal of the form
+  The variables @{text "z"}, @{text "P"} and @{text "Q"} are schematic
+  variables (since they are not quantified in the lemma). These schematic
+  variables are needed so that they can be instantiated by the user later
+  on. We have to take care to also generate these schematic variables when
+  generating the goals for the induction principles. While the tactic for
+  proving the induction principles is relatively simple, it will be a bit
+  of work to construct the goals from the introduction rules the user
+  provides. In general we have to construct for each predicate @{text "pred"}
+  a goal of the form
 
   @{text [display] 
   "pred ?zs \<Longrightarrow> rules[preds := ?Ps] \<Longrightarrow> ?P ?zs"}
@@ -397,12 +323,12 @@
   introduction rules are already appropriately substituted. The argument
   @{text "srules"} stands for these substituted rules; @{text cnewpreds} are
   the certified terms corresponding to the variables @{text "?Ps"}; @{text
-  "pred"} is the predicate for which we prove the introduction principle;
+  "pred"} is the predicate for which we prove the induction principle;
   @{text "newpred"} is its replacement and @{text "arg_tys"} are the argument
   types of this predicate.
 *}
 
-ML %linenosgray{*fun prove_induction lthy defs srules cnewpreds ((pred, newpred), arg_tys) =
+ML %linenosgray{*fun prove_ind lthy defs srules cnewpreds ((pred, newpred), arg_tys) =
 let
   val zs = replicate (length arg_tys) "z"
   val (newargnames, lthy') = Variable.variant_fixes zs lthy;
@@ -413,15 +339,16 @@
          (srules, HOLogic.mk_Trueprop (list_comb (newpred, newargs)))
 in
   Goal.prove lthy' [] [prem] goal
-  (fn {prems, ...} => induction_tac defs prems cnewpreds)
+      (fn {prems, ...} => ind_tac defs prems cnewpreds)
   |> singleton (ProofContext.export lthy' lthy)
 end *}
 
 text {* 
   In Line 3 we produce names @{text "zs"} for each type in the 
   argument type list. Line 4 makes these names unique and declares them as 
-  \emph{free} (but fixed) variables in the local theory @{text "lthy'"}. In 
-  Line 5 we construct the terms corresponding to these variables. 
+  \emph{free} (but fixed) variables in the local theory @{text "lthy'"}. 
+  That means they are not (yet) schematic variables.
+  In Line 5 we construct the terms corresponding to these variables. 
   The variables are applied to the predicate in Line 7 (this corresponds
   to the first premise @{text "pred zs"} of the induction principle). 
   In Line 8 and 9, we first construct the term  @{text "P zs"} 
@@ -437,43 +364,40 @@
   lines will return a theorem. However, it is a theorem
   proved inside the local theory @{text "lthy'"}, where the variables @{text
   "zs"} are fixed, but free (see Line 4). By exporting this theorem from @{text
-  "lthy'"} (which contains the @{text "zs"} as free) to @{text
+  "lthy'"} (which contains the @{text "zs"} as free variables) to @{text
   "lthy"} (which does not), we obtain the desired schematic variables @{text "?zs"}.
   A testcase for this function is
 *}
 
 local_setup %gray{* fn lthy =>
 let
-  val srules = [@{prop "P (0::nat)"},
-                @{prop "\<And>n::nat. Q n \<Longrightarrow> P (Suc n)"},
-                @{prop "\<And>n::nat. P n \<Longrightarrow> Q (Suc n)"}] 
+  val newpreds = [@{term "P::nat\<Rightarrow>bool"}, @{term "Q::nat\<Rightarrow>bool"}]
   val cnewpreds = [@{cterm "P::nat\<Rightarrow>bool"}, @{cterm "Q::nat\<Rightarrow>bool"}]
-  val pred = @{term "even::nat\<Rightarrow>bool"}
+  val srules =  map (subst_free (eo_preds ~~ newpreds)) eo_rules
   val newpred = @{term "P::nat\<Rightarrow>bool"}
-  val arg_tys = [@{typ "nat"}]
   val intro = 
-    prove_induction lthy eo_defs srules cnewpreds ((pred, newpred), arg_tys)
+       prove_ind lthy eo_defs srules cnewpreds ((e_pred, newpred), e_arg_tys)
 in
   warning (str_of_thm lthy intro); lthy
-end *} 
+end *}
 
 text {*
-  This prints out:
+  This prints out the theorem:
 
   @{text [display]
   " \<lbrakk>even ?z; P 0; \<And>n. Q n \<Longrightarrow> P (Suc n); \<And>n. P n \<Longrightarrow> Q (Suc n)\<rbrakk> \<Longrightarrow> P ?z"}
 
-  Note that the export from @{text lthy'} to @{text lthy} in Line 13 above 
-  has turned the free, but fixed, @{text "z"} into a schematic 
+  The export from @{text lthy'} to @{text lthy} in Line 13 above 
+  has correctly turned the free, but fixed, @{text "z"} into a schematic 
   variable @{text "?z"}; the variables @{text "P"} and @{text "Q"} are not yet
   schematic. 
 
   We still have to produce the new predicates with which the introduction
-  rules are substituted and iterate @{ML prove_induction} over all
+  rules are substituted and iterate @{ML prove_ind} over all
   predicates. This is what the second function does: 
 *}
 
-ML %linenosgray{*fun inductions rules defs preds arg_tyss lthy  =
+ML %linenosgray{*fun inds rules defs preds arg_tyss lthy  =
 let
   val Ps = replicate (length preds) "P"
   val (newprednames, lthy') = Variable.variant_fixes Ps lthy
@@ -486,7 +410,7 @@
   val srules = map (subst_free (preds ~~ newpreds)) rules
 
 in
-  map (prove_induction lthy' defs srules cnewpreds) 
+  map (prove_ind lthy' defs srules cnewpreds) 
         (preds ~~ newpreds ~~ arg_tyss)
           |> ProofContext.export lthy' lthy
 end*}
@@ -511,7 +435,7 @@
 
 local_setup %gray {* fn lthy =>
 let 
-  val ind_thms = inductions eo_rules eo_defs eo_preds eo_arg_tyss lthy
+  val ind_thms = inds eo_rules eo_defs eo_preds eo_arg_tyss lthy
 in
   warning (str_of_thms lthy ind_thms); lthy
 end *}
@@ -521,23 +445,25 @@
   which prints out
 
 @{text [display]
-"> even ?z \<Longrightarrow> ?P1 0 \<Longrightarrow> 
-> (\<And>m. ?Pa1 m \<Longrightarrow> ?P1 (Suc m)) \<Longrightarrow> (\<And>m. ?P1 m \<Longrightarrow> ?Pa1 (Suc m)) \<Longrightarrow> ?P1 ?z,
-> odd ?z \<Longrightarrow> ?P1 0 \<Longrightarrow>
-> (\<And>m. ?Pa1 m \<Longrightarrow> ?P1 (Suc m)) \<Longrightarrow> (\<And>m. ?P1 m \<Longrightarrow> ?Pa1 (Suc m)) \<Longrightarrow> ?Pa1 ?z"}
+"even ?z \<Longrightarrow> ?P1 0 \<Longrightarrow> 
+ (\<And>m. ?Pa1 m \<Longrightarrow> ?P1 (Suc m)) \<Longrightarrow> (\<And>m. ?P1 m \<Longrightarrow> ?Pa1 (Suc m)) \<Longrightarrow> ?P1 ?z,
+odd ?z \<Longrightarrow> ?P1 0 \<Longrightarrow>
+ (\<And>m. ?Pa1 m \<Longrightarrow> ?P1 (Suc m)) \<Longrightarrow> (\<And>m. ?P1 m \<Longrightarrow> ?Pa1 (Suc m)) \<Longrightarrow> ?Pa1 ?z"}
 
   Note that now both, the @{text "?Ps"} and the @{text "?zs"}, are schematic
-  variables. The numbers have been introduced by the pretty-printer and are 
-  not significant.
+  variables. The numbers attached to these variables have been introduced by 
+  the pretty-printer and are \emph{not} important for the user. 
 
-  This completes the code for the induction principles.  
+  This completes the code for the induction principles. The final piece
+  of reasoning infrastructure we need is the introduction rules. 
 *}
 
 subsection {* Introduction Rules *}
 
 text {*
-  Finally we can prove the introduction rules. Their proofs are quite a bit
-  more involved. To ease these proofs somewhat we use the following two helper
+  The proofs of the introduction rules are quite a bit
+  more involved than the ones for the induction principles. 
+  To ease our work here somewhat, we use the following two helper
   functions.
 
 *}
@@ -555,17 +481,15 @@
   shows "\<forall>x y z. P x y z" sorry
 
 lemma imp_elims_test:
-  fixes A B C::"bool"
   shows "A \<longrightarrow> B \<longrightarrow> C" sorry
 
 lemma imp_elims_test':
-  fixes A::"bool"
   shows "A" "B" sorry
 
 text {*
   The function @{ML all_elims} takes a list of (certified) terms and instantiates
   theorems of the form @{thm [source] all_elims_test}. For example we can instantiate
-  the quantifiers in this theorem with @{term a}, @{term b} and @{term c} as follows
+  the quantifiers in this theorem with @{term a}, @{term b} and @{term c} as follows:
 
   @{ML_response_fake [display, gray]
 "let
@@ -577,98 +501,338 @@
   "P a b c"}
 
   Similarly, the function @{ML imp_elims} eliminates preconditions from implications. 
-  For example: 
+  For example we can eliminate the preconditions @{text "A"} and @{text "B"} from
+  @{thm [source] imp_elims_test}:
 
   @{ML_response_fake [display, gray]
 "warning (str_of_thm_no_vars @{context} 
             (imp_elims @{thms imp_elims_test'} @{thm imp_elims_test}))"
   "C"}
 
-  We now look closely at the proof for the introduction rule
+  To explain the proof for the introduction rule, our running example will be
+  the rule:
 
   \begin{isabelle}
-  @{term "\<lbrakk>a\<sharp>t; a\<sharp>s\<rbrakk> \<Longrightarrow> a\<sharp>App t s"}
+  @{prop "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"}
   \end{isabelle}
   
+  for freshness of lambda abstractions. We set up the proof as follows:
 *}
 
-
 lemma fresh_App:
-  shows "\<lbrakk>a\<sharp>t; a\<sharp>s\<rbrakk> \<Longrightarrow> a\<sharp>App t s"
-apply(tactic {* ObjectLogic.rulify_tac 1 *})
-apply(tactic {* rewrite_goals_tac [@{thm fresh_def}] *})
-apply(tactic {* REPEAT (resolve_tac [@{thm allI}, @{thm impI}] 1) *})
-apply(tactic {* print_tac "" *})
+  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+(*<*)oops(*>*)
+
+text {*
+  The first step will be to expand the definition of freshness
+  and then introduce quantifiers and implications. For this we
+  will use the tactic
+*}
+
+ML{*fun expand_tac defs =
+  ObjectLogic.rulify_tac 1
+  THEN rewrite_goals_tac defs
+  THEN (REPEAT (resolve_tac [@{thm allI}, @{thm impI}] 1)) *}
+
+text {*
+  The first step of ``rulifying'' the lemma will turn out to be important
+  later on. Applying this tactic 
+*}
+
+(*<*)
+lemma fresh_App:
+  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+(*>*)
+apply(tactic {* expand_tac @{thms fresh_def} *})
 
 txt {*
+  we end up in the goal state
+
   \begin{isabelle}
-  @{subgoals}
+  @{subgoals [display]}
   \end{isabelle}
+
+  As you can see, there are parameters (namely @{text "a"}, @{text "b"} 
+  and @{text "t"}) which come from the introduction rule and parameters
+  (in the case above only @{text "fresh"}) which come from the universal
+  quantification in the definition of @{term "fresh a (Lam b t)"}.
+  Similarly, there are preconditions
+  that come from the premises of the rule and premises from the
+  definition. We need to treat these 
+  parameters and preconditions differently. In the code below
+  we will therefore separate them into @{text "params1"} and @{text params2},
+  respectively @{text "prems1"} and @{text "prems2"}. To do this 
+  separation, it is best to open a subproof with the tactic 
+  @{ML SUBPROOF}, since this tactic provides us
+  with the parameters (as list of @{ML_type cterm}s) and the premises
+  (as list of @{ML_type thm}s). The problem we have to overcome 
+  with @{ML SUBPROOF} is that this tactic always expects us to completely 
+  discharge the goal (see Section ???). This is inconvenient for
+  our gradual explanation of the proof. To circumvent this inconvenience
+  we use the following modified tactic: 
+*}
+(*<*)oops(*>*)
+ML{*fun SUBPROOF_test tac ctxt = (SUBPROOF tac ctxt 1) ORELSE all_tac*}
+
+text {*
+  If the tactic inside @{ML SUBPROOF} fails, then the overall tactic will
+  still succeed. With this testing tactic, we can gradually implement
+  all necessary proof steps.
+*}
+
+text_raw {*
+\begin{figure}[t]
+\begin{minipage}{\textwidth}
+\begin{isabelle}
+*}
+ML{*fun chop_print params1 params2 prems1 prems2 ctxt =
+let 
+  val s = ["Params1 from the rule:", str_of_cterms ctxt params1] 
+        @ ["Params2 from the predicate:", str_of_cterms ctxt params2] 
+        @ ["Prems1 from the rule:"] @ (map (str_of_thm ctxt) prems1) 
+        @ ["Prems2 from the predicate:"] @ (map (str_of_thm ctxt) prems2) 
+in 
+  s |> separate "\n"
+    |> implode
+    |> warning
+end*}
+text_raw{*
+\end{isabelle}
+\end{minipage}
+\caption{FIXME\label{fig:chopprint}}
+\end{figure}
+*}
+
+text {*
+  First we calculate the values for @{text "params1/2"} and @{text "prems1/2"}
+  from @{text "params"} and @{text "prems"}, respectively. To see what is
+  going on in our example, we will print out the values using the printing
+  function in Figure~\ref{fig:chopprint}. Since the tactic @{ML SUBPROOF} will
+  supply us with the @{text "params"} and @{text "prems"} as lists, we can 
+  separate them using the function @{ML chop} (for example, @{text "chop 2 [a, b, c, d]"}
+  produces the pair @{text "([a, b], [c, d])"}). 
+*}
+
+ML{*fun chop_test_tac preds rules =
+  SUBPROOF_test (fn {params, prems, context, ...} =>
+  let
+    val (params1, params2) = chop (length params - length preds) params
+    val (prems1, prems2) = chop (length prems - length rules) prems
+  in
+    chop_print params1 params2 prems1 prems2 context; no_tac
+  end) *}
+
+text {* 
+  For the separation we can rely on the fact that Isabelle deterministically 
+  produces parameters and premises in a goal state. The last parameters
+  that were introduced come from the quantifications in the definitions.
+  Therefore we only have to subtract the number of predicates (in this
+  case only @{text "1"}) from the length of all parameters. Similarly
+  with the @{text "prems"}: the last premises in the goal state must
+  come from unfolding the conclusion. So we can just subtract
+  the number of rules from the number of all premises. Applying
+  this tactic in our example 
 *}
 
-ML_prf {* fun SUBPROOF_test tac ctxt = (SUBPROOF tac ctxt 1) ORELSE all_tac *}
+(*<*)
+lemma fresh_App:
+  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+apply(tactic {* expand_tac @{thms fresh_def} *})
+(*>*)
+apply(tactic {* chop_test_tac [fresh_pred] fresh_rules @{context} *})
+(*<*)oops(*>*)
+
+text {*
+  gives
 
-apply(tactic {* SUBPROOF_test (fn {params, prems, ...} =>
-   let
-     val (prems1, prems2) = chop (length prems - length fresh_rules) prems
-     val (params1, params2) = chop (length params - length fresh_preds) params
-   in
-     no_tac
-   end) @{context} *})
-oops
+  \begin{isabelle}
+  @{text "Params1 from the rule:"}\\
+  @{text "a, b, t"}\\
+  @{text "Params2 from the predicate:"}\\
+  @{text "fresh"}\\
+  @{text "Prems1 from the rule:"}\\
+  @{term "a \<noteq> b"}\\
+  @{text [break]
+"\<forall>fresh.
+      (\<forall>a b. a \<noteq> b \<longrightarrow> fresh a (Var b)) \<longrightarrow>
+      (\<forall>a t s. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)) \<longrightarrow>
+      (\<forall>a t. fresh a (Lam a t)) \<longrightarrow> 
+      (\<forall>a b t. a \<noteq> b \<longrightarrow> fresh a t \<longrightarrow> fresh a (Lam b t)) \<longrightarrow> fresh a t"}\\
+   @{text "Prems2 from the predicate:"}\\
+   @{term "\<forall>a b. a \<noteq> b \<longrightarrow> fresh a (Var b)"}\\
+   @{term "\<forall>a t s. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)"}\\
+   @{term "\<forall>a t. fresh a (Lam a t)"}\\
+   @{term "\<forall>a b t. a \<noteq> b \<longrightarrow> fresh a t \<longrightarrow> fresh a (Lam b t)"}
+  \end{isabelle}
 
 
-ML{*fun subproof2 prem params2 prems2 =  
- SUBPROOF (fn {prems, ...} =>
-   let
-     val prem' = prems MRS prem;
-     val prem'' = 
-       case prop_of prem' of
+  We now have to select from @{text prems2} the premise 
+  that corresponds to the introduction rule we prove, namely:
+
+  @{term [display] "\<forall>a t s. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)"}
+
+  To use this premise with @{ML rtac}, we need to instantiate its 
+  quantifiers (with @{text params1}) and transform it into an 
+  introduction rule (using @{ML "ObjectLogic.rulify"}). 
+  So we can modify the subproof as follows:
+*}
+
+ML{*fun apply_prem_tac i preds rules =
+  SUBPROOF_test (fn {params, prems, context, ...} =>
+  let
+    val (params1, params2) = chop (length params - length preds) params
+    val (prems1, prems2) = chop (length prems - length rules) prems
+  in
+    rtac (ObjectLogic.rulify (all_elims params1 (nth prems2 i))) 1
+    THEN print_tac ""
+    THEN no_tac
+  end) *}
+
+text {* and apply it with *}
+
+(*<*)
+lemma fresh_App:
+  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+apply(tactic {* expand_tac @{thms fresh_def} *})
+(*>*)
+apply(tactic {* apply_prem_tac 3 [fresh_pred] fresh_rules @{context} *})
+(*<*)oops(*>*)
+
+text {*
+  Since we print out the goal state just after the application of @{ML rtac}, 
+  we can see it has the two subgoals
+
+  \begin{isabelle}
+  @{text "1."}~@{prop "a \<noteq> b"}\\
+  @{text "2."}~@{prop "fresh a t"}
+  \end{isabelle}
+
+  where the first comes from a non-recursive precondition of the rule
+  and the second comes from a recursive precondition. The first kind
+  can be solved immediately by @{text "prems1"}. The second kind
+  needs more work. It can be solved with the other premise in @{text "prems1"},
+  namely
+
+  @{term [break,display]
+  "\<forall>fresh.
+      (\<forall>a b. a \<noteq> b \<longrightarrow> fresh a (Var b)) \<longrightarrow>
+      (\<forall>a t s. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)) \<longrightarrow>
+      (\<forall>a t. fresh a (Lam a t)) \<longrightarrow> 
+      (\<forall>a b t. a \<noteq> b \<longrightarrow> fresh a t \<longrightarrow> fresh a (Lam b t)) \<longrightarrow> fresh a t"}
+
+  but we have to instantiate it appropriately. These instantiations
+  come from @{text "params2"} and @{text "prems2"}. We can determine
+  whether we are in the simple or complicated case by checking whether
+  the topmost connective is @{text "\<forall>"}. The premises in the simple
+  case cannot have such a quantification, since the first step 
+  of @{ML "expand_tac"} was the ``rulification'' of the lemma. So 
+  we can implement the following function
+*}
+
+ML{*fun prepare_prem params2 prems2 prem =  
+  rtac (case prop_of prem of
            _ $ (Const (@{const_name All}, _) $ _) =>
-             prem' |> all_elims params2 
-                   |> imp_elims prems2
-         | _ => prem';
-   in 
-     rtac prem'' 1 
-   end)*}
+                 prem |> all_elims params2 
+                      |> imp_elims prems2
+         | _ => prem) *}
+
+text {* 
+  which either applies the premise outright or, if it has an
+  outermost universal quantification, instantiates it first with 
+  @{text "params2"} and then @{text "prems2"}. The following tactic 
+  will therefore prove the lemma.
+*}
+
+ML{*fun prove_intro_tac i preds rules =
+  SUBPROOF (fn {params, prems, context, ...} =>
+  let
+    val (params1, params2) = chop (length params - length preds) params
+    val (prems1, prems2) = chop (length prems - length rules) prems
+  in
+    rtac (ObjectLogic.rulify (all_elims params1 (nth prems2 i))) 1
+    THEN EVERY1 (map (prepare_prem params2 prems2) prems1)
+  end) *}
+
+text {*
+  We can complete the proof of the introduction rule now as follows:
+*}
+
+(*<*)
+lemma fresh_App:
+  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+apply(tactic {* expand_tac @{thms fresh_def} *})
+(*>*)
+apply(tactic {* prove_intro_tac 3 [fresh_pred] fresh_rules @{context} 1 *})
+done
+
+text {* 
+  Unfortunately, not everything is done yet. The tactic above does not work
+  for rules where the recursive premises have preconditions, like the rule
+  for the accessible part below. For these premises the preconditions need
+  to be discharged first, before the premise can be applied.
+*}
+
+lemma accpartI:
+  shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+apply(tactic {* expand_tac @{thms accpart_def} *})
+apply(tactic {* chop_test_tac [acc_pred] acc_rules @{context} *})
+apply(tactic {* apply_prem_tac 0 [acc_pred] acc_rules @{context} *})
+
+txt {*
+  
+  @{subgoals [display]}
+
+  \begin{isabelle}
+  @{text "Params1 from the rule:"}\\
+  @{text "x"}\\
+  @{text "Params2 from the predicate:"}\\
+  @{text "P"}\\
+  @{text "Prems1 from the rule:"}\\
+  @{text "R ?y x \<Longrightarrow> \<forall>P. (\<forall>x. (\<forall>y. R y x \<longrightarrow> P y) \<longrightarrow> P x) \<longrightarrow> P ?y"}\\
+  @{text "Prems2 from the predicate:"}\\
+  @{term "\<forall>x. (\<forall>y. R y x \<longrightarrow> P y) \<longrightarrow> P x"}\\
+  \end{isabelle}
+
+*}
+(*<*)oops(*>*)
+
+
+ML{*fun prepare_prem params2 prems2 ctxt prem =  
+  SUBPROOF (fn {prems, ...} =>
+  let
+    val prem' = prems MRS prem
+  in 
+    rtac (case prop_of prem' of
+           _ $ (Const (@{const_name All}, _) $ _) =>
+                 prem' |> all_elims params2 
+                       |> imp_elims prems2
+         | _ => prem') 1
+  end) ctxt *}
+
+ML{*fun prove_intro_tac i preds rules =
+  SUBPROOF (fn {params, prems, context, ...} =>
+  let
+    val (params1, params2) = chop (length params - length preds) params
+    val (prems1, prems2) = chop (length prems - length rules) prems
+  in
+    rtac (ObjectLogic.rulify (all_elims params1 (nth prems2 i))) 1
+    THEN EVERY1 (map (prepare_prem params2 prems2 context) prems1)
+  end) *}
+
+lemma accpartI:
+  shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+apply(tactic {* expand_tac @{thms accpart_def} *})
+apply(tactic {* prove_intro_tac 0 [acc_pred] acc_rules @{context} 1 *})
+done
+
+
 
 text {*
 
 *}
 
-
-ML %linenosgray{*fun subproof1 rules preds i = 
- SUBPROOF (fn {params, prems, context = ctxt', ...} =>
-   let
-     val (prems1, prems2) = chop (length prems - length rules) prems
-     val (params1, params2) = chop (length params - length preds) params
-   in
-     rtac (ObjectLogic.rulify (all_elims params1 (nth prems2 i))) 1 
-     (* applicateion of the i-ith intro rule *)
-     THEN
-     EVERY1 (map (fn prem => subproof2 prem params2 prems2 ctxt') prems1)
-   end)*}
-
-text {*
-  @{text "params1"} are the variables of the rules; @{text "params2"} is
-  the variables corresponding to the @{text "preds"}.
-
-  @{text "prems1"} are the assumption corresponding to the rules;
-  @{text "prems2"} are the assumptions coming from the allIs/impIs
-
-  you instantiate the parameters i-th introduction rule with the parameters
-  that come from the rule; and you apply it to the goal
-
-  this now generates subgoals corresponding to the premisses of this
-  intro rule 
-*}
-
 ML{*
 fun intros_tac defs rules preds i ctxt =
   EVERY1 [ObjectLogic.rulify_tac,
           K (rewrite_goals_tac defs),
           REPEAT o (resolve_tac [@{thm allI}, @{thm impI}]),
-          subproof1 rules preds i ctxt]*}
+          prove_intro_tac i preds rules ctxt]*}
 
 text {*
   A test case
@@ -692,7 +856,7 @@
 apply(tactic {* intros_tac_test @{context} 2 *})
 done
 
-ML{*fun introductions rules preds defs lthy = 
+ML{*fun intros rules preds defs lthy = 
 let
   fun prove_intro (i, goal) =
     Goal.prove lthy [] [] goal
@@ -713,9 +877,9 @@
   val tyss = map (binder_types o fastype_of) preds   
   val (attrs, rules) = split_list rule_specs    
 
-  val (defs, lthy') = definitions rules preds prednames syns tyss lthy      
-  val ind_rules = inductions rules defs preds tyss lthy' 	
-  val intro_rules = introductions rules preds defs lthy'
+  val (defs, lthy') = defns rules preds prednames syns tyss lthy      
+  val ind_rules = inds rules defs preds tyss lthy' 	
+  val intro_rules = intros rules preds defs lthy'
 
   val mut_name = space_implode "_" (map Binding.name_of prednames)
   val case_names = map (Binding.name_of o fst) attrs
@@ -768,6 +932,8 @@
   \item say that the induction principle is weaker (weaker than
   what the standard inductive package generates)
   \item say that no conformity test is done
+  \item exercise about strong induction principles
+  \item exercise about the test for the intro rules
   \end{itemize}
   
 *}
--- a/ProgTutorial/Package/Ind_General_Scheme.thy	Wed Mar 25 15:09:04 2009 +0100
+++ b/ProgTutorial/Package/Ind_General_Scheme.thy	Thu Mar 26 19:00:51 2009 +0000
@@ -1,87 +1,235 @@
-theory Ind_General_Scheme
-imports Main
+theory Ind_General_Scheme 
+imports Simple_Inductive_Package Ind_Prelims
 begin
 
-section{* The General Construction Principle \label{sec:ind-general-method} *}
+(*<*)
+datatype trm =
+  Var "string"
+| App "trm" "trm"
+| Lam "string" "trm"
+
+simple_inductive 
+  fresh :: "string \<Rightarrow> trm \<Rightarrow> bool" 
+where
+  fresh_var: "a\<noteq>b \<Longrightarrow> fresh a (Var b)"
+| fresh_app: "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
+| fresh_lam1: "fresh a (Lam a t)"
+| fresh_lam2: "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+(*>*)
+
+section {* The Code in a Nutshell *}
 
 text {*
-  
+  (FIXME: perhaps move somewhere else)
+
   The point of these examples is to get a feeling what the automatic proofs 
   should do in order to solve all inductive definitions we throw at them. For this 
   it is instructive to look at the general construction principle 
   of inductive definitions, which we shall do in the next section.
-
-  Before we start with the implementation, it is useful to describe the general
-  form of inductive definitions that our package should accept.
-  Suppose $R_1,\ldots,R_n$ be mutually inductive predicates and $\vec{p}$ be
-  some fixed parameters. Then the introduction rules for $R_1,\ldots,R_n$ may have
-  the form
-
-  \[
-  \bigwedge\vec{x}_i.~\vec{A}_i \Longrightarrow \left(\bigwedge\vec{y}_{ij}.~\vec{B}_{ij} \Longrightarrow
-  R_{k_{ij}}~\vec{p}~\vec{s}_{ij}\right)_{j=1,\ldots,m_i} \Longrightarrow R_{l_i}~\vec{p}~\vec{t}_i
-  \qquad \mbox{for\ } i=1,\ldots,r
-  \]
-
-  where $\vec{A}_i$ and $\vec{B}_{ij}$ are formulae not containing $R_1,\ldots,R_n$.
-  Note that by disallowing the inductive predicates to occur in $\vec{B}_{ij}$ we make sure
-  that all occurrences of the predicates in the premises of the introduction rules are
-  \emph{strictly positive}. This condition guarantees the existence of predicates
-  that are closed under the introduction rules shown above. Then the definitions of the 
-  inductive predicates $R_1,\ldots,R_n$ is:
-
-  \[
-  \begin{array}{l@ {\qquad}l}
-  R_i \equiv \lambda\vec{p}~\vec{z}_i.~\forall P_1 \ldots P_n.~K_1 \longrightarrow \cdots \longrightarrow K_r \longrightarrow P_i~\vec{z}_i &
-  \mbox{for\ } i=1,\ldots,n \\[1.5ex]
-  \mbox{where} \\
-  K_i \equiv \forall\vec{x}_i.~\vec{A}_i \longrightarrow \left(\forall\vec{y}_{ij}.~\vec{B}_{ij} \longrightarrow
-  P_{k_{ij}}~\vec{s}_{ij}\right)_{j=1,\ldots,m_i} \longrightarrow P_{l_i}~\vec{t}_i &
-  \mbox{for\ } i=1,\ldots,r
-  \end{array}
-  \]
-
-  The induction principles for the inductive predicates $R_1,\ldots,R_n$ are
-
-  \[
-  \begin{array}{l@ {\qquad}l}
-  R_i~\vec{p}~\vec{z}_i \Longrightarrow I_1 \Longrightarrow \cdots \Longrightarrow I_r \Longrightarrow P_i~\vec{z}_i &
-  \mbox{for\ } i=1,\ldots,n \\[1.5ex]
-  \mbox{where} \\
-  I_i \equiv \bigwedge\vec{x}_i.~\vec{A}_i \Longrightarrow \left(\bigwedge\vec{y}_{ij}.~\vec{B}_{ij} \Longrightarrow
-  P_{k_{ij}}~\vec{s}_{ij}\right)_{j=1,\ldots,m_i} \Longrightarrow P_{l_i}~\vec{t}_i &
-  \mbox{for\ } i=1,\ldots,r
-  \end{array}
-  \]
-
-  Since $K_i$ and $I_i$ are equivalent modulo conversion between meta-level and object-level
-  connectives, it is clear that the proof of the induction theorem is straightforward. We will
-  therefore focus on the proof of the introduction rules. When proving the introduction rule
-  shown above, we start by unfolding the definition of $R_1,\ldots,R_n$, which yields
-
-  \[
-  \bigwedge\vec{x}_i.~\vec{A}_i \Longrightarrow \left(\bigwedge\vec{y}_{ij}.~\vec{B}_{ij} \Longrightarrow
-  \forall P_1 \ldots P_n.~\vec{K} \longrightarrow P_{k_{ij}}~\vec{s}_{ij}\right)_{j=1,\ldots,m_i} \Longrightarrow \forall P_1 \ldots P_n.~\vec{K} \longrightarrow P_{l_i}~\vec{t}_i
-  \]
-
-  where $\vec{K}$ abbreviates $K_1,\ldots,K_r$. Applying the introduction rules for
-  $\forall$ and $\longrightarrow$ yields a goal state in which we have to prove $P_{l_i}~\vec{t}_i$
-  from the additional assumptions $\vec{K}$. When using $K_{l_i}$ (converted to the meta-logic format)
-  to prove $P_{l_i}~\vec{t}_i$, we get subgoals $\vec{A}_i$ that are trivially solvable by assumption,
-  as well as subgoals of the form
-
-  \[
-  \bigwedge\vec{y}_{ij}.~\vec{B}_{ij} \Longrightarrow P_{k_{ij}}~\vec{s}_{ij} \qquad \mbox{for\ } j=1,\ldots,m_i
-  \]
-
-  that can be solved using the assumptions
-
-  \[
-  \bigwedge\vec{y}_{ij}.~\vec{B}_{ij} \Longrightarrow
-  \forall P_1 \ldots P_n.~\vec{K} \longrightarrow P_{k_{ij}}~\vec{s}_{ij} \qquad \mbox{and} \qquad \vec{K}
-  \]
-
-  What remains is to implement these proofs generically.
 *}
 
+text {*
+  The inductive package will generate the reasoning infrastructure
+  for mutually recursive predicates @{text "pred\<^isub>1\<dots>pred\<^isub>n"}. In what
+  follows we will have the convention that various, possibly empty collections of 
+  ``things'' are indicated either by adding an @{text "s"} or by adding 
+  a superscript @{text [quotes] "\<^isup>*"}. The shorthand for the predicates 
+  will therefore be @{text "preds"} or @{text "pred\<^sup>*"}. In this case of the
+  predicates there must be at least a single one in order to obtain a meaningful
+  definition.
+
+  The input for the inductive package will be some @{text "preds"} with possible 
+  typing and syntax annotations, and also some introduction rules. Below we 
+  refer to the introduction rules simply as @{text "rules"}. Borrowing some idealised Isabelle 
+  notation, one such @{text "rule"} is of the form
+
+  \begin{isabelle}
+  @{text "rule ::= 
+  \<And>xs. \<^raw:$\underbrace{\mbox{>As\<^raw:}}_{\text{\makebox[0mm]{\rm non-recursive premises}}}$> \<Longrightarrow> 
+  \<^raw:$\underbrace{\mbox{>(\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>*\<^raw:}}_{\text{\rm recursive premises}}$> 
+  \<Longrightarrow> pred ts"}
+  \end{isabelle}
+  
+  We actually assume the user will always state rules of this form and we omit
+  any code that tests this format. So things can go horribly wrong if the
+  @{text "rules"} are not of this form.\footnote{FIXME: Exercise to test this
+  format.} The @{text As} and @{text Bs} in a @{text "rule"} are formulae not
+  involving the inductive predicates @{text "preds"}; the instances @{text
+  "pred ss"} and @{text "pred ts"} can stand for different predicates.
+  Everything to the left of @{text [quotes] "\<Longrightarrow> pred ts"} is called the premises of
+  the rule. The variables @{text "xs"} are usually omitted in the user's
+  input. The variables @{text "ys"} are local with respect to on recursive
+  premise. Some examples of @{text "rule"}s the user might write are:
+
+
+  @{thm [display] fresh_var[no_vars]}
+
+  which has only a single non-recursive premise, whereas
+
+  @{thm [display] evenS[no_vars]}
+
+  has a single recursive premise; the rule
+
+  @{thm [display] accpartI[no_vars]}
+
+  has a recursive premise which has a precondition. In the examples, all 
+  rules are stated without the leading meta-quantification @{text "\<And>xs"}.
+
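+  Fully spelled out, with the leading meta-quantification included, the rule for the 
+  accessible part, for example, reads 
+  @{text "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"}.
+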
+  The code of the inductive package falls roughly into three parts, involving
+  the definitions, the induction principles and the introduction rules, 
+  respectively. For the definitions we need to have the @{text rules}
+  in a form where the meta-quantifiers and meta-implications are replaced
+  by their object logic equivalents. Therefore an @{text "orule"} is of the
+  form 
+
+  @{text [display] "orule ::= \<forall>xs. As \<longrightarrow> (\<forall>ys. Bs \<longrightarrow> pred ss)\<^isup>* \<longrightarrow> pred ts"}
+
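+  For example, the rule @{thm evenS[no_vars]} shown above corresponds to the 
+  @{text "orule"} @{text "\<forall>n. odd n \<longrightarrow> even (Suc n)"}.
+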
+  A definition for the predicate @{text "pred"} has then the form
+
+  @{text [display] "def ::= pred \<equiv> \<lambda>zs. \<forall>preds. orules \<longrightarrow> pred zs"}
+
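+  For example, for @{text "even"} (defined mutually with @{text "odd"}) this scheme 
+  produces the definition
+
+  @{text [display] 
+"even \<equiv> \<lambda>z. \<forall>even odd. (even 0) \<longrightarrow> (\<forall>n. odd n \<longrightarrow> even (Suc n)) 
+                                \<longrightarrow> (\<forall>n. even n \<longrightarrow> odd (Suc n)) \<longrightarrow> even z"}
+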
+  The induction principle for the predicate @{text "pred"} is of the
+  form
+
+  @{text [display] "ind ::= pred ?zs \<Longrightarrow> rules[preds := ?Ps] \<Longrightarrow> ?P ?zs"}
+
+  where in the @{text "rules"} every @{text pred} is replaced by a new
+  (meta)variable @{text "?P"}.
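+
+  For @{text "even"}, for instance, this will eventually yield the induction principle
+
+  @{text [display]
+"even ?z \<Longrightarrow> ?P 0 \<Longrightarrow> (\<And>m. ?Q m \<Longrightarrow> ?P (Suc m)) \<Longrightarrow> (\<And>m. ?P m \<Longrightarrow> ?Q (Suc m)) \<Longrightarrow> ?P ?z"}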
+
+  In order to derive an induction principle for the predicate @{text "pred"}
+  we first transform it into the object logic and fix the meta-variables. Hence 
+  we have to prove a formula of the form
+
+  @{text [display] "pred zs \<longrightarrow> orules[preds := Ps] \<longrightarrow> P zs"}
+
+  If we assume @{text "pred zs"} and unfold its definition, then we have
+  
+  @{text [display] "\<forall>preds. orules \<longrightarrow> pred zs"} 
+
+  and must prove
+
+  @{text [display] "orules[preds := Ps] \<longrightarrow> P zs"}
+
+  This can be done by instantiating the @{text "\<forall>preds"} with the @{text "Ps"}. 
+  Then we are done since we are left with a simple identity.
+  
+  The proofs for the introduction rules are more involved. Assuming we want to
+  prove the introduction rule 
+
+  @{text [display] "\<And>xs. As \<Longrightarrow> (\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>* \<Longrightarrow> pred ts"}
+
+  then we can assume
+
+  \begin{isabelle}
+  (i)~~@{text "As"}\\
+  (ii)~@{text "(\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>*"}
+  \end{isabelle}
+
+  and must show
+
+  @{text [display] "pred ts"}
+  
+  If we now unfold the definitions for the @{text preds}, we have
+
+  \begin{isabelle}
+  (i)~~~@{text "As"}\\
+  (ii)~~@{text "(\<And>ys. Bs \<Longrightarrow> \<forall>preds. orules \<longrightarrow> pred ss)\<^isup>*"}\\
+  (iii)~@{text "orules"}
+  \end{isabelle}
+
+  and need to show
+
+  @{text [display] "pred ts"}
+
+  In the last step we removed some quantifiers and moved the precondition @{text "orules"} 
+  into the assumptions. The @{text "orules"} stand for all introduction rules that are given 
+  by the user. We apply the one which corresponds to the introduction rule we are proving.
+  This introduction rule must be of the form 
+
+  @{text [display] "As \<Longrightarrow> (\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>* \<Longrightarrow> pred ts"}
+
+  When we apply this rule we end up in the goal state where we have to prove
+
+  \begin{isabelle}
+  (a)~@{text "As"}\\
+  (b)~@{text "(\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>*"}
+  \end{isabelle}
+
+  The goals @{text "As"} we can immediately discharge with the assumption in @{text "(i)"}.
+  The goals in @{text "(b)"} we discharge as follows: we assume the @{text "(iv)"} 
+  @{text "Bs"} and prove @{text "(c)"} @{text "pred ss"}. We then resolve the 
+  @{text "Bs"}  with the assumptions in @{text "(ii)"}. This gives us
+
+  @{text [display] "(\<forall>preds. orules \<longrightarrow> pred ss)\<^isup>*"}
+
+  Instantiating the universal quantifiers and then resolving with the assumptions 
+  in @{text "(iii)"} gives us @{text "pred ss"}, which is the goal we are after.
+
+  
+  What remains is to implement the reasoning outlined above. 
+  For building testcases, we use some shorthands for the definitions 
+  of @{text "even/odd"}, @{term "fresh"} and @{term "accpart"}. 
+*}
+
+
+text_raw{*
+\begin{figure}[p]
+\begin{minipage}{\textwidth}
+\begin{isabelle}*}  
+ML{*(* even-odd example *)
+val eo_defs = [@{thm even_def}, @{thm odd_def}]
+
+val eo_rules =  
+  [@{prop "even 0"},
+   @{prop "\<And>n. odd n \<Longrightarrow> even (Suc n)"},
+   @{prop "\<And>n. even n \<Longrightarrow> odd (Suc n)"}]
+
+val eo_orules =  
+  [@{prop "even 0"},
+   @{prop "\<forall>n. odd n \<longrightarrow> even (Suc n)"},
+   @{prop "\<forall>n. even n \<longrightarrow> odd (Suc n)"}]
+
+val eo_preds =  [@{term "even::nat\<Rightarrow>bool"}, @{term "odd::nat\<Rightarrow>bool"}] 
+val eo_prednames = [@{binding "even"}, @{binding "odd"}]
+val eo_syns = [NoSyn, NoSyn] 
+val eo_arg_tyss = [[@{typ "nat"}], [@{typ "nat"}]] 
+val e_pred = @{term "even::nat\<Rightarrow>bool"}
+val e_arg_tys = [@{typ "nat"}] 
+
+
+
+(* freshness example *)
+val fresh_rules =  
+  [@{prop "\<And>a b. a \<noteq> b \<Longrightarrow> fresh a (Var b)"},
+   @{prop "\<And>a s t. fresh a t \<Longrightarrow> fresh a s \<Longrightarrow> fresh a (App t s)"},
+   @{prop "\<And>a t. fresh a (Lam a t)"},
+   @{prop "\<And>a b t. a \<noteq> b \<Longrightarrow> fresh a t \<Longrightarrow> fresh a (Lam b t)"}]
+
+val fresh_orules =  
+  [@{prop "\<forall>a b. a \<noteq> b \<longrightarrow> fresh a (Var b)"},
+   @{prop "\<forall>a s t. fresh a t \<longrightarrow> fresh a s \<longrightarrow> fresh a (App t s)"},
+   @{prop "\<forall>a t. fresh a (Lam a t)"},
+   @{prop "\<forall>a b t. a \<noteq> b \<longrightarrow> fresh a t \<longrightarrow> fresh a (Lam b t)"}]
+
+val fresh_pred =  @{term "fresh::string\<Rightarrow>trm\<Rightarrow>bool"} 
+val fresh_arg_tys = [@{typ "string"}, @{typ "trm"}]
+
+
+
+(* accessible-part example *)
+val acc_rules = 
+     [@{prop "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"}]
+val acc_pred = @{term "accpart:: ('a \<Rightarrow>'a\<Rightarrow>bool)\<Rightarrow>'a \<Rightarrow>bool"}*}
+text_raw{*
+\end{isabelle}
+\end{minipage}
+\caption{Shorthands for the inductive predicates of @{text "even"}-@{text "odd"}, 
+  @{text "fresh"} and @{text "accpart"}. The follow the convention @{text "rules"}, 
+  @{text "orules"}, @{text "preds"} and so on as used in Section ???. The purpose 
+  of these shorthands is to simplify the construction of testcases.}
+\end{figure}
+*}
+
+
+
 end
--- a/ProgTutorial/Tactical.thy	Wed Mar 25 15:09:04 2009 +0100
+++ b/ProgTutorial/Tactical.thy	Thu Mar 26 19:00:51 2009 +0000
@@ -1148,17 +1148,13 @@
   fun name_ctrm (nm, ctrm) =
       "  " ^ nm ^ ": " ^ (str_of_cterms ctxt ctrm)
 
-  val s1 = ["Simplification rules:"]
-  val s2 = map name_thm simps
-  val s3 = ["Congruences rules:"]
-  val s4 = map name_thm congs
-  val s5 = ["Simproc patterns:"]
-  val s6 = map name_ctrm procs
+  val s = ["Simplification rules:"] @ (map name_thm simps) @
+          ["Congruences rules:"] @ (map name_thm congs) @
+          ["Simproc patterns:"] @ (map name_ctrm procs)
 in
-  (s1 @ s2 @ s3 @ s4 @ s5 @ s6) 
-     |> separate "\n"
-     |> implode
-     |> warning
+  s |> separate "\n"
+    |> implode
+    |> warning
 end*}
 text_raw {* 
 \end{isabelle}
--- a/ProgTutorial/document/root.tex	Wed Mar 25 15:09:04 2009 +0100
+++ b/ProgTutorial/document/root.tex	Thu Mar 26 19:00:51 2009 +0000
@@ -140,6 +140,7 @@
         \end{tabular}}
 
 \maketitle
+\setcounter{tocdepth}{1}
 \tableofcontents
 
 % generated text of all theories
Binary file progtutorial.pdf has changed