finished the heavy duty stuff for the inductive package
author Christian Urban <urbanc@in.tum.de>
Wed, 01 Apr 2009 15:42:47 +0100
changeset 224 647cab4a72c2
parent 223 1aaa15ef731b
child 225 7859fc59249a
finished the heavy duty stuff for the inductive package
ProgTutorial/Base.thy
ProgTutorial/Package/Ind_Code.thy
ProgTutorial/Package/Ind_Interface.thy
ProgTutorial/Package/Ind_Prelims.thy
ProgTutorial/ROOT.ML
progtutorial.pdf
--- a/ProgTutorial/Base.thy	Wed Apr 01 12:29:10 2009 +0100
+++ b/ProgTutorial/Base.thy	Wed Apr 01 15:42:47 2009 +0100
@@ -30,6 +30,11 @@
       Toplevel.proof (Proof.map_context (Context.proof_map
         (ML_Context.exec (fn () => ML_Context.eval true pos txt))) #> inherit_env_prf)))
 
+val _ =
+  OuterSyntax.command "ML_val" "diagnostic ML text" 
+  (OuterKeyword.tag "TutorialML" OuterKeyword.diag)
+    (OuterParse.ML_source >> IsarCmd.ml_diag true);
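+
+  (* Example usage: with this command registered, a theory can evaluate a
+     diagnostic ML snippet, e.g.  ML_val "3 + 4", for quick inspection
+     without changing the theory context. *)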
+
 *}
 (*
 ML {*
--- a/ProgTutorial/Package/Ind_Code.thy	Wed Apr 01 12:29:10 2009 +0100
+++ b/ProgTutorial/Package/Ind_Code.thy	Wed Apr 01 15:42:47 2009 +0100
@@ -1,5 +1,5 @@
 theory Ind_Code
-imports "../Base" "../FirstSteps" Ind_Interface Ind_General_Scheme
+imports "../Base" "../FirstSteps" Ind_General_Scheme
 begin
 
 section {* The Gory Details\label{sec:code} *} 
@@ -249,8 +249,8 @@
 *}
 
 lemma 
-  fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
-  shows "\<forall>x y z. P x y z \<Longrightarrow> True"
+fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
+shows "\<forall>x y z. P x y z \<Longrightarrow> True"
 apply (tactic {* 
   inst_spec_tac [@{cterm "a::nat"},@{cterm "b::nat"},@{cterm "c::nat"}] 1 *})
 txt {* 
@@ -495,14 +495,14 @@
 *}
 
 lemma all_elims_test:
-  fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
-  shows "\<forall>x y z. P x y z" sorry
+fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
+shows "\<forall>x y z. P x y z" sorry
 
 lemma imp_elims_test:
-  shows "A \<longrightarrow> B \<longrightarrow> C" sorry
+shows "A \<longrightarrow> B \<longrightarrow> C" sorry
 
 lemma imp_elims_test':
-  shows "A" "B" sorry
+shows "A" "B" sorry
 
 text {*
   The function @{ML all_elims} takes a list of (certified) terms and instantiates
@@ -535,7 +535,7 @@
 *}
 
 lemma fresh_Lam:
-  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 (*<*)oops(*>*)
 
 text {*
@@ -556,7 +556,7 @@
 
 (*<*)
 lemma fresh_Lam:
-  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 (*>*)
 apply(tactic {* expand_tac @{thms fresh_def} *})
 
@@ -652,7 +652,7 @@
 
 (*<*)
 lemma fresh_Lam:
-  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 apply(tactic {* expand_tac @{thms fresh_def} *})
 (*>*)
 apply(tactic {* chop_test_tac [fresh_pred] fresh_rules @{context} *})
@@ -714,7 +714,7 @@
 
 (*<*)
 lemma fresh_Lam:
-  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 apply(tactic {* expand_tac @{thms fresh_def} *})
 (*>*)
 apply(tactic {* apply_prem_tac 3 [fresh_pred] fresh_rules @{context} *})
@@ -784,7 +784,7 @@
 *}
 
 lemma fresh_Lam:
-  shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
 apply(tactic {* expand_tac @{thms fresh_def} *})
 apply(tactic {* prove_intro_tac 3 [fresh_pred] fresh_rules @{context} 1 *})
 done
@@ -798,7 +798,7 @@
 *}
 
 lemma accpartI:
-  shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
 apply(tactic {* expand_tac @{thms accpart_def} *})
 apply(tactic {* chop_test_tac [acc_pred] acc_rules @{context} *})
 apply(tactic {* apply_prem_tac 0 [acc_pred] acc_rules @{context} *})
@@ -879,7 +879,7 @@
 *}
 
 lemma accpartI:
-  shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
 apply(tactic {* expand_tac @{thms accpart_def} *})
 apply(tactic {* prove_intro_tac 0 [acc_pred] acc_rules @{context} 1 *})
 done
@@ -901,15 +901,15 @@
 *}
 
 lemma even0_intro:
-  shows "even 0"
+shows "even 0"
 by (tactic {* intro_tac eo_defs eo_rules eo_preds 0 @{context} *})
 
 lemma evenS_intro:
-  shows "\<And>m. odd m \<Longrightarrow> even (Suc m)"
+shows "\<And>m. odd m \<Longrightarrow> even (Suc m)"
 by (tactic {* intro_tac eo_defs eo_rules eo_preds 1 @{context} *})
 
 lemma fresh_App:
-  shows "\<And>a t s. \<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
+shows "\<And>a t s. \<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
 by (tactic {* 
   intro_tac @{thms fresh_def} fresh_rules [fresh_pred] 1 @{context} *})
 
@@ -1028,148 +1028,8 @@
   indicates that the first premise of the induction principle (namely
   the predicate over which the induction proceeds) is eliminated. 
 
-  (FIXME: What does @{ML Induct.induct_pred} do?)
-
-  (FIXME: why the mut-name?)
-
-  (FIXME: What does @{ML Binding.qualify} do?)
-
-
   This completes all the code and fits in with the ``front end'' described
-  in Section \ref{sec:interface}
-*}
-
-
-ML{*fun add_inductive_cmd pred_specs rule_specs lthy =
-let
-  val ((pred_specs', rule_specs'), _) = 
-         Specification.read_spec pred_specs rule_specs lthy
-in
-  add_inductive pred_specs' rule_specs' lthy
-end*} 
-
-ML{*val spec_parser = 
-   OuterParse.fixes -- 
-   Scan.optional 
-     (OuterParse.$$$ "where" |--
-        OuterParse.!!! 
-          (OuterParse.enum1 "|" 
-             (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
-
-ML{*val specification =
-  spec_parser >>
-    (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
-
-ML{*val _ = OuterSyntax.local_theory "simple_inductive" 
-              "define inductive predicates"
-                 OuterKeyword.thy_decl specification*}
-
-
-section {* Extensions (TBD) *}
-
-text {*
-  Things to include at the end:
-
-  \begin{itemize}
-  \item include the code for the parameters
-  \item say something about add-inductive-i to return
-  the rules
-  \item say that the induction principle is weaker (weaker than
-  what the standard inductive package generates)
-  \item say that no conformity test is done
-  \item exercise about strong induction principles
-  \item exercise about the test for the intro rules
-  \end{itemize}
-  
+  in Section~\ref{sec:interface}.\footnote{FIXME: Describe @{ML Induct.induct_pred}. Why the mut-name? 
+  What does @{ML Binding.qualify} do?}
 *}
-
-(*
-simple_inductive
-  Even and Odd
-where
-  Even0: "Even 0"
-| EvenS: "Odd n \<Longrightarrow> Even (Suc n)"
-| OddS: "Even n \<Longrightarrow> Odd (Suc n)"
-
-thm Even0
-thm EvenS
-thm OddS
-
-thm Even_Odd.intros
-thm Even.induct
-thm Odd.induct
-
-thm Even_def
-thm Odd_def
-*)
-
-(*
-text {* 
-  Second, we want that the user can specify fixed parameters.
-  Remember in the previous section we stated that the user can give the 
-  specification for the transitive closure of a relation @{text R} as 
-*}
-*)
-
-(*
-simple_inductive
-  trcl\<iota>\<iota> :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
-  base: "trcl\<iota>\<iota> R x x"
-| step: "trcl\<iota>\<iota> R x y \<Longrightarrow> R y z \<Longrightarrow> trcl\<iota>\<iota> R x z"
-*)
-
-(*
-text {*
-  Note that there is no locale given in this specification---the parameter
-  @{text "R"} therefore needs to be included explicitly in @{term trcl\<iota>\<iota>}, but
-  stays fixed throughout the specification. The problem with this way of
-  stating the specification for the transitive closure is that it derives the
-  following induction principle.
-
-  \begin{center}\small
-  \mprset{flushleft}
-  \mbox{\inferrule{
-             @{thm_style prem1  trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}\\\\
-             @{thm_style prem2  trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}\\\\
-             @{thm_style prem3  trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}}
-            {@{thm_style concl  trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}}}  
-  \end{center}
-
-  But this does not correspond to the induction principle we derived by hand, which
-  was
-  
-  
-  %\begin{center}\small
-  %\mprset{flushleft}
-  %\mbox{\inferrule{
-  %           @ { thm_style prem1  trcl_induct[no_vars]}\\\\
-  %           @ { thm_style prem2  trcl_induct[no_vars]}\\\\
-  %           @ { thm_style prem3  trcl_induct[no_vars]}}
-  %          {@ { thm_style concl  trcl_induct[no_vars]}}}  
-  %\end{center}
-
-  The difference is that in the one derived by hand the relation @{term R} is not
-  a parameter of the proposition @{term P} to be proved and it is also not universally
-  qunatified in the second and third premise. The point is that the parameter @{term R}
-  stays fixed thoughout the definition and we do not want to regard it as an ``ordinary''
-  argument of the transitive closure, but one that can be freely instantiated. 
-  In order to recognise such parameters, we have to extend the specification
-  to include a mechanism to state fixed parameters. The user should be able
-  to write 
-
-*}
-*)
-(*
-simple_inductive
-  trcl'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
-  base: "trcl'' R x x"
-| step: "trcl'' R x y \<Longrightarrow> R y z \<Longrightarrow> trcl'' R x z"
-
-simple_inductive
-  accpart'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
-  accpartI: "(\<And>y. R y x \<Longrightarrow> accpart'' R y) \<Longrightarrow> accpart'' R x"
-*)
 (*<*)end(*>*)
\ No newline at end of file
--- a/ProgTutorial/Package/Ind_Interface.thy	Wed Apr 01 12:29:10 2009 +0100
+++ b/ProgTutorial/Package/Ind_Interface.thy	Wed Apr 01 15:42:47 2009 +0100
@@ -69,21 +69,21 @@
   \caption{A railroad diagram describing the syntax of \simpleinductive{}. 
   The \emph{target} indicates an optional locale; the \emph{fixes} are an 
   \isacommand{and}-separated list of names for the inductive predicates (they
-  can also contain typing- and syntax anotations); \emph{prop} stands for a 
+  can also contain typing and syntax annotations); \emph{prop} stands for an 
   introduction rule with an optional theorem declaration (\emph{thmdecl}).
   \label{fig:railroad}}
   \end{figure}
 *}
 
 text {* 
-  To be able to write down the specification in Isabelle, we have to introduce
+  To be able to write down the specifications for inductive predicates, we have 
+  to introduce
   a new command (see Section~\ref{sec:newcommand}).  As the keyword for the
   new command we chose \simpleinductive{}. Examples of specifications we expect
   the user gives for the inductive predicates from the previous section are
-  shown in Figure~ref{fig:specs}. The general syntax we will parse is specified
-  in the railroad diagram shown in Figure~\ref{fig:railroad} for the syntax of 
-  \simpleinductive{}. This diagram more or less translates directly into 
-  the parser:
+  shown in Figure~\ref{fig:specs}. The general syntax we will parse is specified
+  in the railroad diagram shown in Figure~\ref{fig:railroad}. This diagram more 
+  or less translates directly into the parser:
 *}
 
 ML{*val spec_parser = 
@@ -95,12 +95,11 @@
              (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
 
 text {*
-  which we explaind in Section~\ref{sec:parsingspecs}.
-  If you look closely, there is no code for parsing the optional
-  target in Figure~\ref{fig:railroad}. This is an ``advanced'' feature
-  which we will inherit for ``free'' from the infrastructure on which
-  we build the package. The target stands for a locale and allows us 
-  to specify
+  which we explained in Section~\ref{sec:parsingspecs}.  However, if you look
+  closely, there is no code for parsing the target given optionally after the
+  keyword. This is an ``advanced'' feature which we will inherit for ``free''
+  from the infrastructure on which we shall build the package. The target
+  stands for a locale and allows us to specify
 *}
 
 locale rel =
@@ -123,10 +122,13 @@
   accpartI: "(\<And>y. R y x \<Longrightarrow> accpart' y) \<Longrightarrow> accpart' x"
 
 text {*
-  Note that in these definitions the parameter @{text R} for the
-  relation is left implicit.  If we feed into the 
-  parser the string (which corresponds to our definition of @{term even} and 
-  @{term odd}):
+  Note that in these definitions the parameter @{text R}, standing for the
+  relation, is left implicit. For the moment we will ignore such
+  implicit parameters and rely on the fact that the infrastructure will
+  deal with them. Later, however, we will come back to them.
+
+  If we feed into the parser the string that corresponds to our definition 
+  of @{term even} and @{term odd}
 
   @{ML_response [display,gray]
 "let
@@ -144,16 +146,50 @@
       ((evenS,\<dots>), \"\\^E\\^Ftoken\\^Eodd n \<Longrightarrow> even (Suc n)\\^E\\^F\\^E\"),
       ((oddS,\<dots>), \"\\^E\\^Ftoken\\^Eeven n \<Longrightarrow> odd (Suc n)\\^E\\^F\\^E\")]), [])"}
 
-  then we get back the predicates (with type
-  and syntax annotations), and the specifications of the introduction 
-  rules. This is all the information we
+  then we get back the specifications of the predicates (with type and syntax annotations), 
+  and the specifications of the introduction rules. This is all the information we
   need for calling the package and setting up the keyword. The latter is
-  done in Lines 6 and 7 in the code below.
+  done in Lines 5 to 7 of the code below.
 *}
 
-(*<*)
-ML{* fun add_inductive pred_specs rule_specs lthy = lthy *}
-(*>*)
+(*<*)ML %linenos{*fun add_inductive pred_specs rule_specs lthy = lthy 
+fun add_inductive_cmd pred_specs rule_specs lthy =
+let
+  val ((pred_specs', rule_specs'), _) = 
+         Specification.read_spec pred_specs rule_specs lthy
+in
+  add_inductive pred_specs' rule_specs' lthy
+end*}(*>*)
+ML_val %linenosgray{*val specification =
+  spec_parser >>
+    (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)
+
+val _ = OuterSyntax.local_theory "simple_inductive" 
+          "definition of simple inductive predicates"
+             OuterKeyword.thy_decl specification*}
+
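+text {*
+  For example, with the command registered, a user can now write the 
+  following specification (we keep it inside a comment at this point, since 
+  @{text add_inductive} is still a stub and therefore leaves the local theory 
+  unchanged):
+*}
+
+(*
+simple_inductive
+  Even and Odd
+where
+  Even0: "Even 0"
+| EvenS: "Odd n \<Longrightarrow> Even (Suc n)"
+| OddS: "Even n \<Longrightarrow> Odd (Suc n)"
+*)
+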
+text {*
+  We call @{ML local_theory in OuterSyntax} with the kind-indicator 
+  @{ML thy_decl in OuterKeyword} since the package does not need to open 
+  up any proof (see Section~\ref{sec:newcommand}). 
+  The auxiliary function @{text specification} in Lines 1 to 3
+  gathers the information from the parser to be processed further
+  by the function @{text "add_inductive_cmd"}, which we describe below. 
+
+  Note that when the predicates come out of the parser, they are just
+  ``naked'' strings: they have no type yet (even if we annotate them with
+  types) and they are not yet defined constants (which the predicates 
+  eventually will be). Also the introduction rules are just strings. What we have
+  to do first is to transform the parser's output into some internal
+  datastructures that can be processed further. For this we can use the
+  function @{ML read_spec in Specification}. This function takes some strings
+  (with possible typing annotations) and some rule specifications, and attempts
+  to find a typing according to the type constraints given by the 
+  user and the type constraints of the ``ambient'' theory. It returns 
+  the types for the predicates and typed terms for the
+  introduction rules. So at the heart of the function 
+  @{text "add_inductive_cmd"} is a call to @{ML read_spec in Specification}.
+*}
 
 ML{*fun add_inductive_cmd pred_specs rule_specs lthy =
 let
@@ -161,312 +197,16 @@
          Specification.read_spec pred_specs rule_specs lthy
 in
   add_inductive pred_specs' rule_specs' lthy
-end*} 
-
-
-ML{*val specification =
-  spec_parser >>
-    (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
-
-ML{*val _ = OuterSyntax.local_theory "simple_inductive" 
-              "definition of simple inductive predicates"
-                 OuterKeyword.thy_decl specification*}
-
-
-text {*
-  We call @{ML OuterSyntax.command} with the kind-indicator @{ML
-  OuterKeyword.thy_decl} since the package does not need to open up any goal
-  state (see Section~\ref{sec:newcommand}). Note that the predicates and
-  parameters are at the moment only some ``naked'' variables: they have no
-  type yet (even if we annotate them with types) and they are also no defined
-  constants yet (which the predicates will eventually be).  In Lines 1 to 4 we
-  gather the information from the parser to be processed further. The locale
-  is passed as argument to the function @{ML
-  Toplevel.local_theory}.\footnote{FIXME Is this already described?} The other
-  arguments, i.e.~the predicates, parameters and intro rule specifications,
-  are passed to the function @{ML add_inductive in SimpleInductivePackage}
-  (Line 4).
-
-  We now come to the second subtask of the package, namely transforming the 
-  parser output into some internal datastructures that can be processed further. 
-  Remember that at the moment the introduction rules are just strings, and even
-  if the predicates and parameters can contain some typing annotations, they
-  are not yet in any way reflected in the introduction rules. So the task of
-  @{ML add_inductive in SimpleInductivePackage} is to transform the strings
-  into properly typed terms. For this it can use the function 
-  @{ML read_spec in Specification}. This function takes some constants
-  with possible typing annotations and some rule specifications and attempts to
-  find a type according to the given type constraints and the type constraints
-  by the surrounding (local theory). However this function is a bit
-  too general for our purposes: we want that each introduction rule has only 
-  name (for example @{text even0} or @{text evenS}), if a name is given at all.
-  The function @{ML read_spec in Specification} however allows more
-  than one rule. Since it is quite convenient to rely on this function (instead of
-  building your own) we just quick ly write a wrapper function that translates
-  between our specific format and the general format expected by 
-  @{ML read_spec in Specification}. The code of this wrapper is as follows:
-
-  @{ML_chunk [display,gray,linenos] read_specification}
-
-  It takes a list of constants, a list of rule specifications and a local theory 
-  as input. Does the transformation of the rule specifications in Line 3; calls
-  the function and transforms the now typed rule specifications back into our
-  format and returns the type parameter and typed rule specifications. 
-
-
-   @{ML_chunk [display,gray,linenos] add_inductive}
-
+end*}
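+
+text {*
+  To get a feeling for what @{ML read_spec in Specification} does, it can also 
+  be called directly. The following call is only a sketch: the first argument 
+  corresponds to the parsed fixes, the second to the introduction rules.
+*}
+
+ML_val{*Specification.read_spec 
+  [(Binding.name "trcl", NONE, NoSyn), 
+   (Binding.name "r", SOME "'a \<Rightarrow> 'a \<Rightarrow> bool", NoSyn)]
+  [((Binding.name "base", []), "trcl r x x"),
+   ((Binding.name "step", []), "trcl r x y \<Longrightarrow> r y z \<Longrightarrow> trcl r x z")]
+  @{context}*}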
 
-  In order to add a new inductive predicate to a theory with the help of our
-  package, the user must \emph{invoke} it. For every package, there are
-  essentially two different ways of invoking it, which we will refer to as
-  \emph{external} and \emph{internal}. By external invocation we mean that the
-  package is called from within a theory document. In this case, the
-  specification of the inductive predicate, including type annotations and
-  introduction rules, are given as strings by the user. Before the package can
-  actually make the definition, the type and introduction rules have to be
-  parsed. In contrast, internal invocation means that the package is called by
-  some other package. For example, the function definition package
-  calls the inductive definition package to define the
-  graph of the function. However, it is not a good idea for the function
-  definition package to pass the introduction rules for the function graph to
-  the inductive definition package as strings. In this case, it is better to
-  directly pass the rules to the package as a list of terms, which is more
-  robust than handling strings that are lacking the additional structure of
-  terms. These two ways of invoking the package are reflected in its ML
-  programming interface, which consists of two functions:
-
-
-  @{ML_chunk [display,gray] SIMPLE_INDUCTIVE_PACKAGE}
-*}
-
-text {*
-  (FIXME: explain Binding.binding; Attrib.binding somewhere else)
-
-
-  The function for external invocation of the package is called @{ML
-  add_inductive in SimpleInductivePackage}, whereas the one for internal
-  invocation is called @{ML add_inductive_i in SimpleInductivePackage}. Both
-  of these functions take as arguments the names and types of the inductive
-  predicates, the names and types of their parameters, the actual introduction
-  rules and a \emph{local theory}.  They return a local theory containing the
-  definition and the induction principle as well introduction rules. 
-
-  Note that @{ML add_inductive_i in SimpleInductivePackage} expects
-  the types of the predicates and parameters to be specified using the
-  datatype @{ML_type typ} of Isabelle's logical framework, whereas @{ML
-  add_inductive in SimpleInductivePackage} expects them to be given as
-  optional strings. If no string is given for a particular predicate or
-  parameter, this means that the type should be inferred by the
-  package. 
-
-
-  Additional \emph{mixfix syntax} may be associated with the
-  predicates and parameters as well. Note that @{ML add_inductive_i in
-  SimpleInductivePackage} does not allow mixfix syntax to be associated with
-  parameters, since it can only be used for parsing.\footnote{FIXME: why ist it there then?} 
-  The names of the
-  predicates, parameters and rules are represented by the type @{ML_type
-  Binding.binding}. Strings can be turned into elements of the type @{ML_type
-  Binding.binding} using the function @{ML [display] "Binding.name : string ->
-  Binding.binding"} Each introduction rule is given as a tuple containing its
-  name, a list of \emph{attributes} and a logical formula. Note that the type
-  @{ML_type Attrib.binding} used in the list of introduction rules is just a
-  shorthand for the type @{ML_type "Binding.binding * Attrib.src list"}.  The
-  function @{ML add_inductive_i in SimpleInductivePackage} expects the formula
-  to be specified using the datatype @{ML_type term}, whereas @{ML
-  add_inductive in SimpleInductivePackage} expects it to be given as a string.
-  An attribute specifies additional actions and transformations that should be
-  applied to a theorem, such as storing it in the rule databases used by
-  automatic tactics like the simplifier. The code of the package, which will
-  be described in the following section, will mostly treat attributes as a
-  black box and just forward them to other functions for storing theorems in
-  local theories.  The implementation of the function @{ML add_inductive in
-  SimpleInductivePackage} for external invocation of the package is quite
-  simple. Essentially, it just parses the introduction rules and then passes
-  them on to @{ML add_inductive_i in SimpleInductivePackage}:
-
-  @{ML_chunk [display] add_inductive}
-
-  For parsing and type checking the introduction rules, we use the function
-  
-  @{ML [display] "Specification.read_specification:
-  (Binding.binding * string option * mixfix) list ->  (*{variables}*)
-  (Attrib.binding * string list) list ->  (*{rules}*)
-  local_theory ->
-  (((Binding.binding * typ) * mixfix) list *
-   (Attrib.binding * term list) list) *
-  local_theory"}
-*}
+ML {* Specification.read_spec *}
 
 text {*
-  During parsing, both predicates and parameters are treated as variables, so
-  the lists \verb!preds_syn! and \verb!params_syn! are just appended
-  before being passed to @{ML read_spec in Specification}. Note that the format
-  for rules supported by @{ML read_spec in Specification} is more general than
-  what is required for our package. It allows several rules to be associated
-  with one name, and the list of rules can be partitioned into several
-  sublists. In order for the list \verb!intro_srcs! of introduction rules
-  to be acceptable as an input for @{ML read_spec in Specification}, we first
-  have to turn it into a list of singleton lists. This transformation
-  has to be reversed later on by applying the function
-  @{ML [display] "the_single: 'a list -> 'a"}
-  to the list \verb!specs! containing the parsed introduction rules.
-  The function @{ML read_spec in Specification} also returns the list \verb!vars!
-  of predicates and parameters that contains the inferred types as well.
-  This list has to be chopped into the two lists \verb!preds_syn'! and
-  \verb!params_syn'! for predicates and parameters, respectively.
-  All variables occurring in a rule but not in the list of variables passed to
-  @{ML read_spec in Specification} will be bound by a meta-level universal
-  quantifier.
-*}
-
-text {*
-  Finally, @{ML read_specification in Specification} also returns another local theory,
-  but we can safely discard it. As an example, let us look at how we can use this
-  function to parse the introduction rules of the @{text trcl} predicate:
-
-  @{ML_response [display]
-"Specification.read_specification
-  [(Binding.name \"trcl\", NONE, NoSyn),
-   (Binding.name \"r\", SOME \"'a \<Rightarrow> 'a \<Rightarrow> bool\", NoSyn)]
-  [((Binding.name \"base\", []), [\"trcl r x x\"]),
-   ((Binding.name \"step\", []), [\"trcl r x y \<Longrightarrow> r y z \<Longrightarrow> trcl r x z\"])]
-  @{context}"
-"((\<dots>,
-  [(\<dots>,
-    [Const (\"all\", \<dots>) $ Abs (\"x\", TFree (\"'a\", \<dots>),
-       Const (\"Trueprop\", \<dots>) $
-         (Free (\"trcl\", \<dots>) $ Free (\"r\", \<dots>) $ Bound 0 $ Bound 0))]),
-   (\<dots>,
-    [Const (\"all\", \<dots>) $ Abs (\"x\", TFree (\"'a\", \<dots>),
-       Const (\"all\", \<dots>) $ Abs (\"y\", TFree (\"'a\", \<dots>),
-         Const (\"all\", \<dots>) $ Abs (\"z\", TFree (\"'a\", \<dots>),
-           Const (\"==>\", \<dots>) $
-             (Const (\"Trueprop\", \<dots>) $
-               (Free (\"trcl\", \<dots>) $ Free (\"r\", \<dots>) $ Bound 2 $ Bound 1)) $
-             (Const (\"==>\", \<dots>) $ \<dots> $ \<dots>))))])]),
- \<dots>)
-: (((Binding.binding * typ) * mixfix) list *
-   (Attrib.binding * term list) list) * local_theory"}
-
-  In the list of variables passed to @{ML read_specification in Specification}, we have
-  used the mixfix annotation @{ML NoSyn} to indicate that we do not want to associate any
-  mixfix syntax with the variable. Moreover, we have only specified the type of \texttt{r},
-  whereas the type of \texttt{trcl} is computed using type inference.
-  The local variables \texttt{x}, \texttt{y} and \texttt{z} of the introduction rules
-  are turned into bound variables with the de Bruijn indices,
-  whereas \texttt{trcl} and \texttt{r} remain free variables.
-
-*}
-
-text {*
-
-  \paragraph{Parsers for theory syntax}
-
-  Although the function @{ML add_inductive in SimpleInductivePackage} parses terms and types, it still
-  cannot be used to invoke the package directly from within a theory document.
-  In order to do this, we have to write another parser. Before we describe
-  the process of writing parsers for theory syntax in more detail, we first
-  show some examples of how we would like to use the inductive definition
-  package.
-
-
-  The definition of the transitive closure should look as follows:
-*}
-
-ML {* SpecParse.opt_thm_name *}
-
-text {*
-
-  A proposition can be parsed using the function @{ML prop in OuterParse}.
-  Essentially, a proposition is just a string or an identifier, but using the
-  specific parser function @{ML prop in OuterParse} leads to more instructive
-  error messages, since the parser will complain that a proposition was expected
-  when something else than a string or identifier is found.
-  An optional locale target specification of the form \isa{(\isacommand{in}\ $\ldots$)}
-  can be parsed using @{ML opt_target in OuterParse}.
-  The lists of names of the predicates and parameters, together with optional
-  types and syntax, are parsed using the functions @{ML "fixes" in OuterParse}
-  and @{ML for_fixes in OuterParse}, respectively.
-  In addition, the following function from @{ML_struct SpecParse} for parsing
-  an optional theorem name and attribute, followed by a delimiter, will be useful:
-  
-  \begin{table}
-  @{ML "opt_thm_name:
-  string -> Attrib.binding parser" in SpecParse}
-  \end{table}
-
-  We now have all the necessary tools to write the parser for our
-  \isa{\isacommand{simple{\isacharunderscore}inductive}} command:
-  
- 
-  Once all arguments of the command have been parsed, we apply the function
-  @{ML add_inductive in SimpleInductivePackage}, which yields a local theory
-  transformer of type @{ML_type "local_theory -> local_theory"}. Commands in
-  Isabelle/Isar are realized by transition transformers of type
-  @{ML_type [display] "Toplevel.transition -> Toplevel.transition"}
-  We can turn a local theory transformer into a transition transformer by using
-  the function
-
-  @{ML [display] "Toplevel.local_theory : string option ->
-  (local_theory -> local_theory) ->
-  Toplevel.transition -> Toplevel.transition"}
- 
-  which, apart from the local theory transformer, takes an optional name of a locale
-  to be used as a basis for the local theory. 
-
-  (FIXME : needs to be adjusted to new parser type)
-
-  {\it
-  The whole parser for our command has type
-  @{text [display] "OuterLex.token list ->
-  (Toplevel.transition -> Toplevel.transition) * OuterLex.token list"}
-  which is abbreviated by @{text OuterSyntax.parser_fn}. The new command can be added
-  to the system via the function
-  @{text [display] "OuterSyntax.command :
-  string -> string -> OuterKeyword.T -> OuterSyntax.parser_fn -> unit"}
-  which imperatively updates the parser table behind the scenes. }
-
-  In addition to the parser, this
-  function takes two strings representing the name of the command and a short description,
-  as well as an element of type @{ML_type OuterKeyword.T} describing which \emph{kind} of
-  command we intend to add. Since we want to add a command for declaring new concepts,
-  we choose the kind @{ML "OuterKeyword.thy_decl"}. Other kinds include
-  @{ML "OuterKeyword.thy_goal"}, which is similar to @{ML thy_decl in OuterKeyword},
-  but requires the user to prove a goal before making the declaration, or
-  @{ML "OuterKeyword.diag"}, which corresponds to a purely diagnostic command that does
-  not change the context. For example, the @{ML thy_goal in OuterKeyword} kind is used
-  by the \isa{\isacommand{function}} command \cite{Krauss-IJCAR06}, which requires the user
-  to prove that a given set of equations is non-overlapping and covers all cases. The kind
-  of the command should be chosen with care, since selecting the wrong one can cause strange
-  behaviour of the user interface, such as failure of the undo mechanism.
-*}
-
-text {*
-  Note that the @{text trcl} predicate has two different kinds of parameters: the
-  first parameter @{text R} stays \emph{fixed} throughout the definition, whereas
-  the second and third parameter changes in the ``recursive call''. This will
-  become important later on when we deal with fixed parameters and locales. 
-
-
- 
-  The purpose of the package we show next is that the user just specifies the
-  inductive predicate by stating some introduction rules and then the packages
-  makes the equivalent definition and derives from it the needed properties.
-*}
-
-text {*
-  (FIXME: perhaps move somewhere else)
-
-  The point of these examples is to get a feeling what the automatic proofs 
-  should do in order to solve all inductive definitions we throw at them. For this 
-  it is instructive to look at the general construction principle 
-  of inductive definitions, which we shall do in the next section.
-
-  The code of the inductive package falls roughly in tree parts: the first
-  deals with the definitions, the second with the induction principles and 
-  the third with the introduction rules. 
-  
+  Once we have the input data as some internal datastructure, we call
+  the function @{ML add_inductive}. This function does the heavy 
+  lifting in the package: it generates definitions for the 
+  predicates and derives from them corresponding induction principles and 
+  introduction rules. The description of this function will span
+  the next two sections.
 *}
 (*<*)end(*>*)
\ No newline at end of file
--- a/ProgTutorial/Package/Ind_Prelims.thy	Wed Apr 01 12:29:10 2009 +0100
+++ b/ProgTutorial/Package/Ind_Prelims.thy	Wed Apr 01 15:42:47 2009 +0100
@@ -27,7 +27,8 @@
   @{prop[mode=Rule] "R x y \<Longrightarrow> trcl R y z \<Longrightarrow> trcl R x z"}
   \end{center}
 
-  The package has to make an appropriate definition. Since an inductively
+  The package has to make an appropriate definition for @{term "trcl"}. 
+  Since an inductively
   defined predicate is the least predicate closed under a collection of
   introduction rules, the predicate @{text "trcl R x y"} can be defined so
   that it holds if and only if @{text "P x y"} holds for every predicate
--- a/ProgTutorial/ROOT.ML	Wed Apr 01 12:29:10 2009 +0100
+++ b/ProgTutorial/ROOT.ML	Wed Apr 01 15:42:47 2009 +0100
@@ -13,6 +13,7 @@
 use_thy "Package/Ind_Interface";
 use_thy "Package/Ind_General_Scheme";
 use_thy "Package/Ind_Code";
+use_thy "Package/Ind_Extensions";
 
 use_thy "Appendix";
 use_thy "Recipes/Antiquotes";
Binary file progtutorial.pdf has changed