--- a/ProgTutorial/Base.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Base.thy Thu Apr 02 12:19:11 2009 +0100
@@ -30,6 +30,11 @@
Toplevel.proof (Proof.map_context (Context.proof_map
(ML_Context.exec (fn () => ML_Context.eval true pos txt))) #> inherit_env_prf)))
+val _ =
+ OuterSyntax.command "ML_val" "diagnostic ML text"
+ (OuterKeyword.tag "TutorialML" OuterKeyword.diag)
+ (OuterParse.ML_source >> IsarCmd.ml_diag true);
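+ (* Usage sketch: with the registration above, ML_val is available in
+    theory files as a purely diagnostic command that evaluates an ML
+    expression, e.g. 2 + 3, without changing the theory. *)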
+
*}
(*
ML {*
--- a/ProgTutorial/FirstSteps.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/FirstSteps.thy Thu Apr 02 12:19:11 2009 +0100
@@ -560,7 +560,7 @@
\begin{isabelle}
\isacommand{thm}~@{text "TrueConj_def"}\\
- @{text "> "}@{thm TrueConj_def}
+ @{text "> "}~@{thm TrueConj_def}
\end{isabelle}
(FIXME give a better example why bindings are important; maybe
--- a/ProgTutorial/Package/Ind_Code.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Package/Ind_Code.thy Thu Apr 02 12:19:11 2009 +0100
@@ -1,5 +1,5 @@
theory Ind_Code
-imports "../Base" "../FirstSteps" Ind_General_Scheme
+imports "../Base" "../FirstSteps" Ind_General_Scheme
begin
section {* The Gory Details\label{sec:code} *}
@@ -249,8 +249,8 @@
*}
lemma
- fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
- shows "\<forall>x y z. P x y z \<Longrightarrow> True"
+fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
+shows "\<forall>x y z. P x y z \<Longrightarrow> True"
apply (tactic {*
inst_spec_tac [@{cterm "a::nat"},@{cterm "b::nat"},@{cterm "c::nat"}] 1 *})
txt {*
@@ -495,14 +495,14 @@
*}
lemma all_elims_test:
- fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
- shows "\<forall>x y z. P x y z" sorry
+fixes P::"nat \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> bool"
+shows "\<forall>x y z. P x y z" sorry
lemma imp_elims_test:
- shows "A \<longrightarrow> B \<longrightarrow> C" sorry
+shows "A \<longrightarrow> B \<longrightarrow> C" sorry
lemma imp_elims_test':
- shows "A" "B" sorry
+shows "A" "B" sorry
text {*
The function @{ML all_elims} takes a list of (certified) terms and instantiates
@@ -535,7 +535,7 @@
*}
lemma fresh_Lam:
- shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
(*<*)oops(*>*)
text {*
@@ -556,7 +556,7 @@
(*<*)
lemma fresh_Lam:
- shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
(*>*)
apply(tactic {* expand_tac @{thms fresh_def} *})
@@ -652,7 +652,7 @@
(*<*)
lemma fresh_Lam:
- shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
apply(tactic {* expand_tac @{thms fresh_def} *})
(*>*)
apply(tactic {* chop_test_tac [fresh_pred] fresh_rules @{context} *})
@@ -714,7 +714,7 @@
(*<*)
lemma fresh_Lam:
- shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
apply(tactic {* expand_tac @{thms fresh_def} *})
(*>*)
apply(tactic {* apply_prem_tac 3 [fresh_pred] fresh_rules @{context} *})
@@ -784,7 +784,7 @@
*}
lemma fresh_Lam:
- shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+shows "\<And>a b t. \<lbrakk>a \<noteq> b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
apply(tactic {* expand_tac @{thms fresh_def} *})
apply(tactic {* prove_intro_tac 3 [fresh_pred] fresh_rules @{context} 1 *})
done
@@ -798,7 +798,7 @@
*}
lemma accpartI:
- shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
apply(tactic {* expand_tac @{thms accpart_def} *})
apply(tactic {* chop_test_tac [acc_pred] acc_rules @{context} *})
apply(tactic {* apply_prem_tac 0 [acc_pred] acc_rules @{context} *})
@@ -879,7 +879,7 @@
*}
lemma accpartI:
- shows "\<And>x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+shows "\<And>R x. (\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
apply(tactic {* expand_tac @{thms accpart_def} *})
apply(tactic {* prove_intro_tac 0 [acc_pred] acc_rules @{context} 1 *})
done
@@ -901,15 +901,15 @@
*}
lemma even0_intro:
- shows "even 0"
+shows "even 0"
by (tactic {* intro_tac eo_defs eo_rules eo_preds 0 @{context} *})
lemma evenS_intro:
- shows "\<And>m. odd m \<Longrightarrow> even (Suc m)"
+shows "\<And>m. odd m \<Longrightarrow> even (Suc m)"
by (tactic {* intro_tac eo_defs eo_rules eo_preds 1 @{context} *})
lemma fresh_App:
- shows "\<And>a t s. \<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
+shows "\<And>a t s. \<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
by (tactic {*
intro_tac @{thms fresh_def} fresh_rules [fresh_pred] 1 @{context} *})
@@ -1028,141 +1028,8 @@
indicates that the first premise of the induction principle (namely
the predicate over which the induction proceeds) is eliminated.
- (FIXME: What does @{ML Induct.induct_pred} do?)
-
- (FIXME: why the mut-name?)
-
- (FIXME: What does @{ML Binding.qualify} do?)
-
-
This completes all the code and fits in with the ``front end'' described
- in Section \ref{sec:interface}
-*}
-
-
-ML{*fun add_inductive_cmd pred_specs rule_specs lthy =
-let
- val ((pred_specs', rule_specs'), _) =
- Specification.read_spec pred_specs rule_specs lthy
-in
- add_inductive pred_specs' rule_specs' lthy
-end*}
-
-ML{*val spec_parser =
- OuterParse.fixes --
- Scan.optional
- (OuterParse.$$$ "where" |--
- OuterParse.!!!
- (OuterParse.enum1 "|"
- (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
-
-ML{*val specification =
- spec_parser >>
- (fn ((pred_specs), rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
-
-ML{*val _ = OuterSyntax.local_theory "simple_inductive"
- "define inductive predicates"
- OuterKeyword.thy_decl specification*}
-
-
-section {* Extensions (TBD) *}
-
-text {*
- Things to include at the end:
-
- \begin{itemize}
- \item include the code for the parameters
- \item say something about add-inductive-i to return
- the rules
- \item say that the induction principle is weaker (weaker than
- what the standard inductive package generates)
- \item say that no conformity test is done
- \item exercise about strong induction principles
- \item exercise about the test for the intro rules
- \end{itemize}
-
+ in Section~\ref{sec:interface}.\footnote{FIXME: Describe @{ML Induct.induct_pred}. Why the mut-name?
+ What does @{ML Binding.qualify} do?}
*}
-
-simple_inductive
- Even and Odd
-where
- Even0: "Even 0"
-| EvenS: "Odd n \<Longrightarrow> Even (Suc n)"
-| OddS: "Even n \<Longrightarrow> Odd (Suc n)"
-
-thm Even0
-thm EvenS
-thm OddS
-
-thm Even_Odd.intros
-thm Even.induct
-thm Odd.induct
-
-thm Even_def
-thm Odd_def
-
-text {*
- Second, we want that the user can specify fixed parameters.
- Remember in the previous section we stated that the user can give the
- specification for the transitive closure of a relation @{text R} as
-*}
-
-simple_inductive
- trcl\<iota>\<iota> :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
- base: "trcl\<iota>\<iota> R x x"
-| step: "trcl\<iota>\<iota> R x y \<Longrightarrow> R y z \<Longrightarrow> trcl\<iota>\<iota> R x z"
-
-text {*
- Note that there is no locale given in this specification---the parameter
- @{text "R"} therefore needs to be included explicitly in @{term trcl\<iota>\<iota>}, but
- stays fixed throughout the specification. The problem with this way of
- stating the specification for the transitive closure is that it derives the
- following induction principle.
-
- \begin{center}\small
- \mprset{flushleft}
- \mbox{\inferrule{
- @{thm_style prem1 trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}\\\\
- @{thm_style prem2 trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}\\\\
- @{thm_style prem3 trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}}
- {@{thm_style concl trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}}}
- \end{center}
-
- But this does not correspond to the induction principle we derived by hand, which
- was
-
- \begin{center}\small
- \mprset{flushleft}
- \mbox{\inferrule{
- @{thm_style prem1 trcl_induct[no_vars]}\\\\
- @{thm_style prem2 trcl_induct[no_vars]}\\\\
- @{thm_style prem3 trcl_induct[no_vars]}}
- {@{thm_style concl trcl_induct[no_vars]}}}
- \end{center}
-
- The difference is that in the one derived by hand the relation @{term R} is not
- a parameter of the proposition @{term P} to be proved and it is also not universally
- qunatified in the second and third premise. The point is that the parameter @{term R}
- stays fixed thoughout the definition and we do not want to regard it as an ``ordinary''
- argument of the transitive closure, but one that can be freely instantiated.
- In order to recognise such parameters, we have to extend the specification
- to include a mechanism to state fixed parameters. The user should be able
- to write
-
-*}
-
-(*
-simple_inductive
- trcl'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
- base: "trcl'' R x x"
-| step: "trcl'' R x y \<Longrightarrow> R y z \<Longrightarrow> trcl'' R x z"
-
-simple_inductive
- accpart'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
- accpartI: "(\<And>y. R y x \<Longrightarrow> accpart'' R y) \<Longrightarrow> accpart'' R x"
-*)
-
-end
+(*<*)end(*>*)
\ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/ProgTutorial/Package/Ind_Extensions.thy Thu Apr 02 12:19:11 2009 +0100
@@ -0,0 +1,186 @@
+theory Ind_Extensions
+imports "../Base" Simple_Inductive_Package
+begin
+
+section {* Extensions of the Package (TBD) *}
+
+(*
+text {*
+ In order to add a new inductive predicate to a theory with the help of our
+ package, the user must \emph{invoke} it. For every package, there are
+ essentially two different ways of invoking it, which we will refer to as
+ \emph{external} and \emph{internal}. By external invocation we mean that the
+ package is called from within a theory document. In this case, the
+ specification of the inductive predicate, including type annotations and
+ introduction rules, are given as strings by the user. Before the package can
+ actually make the definition, the type and introduction rules have to be
+ parsed. In contrast, internal invocation means that the package is called by
+ some other package. For example, the function definition package
+ calls the inductive definition package to define the
+ graph of the function. However, it is not a good idea for the function
+ definition package to pass the introduction rules for the function graph to
+ the inductive definition package as strings.
+  In this case, it is better to
+ directly pass the rules to the package as a list of terms, which is more
+ robust than handling strings that are lacking the additional structure of
+ terms. These two ways of invoking the package are reflected in its ML
+ programming interface, which consists of two functions:
+*}
+*)
+
+text {*
+ Things to include at the end:
+
+ \begin{itemize}
+ \item include the code for the parameters
+ \item say something about add-inductive returning
+ the rules
+ \item say something about the two interfaces for calling packages
+ \end{itemize}
+
+*}
+
+(*
+simple_inductive
+ Even and Odd
+where
+ Even0: "Even 0"
+| EvenS: "Odd n \<Longrightarrow> Even (Suc n)"
+| OddS: "Even n \<Longrightarrow> Odd (Suc n)"
+
+thm Even0
+thm EvenS
+thm OddS
+
+thm Even_Odd.intros
+thm Even.induct
+thm Odd.induct
+
+thm Even_def
+thm Odd_def
+*)
+
+(*
+text {*
+ Second, we want that the user can specify fixed parameters.
+ Remember in the previous section we stated that the user can give the
+ specification for the transitive closure of a relation @{text R} as
+*}
+
+simple_inductive
+ trcl :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
+where
+ base: "trcl R x x"
+| step: "trcl R x y \<Longrightarrow> R y z \<Longrightarrow> trcl R x z"
+*)
+
+(*
+text {*
+ Note that there is no locale given in this specification---the parameter
+ @{text "R"} therefore needs to be included explicitly in @{term trcl\<iota>\<iota>}, but
+ stays fixed throughout the specification. The problem with this way of
+ stating the specification for the transitive closure is that it derives the
+ following induction principle.
+
+ \begin{center}\small
+ \mprset{flushleft}
+ \mbox{\inferrule{
+ @{thm_style prem1 trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}\\\\
+ @{thm_style prem2 trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}\\\\
+ @{thm_style prem3 trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}}
+ {@{thm_style concl trcl\<iota>\<iota>.induct[where P=P, where z=R, where za=x, where zb=y]}}}
+ \end{center}
+
+ But this does not correspond to the induction principle we derived by hand, which
+ was
+
+
+ %\begin{center}\small
+ %\mprset{flushleft}
+ %\mbox{\inferrule{
+ % @ { thm_style prem1 trcl_induct[no_vars]}\\\\
+ % @ { thm_style prem2 trcl_induct[no_vars]}\\\\
+ % @ { thm_style prem3 trcl_induct[no_vars]}}
+ % {@ { thm_style concl trcl_induct[no_vars]}}}
+ %\end{center}
+
+ The difference is that in the one derived by hand the relation @{term R} is not
+ a parameter of the proposition @{term P} to be proved and it is also not universally
+ quantified in the second and third premise. The point is that the parameter @{term R}
+ stays fixed throughout the definition and we do not want to regard it as an ``ordinary''
+ argument of the transitive closure, but one that can be freely instantiated.
+ In order to recognise such parameters, we have to extend the specification
+ to include a mechanism to state fixed parameters. The user should be able
+ to write
+
+*}
+*)
+(*
+simple_inductive
+ trcl'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
+where
+ base: "trcl'' R x x"
+| step: "trcl'' R x y \<Longrightarrow> R y z \<Longrightarrow> trcl'' R x z"
+
+simple_inductive
+ accpart'' for R :: "'a \<Rightarrow> 'a \<Rightarrow> bool"
+where
+ accpartI: "(\<And>y. R y x \<Longrightarrow> accpart'' R y) \<Longrightarrow> accpart'' R x"
+*)
+
+text {*
+ \begin{exercise}
+ In Section~\ref{sec:nutshell} we required that introduction rules must be of the
+ form
+
+ \begin{isabelle}
+ @{text "rule ::= \<And>xs. As \<Longrightarrow> (\<And>ys. Bs \<Longrightarrow> pred ss)\<^isup>* \<Longrightarrow> pred ts"}
+ \end{isabelle}
+
+ where the @{text "As"} and @{text "Bs"} can be any collection of formulae
+ not containing the @{text "preds"}. This requirement is important,
+ because if violated, the theory behind the inductive package does not work
+ and also the proofs break. Write code that tests whether the introduction
+ rules given by the user fit into the scheme described above. Hint: It
+ is not important in which order the premises are given; the
+ @{text "As"} and @{text "(\<And>ys. Bs \<Longrightarrow> pred ss)"} premises can occur
+ in any order.
+ \end{exercise}
+*}
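+
+text {*
+  As a starting point, a minimal sketch that checks only the conclusion of
+  a rule could look as follows (the function name is ours; extending the
+  check to the premises is the actual exercise):
+*}
+
+ML{*(* after stripping the outer meta-quantifiers and the premises of a
+   rule, check that it concludes with one of the given predicates *)
+fun concl_fits preds rule =
+let
+  val concl = Logic.strip_imp_concl (Term.strip_all_body rule)
+in
+  member (op =) preds (Term.head_of (HOLogic.dest_Trueprop concl))
+end*}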
+
+text_raw {*
+ \begin{exercise}
+ If you define @{text even} and @{text odd} with the standard inductive
+ package
+ \begin{isabelle}
+*}
+inductive
+ even_2 and odd_2
+where
+ even0_2: "even_2 0"
+| evenS_2: "odd_2 m \<Longrightarrow> even_2 (Suc m)"
+| oddS_2: "even_2 m \<Longrightarrow> odd_2 (Suc m)"
+
+text_raw{*
+ \end{isabelle}
+
+ you will see that the generated induction principle for @{text "even_2"} (namely
+ @{text "even_2_odd_2.inducts"}) has the additional assumptions
+ @{prop "odd_2 m"} and @{prop "even_2 m"} in the recursive cases. These additional
+ assumptions can sometimes make ``life easier'' in proofs. Since
+ more assumptions can be made when proving properties, these induction principles
+ are called strong induction principles. However, it is the case that the
+ ``weak'' induction principles imply the ``strong'' ones. Hint: Prove a property
+ taking a pair (or tuple in case of more than one predicate) as argument: the
+ property that you originally want to prove and the predicate(s) over which the
+ induction proceeds.
+
+ Write code that automates the derivation of the strong induction
+ principles from the weak ones.
+ \end{exercise}
+*}
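+
+text {*
+  For illustration, the statement of such a strong induction principle for
+  @{text even_2} looks roughly as follows (a sketch only; the name is ours
+  and deriving it from the weak principle is the exercise):
+*}
+
+lemma even_2_strong_induct_sketch:
+assumes "even_2 n"
+and "P 0"
+and "\<And>m. \<lbrakk>odd_2 m; Q m\<rbrakk> \<Longrightarrow> P (Suc m)"
+and "\<And>m. \<lbrakk>even_2 m; P m\<rbrakk> \<Longrightarrow> Q (Suc m)"
+shows "P n"
+(*<*)oops(*>*)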
+
+
+
+(*<*)end(*>*)
\ No newline at end of file
--- a/ProgTutorial/Package/Ind_General_Scheme.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Package/Ind_General_Scheme.thy Thu Apr 02 12:19:11 2009 +0100
@@ -1,23 +1,10 @@
theory Ind_General_Scheme
-imports Simple_Inductive_Package Ind_Prelims
+imports Ind_Interface
begin
-(*<*)
-datatype trm =
- Var "string"
-| App "trm" "trm"
-| Lam "string" "trm"
+section {* The Code in a Nutshell\label{sec:nutshell} *}
-simple_inductive
- fresh :: "string \<Rightarrow> trm \<Rightarrow> bool"
-where
- fresh_var: "a\<noteq>b \<Longrightarrow> fresh a (Var b)"
-| fresh_app: "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
-| fresh_lam1: "fresh a (Lam a t)"
-| fresh_lam2: "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
-(*>*)
-section {* The Code in a Nutshell\label{sec:nutshell} *}
text {*
The inductive package will generate the reasoning infrastructure for
--- a/ProgTutorial/Package/Ind_Interface.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Package/Ind_Interface.thy Thu Apr 02 12:19:11 2009 +0100
@@ -1,15 +1,105 @@
theory Ind_Interface
-imports "../Base" "../Parsing" Ind_Prelims Simple_Inductive_Package
+imports "../Base" "../Parsing" Simple_Inductive_Package
begin
section {* Parsing and Typing the Specification\label{sec:interface} *}
+text_raw {*
+\begin{figure}[p]
+\begin{boxedminipage}{\textwidth}
+\begin{isabelle}
+*}
+simple_inductive
+ trcl :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
+where
+ base: "trcl R x x"
+| step: "trcl R x y \<Longrightarrow> R y z \<Longrightarrow> trcl R x z"
+
+simple_inductive
+ even and odd
+where
+ even0: "even 0"
+| evenS: "odd n \<Longrightarrow> even (Suc n)"
+| oddS: "even n \<Longrightarrow> odd (Suc n)"
+
+simple_inductive
+ accpart :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> bool"
+where
+ accpartI: "(\<And>y. R y x \<Longrightarrow> accpart R y) \<Longrightarrow> accpart R x"
+
+(*<*)
+datatype trm =
+ Var "string"
+| App "trm" "trm"
+| Lam "string" "trm"
+(*>*)
+
+simple_inductive
+ fresh :: "string \<Rightarrow> trm \<Rightarrow> bool"
+where
+ fresh_var: "a\<noteq>b \<Longrightarrow> fresh a (Var b)"
+| fresh_app: "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"
+| fresh_lam1: "fresh a (Lam a t)"
+| fresh_lam2: "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"
+text_raw {*
+\end{isabelle}
+\end{boxedminipage}
+\caption{Specifications given by the user for the inductive predicates
+@{term "trcl"}, @{term "even"} and @{term "odd"}, @{term "accpart"} and
+@{text "fresh"}.\label{fig:specs}}
+\end{figure}
+*}
+
+text {*
+ \begin{figure}[p]
+ \begin{boxedminipage}{\textwidth}
+ \begin{isabelle}
+ \railnontermfont{\rmfamily\itshape}
+ \railterm{simpleinductive,where,for}
+ \railalias{simpleinductive}{\simpleinductive{}}
+ \railalias{where}{\isacommand{where}}
+ \railalias{for}{\isacommand{for}}
+ \begin{rail}
+ simpleinductive target?\\ fixes
+ (where (thmdecl? prop + '|'))?
+ ;
+ \end{rail}
+ \end{isabelle}
+ \end{boxedminipage}
+ \caption{A railroad diagram describing the syntax of \simpleinductive{}.
+ The \emph{target} indicates an optional locale; the \emph{fixes} are an
+ \isacommand{and}-separated list of names for the inductive predicates (they
+ can also contain typing and syntax annotations); \emph{prop} stands for an
+ introduction rule with an optional theorem declaration (\emph{thmdecl}).
+ \label{fig:railroad}}
+ \end{figure}
+*}
+
text {*
- To be able to write down the specification in Isabelle, we have to introduce
+ To be able to write down the specifications of inductive predicates, we have
+ to introduce
a new command (see Section~\ref{sec:newcommand}). As the keyword for the
- new command we chose \simpleinductive{}. The ``infrastructure'' already
- provides an ``advanced'' feature for this command. For example we will
- be able to declare the locale
+ new command we chose \simpleinductive{}. Examples of the specifications we
+ expect the user to give for the inductive predicates from the previous section are
+ shown in Figure~\ref{fig:specs}. The general syntax we will parse is specified
+ in the railroad diagram shown in Figure~\ref{fig:railroad}. This diagram more
+ or less translates directly into the parser:
+*}
+
+ML{*val spec_parser =
+ OuterParse.fixes --
+ Scan.optional
+ (OuterParse.$$$ "where" |--
+ OuterParse.!!!
+ (OuterParse.enum1 "|"
+ (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
+
+text {*
+ which we explained in Section~\ref{sec:parsingspecs}. However, if you look
+ closely, there is no code for parsing the target given optionally after the
+ keyword. This is an ``advanced'' feature which we will inherit for ``free''
+ from the infrastructure on which we shall build the package. The target
+ stands for a locale and allows us to specify
*}
locale rel =
@@ -32,46 +122,13 @@
accpartI: "(\<And>y. R y x \<Longrightarrow> accpart' y) \<Longrightarrow> accpart' x"
text {*
- \begin{figure}[t]
- \begin{isabelle}
- \railnontermfont{\rmfamily\itshape}
- \railterm{simpleinductive,where,for}
- \railalias{simpleinductive}{\simpleinductive{}}
- \railalias{where}{\isacommand{where}}
- \railalias{for}{\isacommand{for}}
- \begin{rail}
- simpleinductive target?\\ fixes
- (where (thmdecl? prop + '|'))?
- ;
- \end{rail}
- \end{isabelle}
- \caption{A railroad diagram describing the syntax of \simpleinductive{}.
- The \emph{target} indicates an optional locale; the \emph{fixes} are an
- \isacommand{and}-separated list of names for the inductive predicates (they
- can also contain typing- and syntax anotations); \emph{prop} stands for a
- introduction rule with an optional theorem declaration (\emph{thmdecl}).
- \label{fig:railroad}}
- \end{figure}
-*}
+ Note that in these definitions the parameter @{text R}, standing for the
+ relation, is left implicit. For the moment we will ignore this kind
+ of implicit parameters and rely on the fact that the infrastructure will
+ deal with them. Later, however, we will come back to them.
-text {*
- This leads directly to the railroad diagram shown in
- Figure~\ref{fig:railroad} for the syntax of \simpleinductive{}. This diagram
- more or less translates directly into the parser:
-*}
-
-ML %linenosgray{*val spec_parser =
- OuterParse.fixes --
- Scan.optional
- (OuterParse.$$$ "where" |--
- OuterParse.!!!
- (OuterParse.enum1 "|"
- (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
-
-text {*
- which we described in Section~\ref{sec:parsingspecs}. If we feed into the
- parser the string (which corresponds to our definition of @{term even} and
- @{term odd}):
+ If we feed into the parser the string that corresponds to our definition
+ of @{term even} and @{term odd}
@{ML_response [display,gray]
"let
@@ -88,319 +145,68 @@
[((even0,\<dots>), \"\\^E\\^Ftoken\\^Eeven 0\\^E\\^F\\^E\"),
((evenS,\<dots>), \"\\^E\\^Ftoken\\^Eodd n \<Longrightarrow> even (Suc n)\\^E\\^F\\^E\"),
((oddS,\<dots>), \"\\^E\\^Ftoken\\^Eeven n \<Longrightarrow> odd (Suc n)\\^E\\^F\\^E\")]), [])"}
+
+ then we get back the specifications of the predicates (with type and syntax annotations),
+ and the specifications of the introduction rules. This is all the information we
+ need for calling the package and setting up the keyword. The latter is
+ done in Lines 5 to 7 in the code below.
*}
-
-text {*
- then we get back a locale (in this case @{ML NONE}), the predicates (with type
- and syntax annotations), the parameters (similar as the predicates) and
- the specifications of the introduction rules.
-
-
-
- This is all the information we
- need for calling the package and setting up the keyword. The latter is
- done in Lines 6 and 7 in the code below.
-
- @{ML_chunk [display,gray,linenos] syntax}
-
- We call @{ML OuterSyntax.command} with the kind-indicator @{ML
- OuterKeyword.thy_decl} since the package does not need to open up any goal
- state (see Section~\ref{sec:newcommand}). Note that the predicates and
- parameters are at the moment only some ``naked'' variables: they have no
- type yet (even if we annotate them with types) and they are also no defined
- constants yet (which the predicates will eventually be). In Lines 1 to 4 we
- gather the information from the parser to be processed further. The locale
- is passed as argument to the function @{ML
- Toplevel.local_theory}.\footnote{FIXME Is this already described?} The other
- arguments, i.e.~the predicates, parameters and intro rule specifications,
- are passed to the function @{ML add_inductive in SimpleInductivePackage}
- (Line 4).
-
- We now come to the second subtask of the package, namely transforming the
- parser output into some internal datastructures that can be processed further.
- Remember that at the moment the introduction rules are just strings, and even
- if the predicates and parameters can contain some typing annotations, they
- are not yet in any way reflected in the introduction rules. So the task of
- @{ML add_inductive in SimpleInductivePackage} is to transform the strings
- into properly typed terms. For this it can use the function
- @{ML read_spec in Specification}. This function takes some constants
- with possible typing annotations and some rule specifications and attempts to
- find a type according to the given type constraints and the type constraints
- by the surrounding (local theory). However this function is a bit
- too general for our purposes: we want that each introduction rule has only
- name (for example @{text even0} or @{text evenS}), if a name is given at all.
- The function @{ML read_spec in Specification} however allows more
- than one rule. Since it is quite convenient to rely on this function (instead of
- building your own) we just quick ly write a wrapper function that translates
- between our specific format and the general format expected by
- @{ML read_spec in Specification}. The code of this wrapper is as follows:
-
- @{ML_chunk [display,gray,linenos] read_specification}
-
- It takes a list of constants, a list of rule specifications and a local theory
- as input. Does the transformation of the rule specifications in Line 3; calls
- the function and transforms the now typed rule specifications back into our
- format and returns the type parameter and typed rule specifications.
-
-
- @{ML_chunk [display,gray,linenos] add_inductive}
-
+(*<*)ML %linenos{*fun add_inductive pred_specs rule_specs lthy = lthy
+fun add_inductive_cmd pred_specs rule_specs lthy =
+let
+ val ((pred_specs', rule_specs'), _) =
+ Specification.read_spec pred_specs rule_specs lthy
+in
+ add_inductive pred_specs' rule_specs' lthy
+end*}(*>*)
+ML_val %linenosgray{*val specification =
+ spec_parser >>
+ (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)
- In order to add a new inductive predicate to a theory with the help of our
- package, the user must \emph{invoke} it. For every package, there are
- essentially two different ways of invoking it, which we will refer to as
- \emph{external} and \emph{internal}. By external invocation we mean that the
- package is called from within a theory document. In this case, the
- specification of the inductive predicate, including type annotations and
- introduction rules, are given as strings by the user. Before the package can
- actually make the definition, the type and introduction rules have to be
- parsed. In contrast, internal invocation means that the package is called by
- some other package. For example, the function definition package
- calls the inductive definition package to define the
- graph of the function. However, it is not a good idea for the function
- definition package to pass the introduction rules for the function graph to
- the inductive definition package as strings. In this case, it is better to
- directly pass the rules to the package as a list of terms, which is more
- robust than handling strings that are lacking the additional structure of
- terms. These two ways of invoking the package are reflected in its ML
- programming interface, which consists of two functions:
-
-
- @{ML_chunk [display,gray] SIMPLE_INDUCTIVE_PACKAGE}
-*}
-
-text {*
- (FIXME: explain Binding.binding; Attrib.binding somewhere else)
-
-
- The function for external invocation of the package is called @{ML
- add_inductive in SimpleInductivePackage}, whereas the one for internal
- invocation is called @{ML add_inductive_i in SimpleInductivePackage}. Both
- of these functions take as arguments the names and types of the inductive
- predicates, the names and types of their parameters, the actual introduction
- rules and a \emph{local theory}. They return a local theory containing the
- definition and the induction principle as well introduction rules.
-
- Note that @{ML add_inductive_i in SimpleInductivePackage} expects
- the types of the predicates and parameters to be specified using the
- datatype @{ML_type typ} of Isabelle's logical framework, whereas @{ML
- add_inductive in SimpleInductivePackage} expects them to be given as
- optional strings. If no string is given for a particular predicate or
- parameter, this means that the type should be inferred by the
- package.
-
-
- Additional \emph{mixfix syntax} may be associated with the
- predicates and parameters as well. Note that @{ML add_inductive_i in
- SimpleInductivePackage} does not allow mixfix syntax to be associated with
- parameters, since it can only be used for parsing.\footnote{FIXME: why ist it there then?}
- The names of the
- predicates, parameters and rules are represented by the type @{ML_type
- Binding.binding}. Strings can be turned into elements of the type @{ML_type
- Binding.binding} using the function @{ML [display] "Binding.name : string ->
- Binding.binding"} Each introduction rule is given as a tuple containing its
- name, a list of \emph{attributes} and a logical formula. Note that the type
- @{ML_type Attrib.binding} used in the list of introduction rules is just a
- shorthand for the type @{ML_type "Binding.binding * Attrib.src list"}. The
- function @{ML add_inductive_i in SimpleInductivePackage} expects the formula
- to be specified using the datatype @{ML_type term}, whereas @{ML
- add_inductive in SimpleInductivePackage} expects it to be given as a string.
- An attribute specifies additional actions and transformations that should be
- applied to a theorem, such as storing it in the rule databases used by
- automatic tactics like the simplifier. The code of the package, which will
- be described in the following section, will mostly treat attributes as a
- black box and just forward them to other functions for storing theorems in
- local theories. The implementation of the function @{ML add_inductive in
- SimpleInductivePackage} for external invocation of the package is quite
- simple. Essentially, it just parses the introduction rules and then passes
- them on to @{ML add_inductive_i in SimpleInductivePackage}:
-
- @{ML_chunk [display] add_inductive}
-
- For parsing and type checking the introduction rules, we use the function
-
- @{ML [display] "Specification.read_specification:
- (Binding.binding * string option * mixfix) list -> (*{variables}*)
- (Attrib.binding * string list) list -> (*{rules}*)
- local_theory ->
- (((Binding.binding * typ) * mixfix) list *
- (Attrib.binding * term list) list) *
- local_theory"}
-*}
+val _ = OuterSyntax.local_theory "simple_inductive"
+ "definition of simple inductive predicates"
+ OuterKeyword.thy_decl specification*}
text {*
- During parsing, both predicates and parameters are treated as variables, so
- the lists \verb!preds_syn! and \verb!params_syn! are just appended
- before being passed to @{ML read_spec in Specification}. Note that the format
- for rules supported by @{ML read_spec in Specification} is more general than
- what is required for our package. It allows several rules to be associated
- with one name, and the list of rules can be partitioned into several
- sublists. In order for the list \verb!intro_srcs! of introduction rules
- to be acceptable as an input for @{ML read_spec in Specification}, we first
- have to turn it into a list of singleton lists. This transformation
- has to be reversed later on by applying the function
- @{ML [display] "the_single: 'a list -> 'a"}
- to the list \verb!specs! containing the parsed introduction rules.
- The function @{ML read_spec in Specification} also returns the list \verb!vars!
- of predicates and parameters that contains the inferred types as well.
- This list has to be chopped into the two lists \verb!preds_syn'! and
- \verb!params_syn'! for predicates and parameters, respectively.
- All variables occurring in a rule but not in the list of variables passed to
- @{ML read_spec in Specification} will be bound by a meta-level universal
- quantifier.
+ We call @{ML local_theory in OuterSyntax} with the kind-indicator
+ @{ML thy_decl in OuterKeyword} since the package does not need to open
+ up any proof (see Section~\ref{sec:newcommand}).
+ The auxiliary function @{text specification} in Lines 1 to 3
+ gathers the information from the parser to be processed further
+ by the function @{text "add_inductive_cmd"}, which we describe below.
+
+ Note that when the predicates come out of the parser, they are just some
+ ``naked'' strings: they have no type yet (even if we annotate them with
+ types) and they are not yet defined constants (which the predicates
+ eventually will be). Also the introduction rules are just strings. What we have
+ to do first is to transform the parser's output into some internal
+ datastructures that can be processed further. For this we can use the
+ function @{ML read_spec in Specification}. This function takes some strings
+ (with possible typing annotations) and some rule specifications, and attempts
+ to find a typing according to the type constraints given by the
+ user and the type constraints arising from the ``ambient'' theory. It returns
+ the types for the predicates and typed terms for the
+ introduction rules. So at the heart of the function
+ @{text "add_inductive_cmd"} is a call to @{ML read_spec in Specification}.
*}
-text {*
- Finally, @{ML read_specification in Specification} also returns another local theory,
- but we can safely discard it. As an example, let us look at how we can use this
- function to parse the introduction rules of the @{text trcl} predicate:
+ML{*fun add_inductive_cmd pred_specs rule_specs lthy =
+let
+ val ((pred_specs', rule_specs'), _) =
+ Specification.read_spec pred_specs rule_specs lthy
+in
+ add_inductive pred_specs' rule_specs' lthy
+end*}
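+
+text {*
+  As an illustration, a direct call of this function with the data that
+  @{ML spec_parser} produces for @{text even} and @{text odd} could look
+  roughly as follows (a sketch of the input format only):
+*}
+
+ML_val {*
+(* predicate specifications: name, optional type, mixfix annotation,
+   just as OuterParse.fixes would produce them *)
+val pred_specs =
+  [(Binding.name "even", SOME "nat \<Rightarrow> bool", NoSyn),
+   (Binding.name "odd", SOME "nat \<Rightarrow> bool", NoSyn)]
+
+(* rule specifications: (name, attributes) paired with the rule as a string *)
+val rule_specs =
+  [((Binding.name "even0", []), "even 0"),
+   ((Binding.name "evenS", []), "odd n \<Longrightarrow> even (Suc n)"),
+   ((Binding.name "oddS", []), "even n \<Longrightarrow> odd (Suc n)")]
+
+val lthy' = add_inductive_cmd pred_specs rule_specs @{context}
+*}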
- @{ML_response [display]
-"Specification.read_specification
- [(Binding.name \"trcl\", NONE, NoSyn),
- (Binding.name \"r\", SOME \"'a \<Rightarrow> 'a \<Rightarrow> bool\", NoSyn)]
- [((Binding.name \"base\", []), [\"trcl r x x\"]),
- ((Binding.name \"step\", []), [\"trcl r x y \<Longrightarrow> r y z \<Longrightarrow> trcl r x z\"])]
- @{context}"
-"((\<dots>,
- [(\<dots>,
- [Const (\"all\", \<dots>) $ Abs (\"x\", TFree (\"'a\", \<dots>),
- Const (\"Trueprop\", \<dots>) $
- (Free (\"trcl\", \<dots>) $ Free (\"r\", \<dots>) $ Bound 0 $ Bound 0))]),
- (\<dots>,
- [Const (\"all\", \<dots>) $ Abs (\"x\", TFree (\"'a\", \<dots>),
- Const (\"all\", \<dots>) $ Abs (\"y\", TFree (\"'a\", \<dots>),
- Const (\"all\", \<dots>) $ Abs (\"z\", TFree (\"'a\", \<dots>),
- Const (\"==>\", \<dots>) $
- (Const (\"Trueprop\", \<dots>) $
- (Free (\"trcl\", \<dots>) $ Free (\"r\", \<dots>) $ Bound 2 $ Bound 1)) $
- (Const (\"==>\", \<dots>) $ \<dots> $ \<dots>))))])]),
- \<dots>)
-: (((Binding.binding * typ) * mixfix) list *
- (Attrib.binding * term list) list) * local_theory"}
-
- In the list of variables passed to @{ML read_specification in Specification}, we have
- used the mixfix annotation @{ML NoSyn} to indicate that we do not want to associate any
- mixfix syntax with the variable. Moreover, we have only specified the type of \texttt{r},
- whereas the type of \texttt{trcl} is computed using type inference.
- The local variables \texttt{x}, \texttt{y} and \texttt{z} of the introduction rules
- are turned into bound variables with the de Bruijn indices,
- whereas \texttt{trcl} and \texttt{r} remain free variables.
-
-*}
-
-text {*
-
- \paragraph{Parsers for theory syntax}
-
- Although the function @{ML add_inductive in SimpleInductivePackage} parses terms and types, it still
- cannot be used to invoke the package directly from within a theory document.
- In order to do this, we have to write another parser. Before we describe
- the process of writing parsers for theory syntax in more detail, we first
- show some examples of how we would like to use the inductive definition
- package.
-
-
- The definition of the transitive closure should look as follows:
-*}
-
-ML {* SpecParse.opt_thm_name *}
text {*
-
- A proposition can be parsed using the function @{ML prop in OuterParse}.
- Essentially, a proposition is just a string or an identifier, but using the
- specific parser function @{ML prop in OuterParse} leads to more instructive
- error messages, since the parser will complain that a proposition was expected
- when something else than a string or identifier is found.
- An optional locale target specification of the form \isa{(\isacommand{in}\ $\ldots$)}
- can be parsed using @{ML opt_target in OuterParse}.
- The lists of names of the predicates and parameters, together with optional
- types and syntax, are parsed using the functions @{ML "fixes" in OuterParse}
- and @{ML for_fixes in OuterParse}, respectively.
- In addition, the following function from @{ML_struct SpecParse} for parsing
- an optional theorem name and attribute, followed by a delimiter, will be useful:
-
- \begin{table}
- @{ML "opt_thm_name:
- string -> Attrib.binding parser" in SpecParse}
- \end{table}
-
- We now have all the necessary tools to write the parser for our
- \isa{\isacommand{simple{\isacharunderscore}inductive}} command:
-
-
- Once all arguments of the command have been parsed, we apply the function
- @{ML add_inductive in SimpleInductivePackage}, which yields a local theory
- transformer of type @{ML_type "local_theory -> local_theory"}. Commands in
- Isabelle/Isar are realized by transition transformers of type
- @{ML_type [display] "Toplevel.transition -> Toplevel.transition"}
- We can turn a local theory transformer into a transition transformer by using
- the function
-
- @{ML [display] "Toplevel.local_theory : string option ->
- (local_theory -> local_theory) ->
- Toplevel.transition -> Toplevel.transition"}
-
- which, apart from the local theory transformer, takes an optional name of a locale
- to be used as a basis for the local theory.
-
- (FIXME : needs to be adjusted to new parser type)
-
- {\it
- The whole parser for our command has type
- @{text [display] "OuterLex.token list ->
- (Toplevel.transition -> Toplevel.transition) * OuterLex.token list"}
- which is abbreviated by @{text OuterSyntax.parser_fn}. The new command can be added
- to the system via the function
- @{text [display] "OuterSyntax.command :
- string -> string -> OuterKeyword.T -> OuterSyntax.parser_fn -> unit"}
- which imperatively updates the parser table behind the scenes. }
-
- In addition to the parser, this
- function takes two strings representing the name of the command and a short description,
- as well as an element of type @{ML_type OuterKeyword.T} describing which \emph{kind} of
- command we intend to add. Since we want to add a command for declaring new concepts,
- we choose the kind @{ML "OuterKeyword.thy_decl"}. Other kinds include
- @{ML "OuterKeyword.thy_goal"}, which is similar to @{ML thy_decl in OuterKeyword},
- but requires the user to prove a goal before making the declaration, or
- @{ML "OuterKeyword.diag"}, which corresponds to a purely diagnostic command that does
- not change the context. For example, the @{ML thy_goal in OuterKeyword} kind is used
- by the \isa{\isacommand{function}} command \cite{Krauss-IJCAR06}, which requires the user
- to prove that a given set of equations is non-overlapping and covers all cases. The kind
- of the command should be chosen with care, since selecting the wrong one can cause strange
- behaviour of the user interface, such as failure of the undo mechanism.
+ Once we have the input data as some internal datastructure, we call
+ the function @{ML add_inductive}. This function does the heavy
+ lifting in the package: it generates definitions for the
+ predicates and derives from them the corresponding induction principles and
+ introduction rules. The description of this function spans
+ the next two sections.
*}
-
-text {*
- Note that the @{text trcl} predicate has two different kinds of parameters: the
- first parameter @{text R} stays \emph{fixed} throughout the definition, whereas
- the second and third parameter changes in the ``recursive call''. This will
- become important later on when we deal with fixed parameters and locales.
-
-
-
- The purpose of the package we show next is that the user just specifies the
- inductive predicate by stating some introduction rules and then the packages
- makes the equivalent definition and derives from it the needed properties.
-*}
-
-text {*
- (FIXME: perhaps move somewhere else)
-
- The point of these examples is to get a feeling what the automatic proofs
- should do in order to solve all inductive definitions we throw at them. For this
- it is instructive to look at the general construction principle
- of inductive definitions, which we shall do in the next section.
-
- The code of the inductive package falls roughly in tree parts: the first
- deals with the definitions, the second with the induction principles and
- the third with the introduction rules.
-
-*}
-
-
-(*<*)
-end
-(*>*)
+(*<*)end(*>*)
\ No newline at end of file
--- a/ProgTutorial/Package/Ind_Intro.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Package/Ind_Intro.thy Thu Apr 02 12:19:11 2009 +0100
@@ -17,9 +17,9 @@
\medskip
HOL is based on just a few primitive constants, like equality and
implication, whose properties are described by axioms. All other concepts,
- such as inductive predicates, datatypes, or recursive functions have to be defined
+ such as inductive predicates, datatypes or recursive functions, have to be defined
in terms of those constants, and the desired properties, for example
- induction theorems, or recursion equations have to be derived from the definitions
+ induction theorems or recursion equations, have to be derived from the definitions
by a formal proof. Since it would be very tedious for a user to define
complex inductive predicates or datatypes ``by hand'' just using the
primitive operators of higher order logic, \emph{definitional packages} have
@@ -29,7 +29,7 @@
definitions and proofs behind the scenes. In this chapter we explain how
such a package can be implemented.
- As a running example, we have chosen a rather simple package for defining
+ As the running example we have chosen a rather simple package for defining
inductive predicates. To keep things really simple, we will not use the
general Knaster-Tarski fixpoint theorem on complete lattices, which forms
the basis of Isabelle's standard inductive definition package. Instead, we
--- a/ProgTutorial/Package/Ind_Prelims.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Package/Ind_Prelims.thy Thu Apr 02 12:19:11 2009 +0100
@@ -1,5 +1,5 @@
theory Ind_Prelims
-imports Main LaTeXsugar"../Base" Simple_Inductive_Package
+imports Main LaTeXsugar "../Base"
begin
section{* Preliminaries *}
@@ -9,55 +9,26 @@
expects from the package to produce a convenient reasoning
infrastructure. This infrastructure needs to be derived from the definition
that correspond to the specified predicate(s). Before we start with
- explaining all parts of the package, let us first give four examples showing
- how to define inductive predicates by hand and then also how to prove by
- hand properties about them. See Figure \ref{fig:paperpreds} for their usual
- ``pencil-and-paper'' definitions. From these examples, we will
- figure out a general method for defining inductive predicates. The aim in
- this section is \emph{not} to write proofs that are as beautiful as
- possible, but as close as possible to the ML-code we will develop in later
- sections.
+ explaining all parts of the package, let us first give some examples
+ showing how to define inductive predicates and then also how
+ to generate a reasoning infrastructure for them. From the examples
+ we will figure out a general method for
+ defining inductive predicates. The aim in this section is \emph{not} to
+ write proofs that are as beautiful as possible, but as close as possible to
+ the ML-code we will develop in later sections.
- \begin{figure}[t]
- \begin{boxedminipage}{\textwidth}
+
+
+ We first consider the transitive closure of a relation @{text R}. The
+ ``pencil-and-paper'' specification for the transitive closure is:
+
\begin{center}\small
@{prop[mode=Axiom] "trcl R x x"} \hspace{5mm}
@{prop[mode=Rule] "R x y \<Longrightarrow> trcl R y z \<Longrightarrow> trcl R x z"}
\end{center}
- \begin{center}\small
- @{prop[mode=Axiom] "even (0::nat)"} \hspace{5mm}
- @{prop[mode=Rule] "odd n \<Longrightarrow> even (Suc n)"} \hspace{5mm}
- @{prop[mode=Rule] "even n \<Longrightarrow> odd (Suc n)"}
- \end{center}
- \begin{center}\small
- \mbox{\inferrule{@{term "\<And>y. R y x \<Longrightarrow> accpart R y"}}{@{term "accpart R x"}}}
- \end{center}
- \begin{center}\small
- @{prop[mode=Rule] "a\<noteq>b \<Longrightarrow> fresh a (Var b)"}\hspace{5mm}
- @{prop[mode=Rule] "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"}\\[2mm]
- @{prop[mode=Axiom] "fresh a (Lam a t)"}\hspace{5mm}
- @{prop[mode=Rule] "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"}
- \end{center}
- \end{boxedminipage}
- \caption{Examples of four ``Pencil-and-paper'' definitions of inductively defined
- predicates. In formal reasoning with Isabelle, the user just wants to give such
- definitions and expects that the reasoning structure is derived automatically.
- For this definitional packages need to be implemented.\label{fig:paperpreds}}
- \end{figure}
- We first consider the transitive closure of a relation @{text R}. The user will
- state for @{term trcl\<iota>} the specification:
-*}
-
-simple_inductive
- trcl\<iota> :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> bool"
-where
- base: "trcl\<iota> R x x"
-| step: "trcl\<iota> R x y \<Longrightarrow> R y z \<Longrightarrow> trcl\<iota> R x z"
-
-text {*
- The package has to make an appropriate definition and provide
- lemmas to reason about the predicate @{term trcl\<iota>}. Since an inductively
+ The package has to make an appropriate definition for @{term "trcl"}.
+ Since an inductively
defined predicate is the least predicate closed under a collection of
introduction rules, the predicate @{text "trcl R x y"} can be defined so
that it holds if and only if @{text "P x y"} holds for every predicate
@@ -79,7 +50,7 @@
With this definition, the proof of the induction principle for @{term trcl}
is almost immediate. It suffices to convert all the meta-level
connectives in the lemma to object-level connectives using the
- proof method @{text atomize} (Line 4), expand the definition of @{term trcl}
+ proof method @{text atomize} (Line 4 below), expand the definition of @{term trcl}
(Line 5 and 6), eliminate the universal quantifier contained in it (Line~7),
and then solve the goal by @{text assumption} (Line 8).
@@ -124,7 +95,7 @@
The two assumptions come from the definition of @{term trcl} and correspond
to the introduction rules. Thus, all we have to do is to eliminate the
universal quantifier in front of the first assumption (Line 5), and then
- solve the goal by assumption (Line 6).
+ solve the goal by @{text assumption} (Line 6).
*}
text {*
@@ -160,8 +131,8 @@
txt {*
The assumptions @{text "p1"} and @{text "p2"} correspond to the premises of
the second introduction rule (unfolded); the assumptions @{text "r1"} and @{text "r2"}
- come from the definition of @{term trcl} . We apply @{text "r2"} to the goal
- @{term "P x z"}. In order for the assumption to be applicable as a rule, we
+ come from the definition of @{term trcl}. We apply @{text "r2"} to the goal
+ @{term "P x z"}. In order for this assumption to be applicable as a rule, we
have to eliminate the universal quantifier and turn the object-level
implications into meta-level ones. This can be accomplished using the @{text
rule_format} attribute. So we continue the proof with:
@@ -211,28 +182,24 @@
The method of defining inductive predicates by impredicative quantification
also generalises to mutually inductive predicates. The next example defines
- the predicates @{text even} and @{text odd}. The user will state for this
- inductive definition the specification:
-*}
+ the predicates @{text even} and @{text odd} given by
-simple_inductive
- even and odd
-where
- even0: "even 0"
-| evenS: "odd n \<Longrightarrow> even (Suc n)"
-| oddS: "even n \<Longrightarrow> odd (Suc n)"
+ \begin{center}\small
+ @{prop[mode=Axiom] "even (0::nat)"} \hspace{5mm}
+ @{prop[mode=Rule] "odd n \<Longrightarrow> even (Suc n)"} \hspace{5mm}
+ @{prop[mode=Rule] "even n \<Longrightarrow> odd (Suc n)"}
+ \end{center}
-text {*
Since the predicates @{term even} and @{term odd} are mutually inductive, each
corresponding definition must quantify over both predicates (we name them
below @{text "P"} and @{text "Q"}).
*}
-definition "even\<iota> \<equiv>
+definition "even \<equiv>
\<lambda>n. \<forall>P Q. P 0 \<longrightarrow> (\<forall>m. Q m \<longrightarrow> P (Suc m))
\<longrightarrow> (\<forall>m. P m \<longrightarrow> Q (Suc m)) \<longrightarrow> P n"
-definition "odd\<iota> \<equiv>
+definition "odd \<equiv>
\<lambda>n. \<forall>P Q. P 0 \<longrightarrow> (\<forall>m. Q m \<longrightarrow> P (Suc m))
\<longrightarrow> (\<forall>m. P m \<longrightarrow> Q (Suc m)) \<longrightarrow> Q n"
@@ -280,7 +247,7 @@
qed
text {*
- The interesting lines are 7 to 15. The assumptions fall into to categories:
+ The interesting lines are 7 to 15. The assumptions fall into two categories:
@{text p1} corresponds to the premise of the introduction rule; @{text "r1"}
to @{text "r3"} come from the definition of @{text "even"}.
In Line 13, we apply the assumption @{text "r2"} (since we prove the second
@@ -290,10 +257,14 @@
need to be instantiated and then also the implications need to be resolved
with the other rules.
- As a final example, we define the accessible part of a relation @{text R}
- (see Figure~\ref{fig:paperpreds}). There the premsise of the introduction
- rule involves a universal quantifier and an implication. The
- definition of @{text accpart} is:
+ Next we define the accessible part of a relation @{text R} given by
+ the single rule:
+
+ \begin{center}\small
+ \mbox{\inferrule{@{term "\<And>y. R y x \<Longrightarrow> accpart R y"}}{@{term "accpart R x"}}}
+ \end{center}
+
+ The definition of @{text "accpart"} is:
*}
definition "accpart \<equiv> \<lambda>R x. \<forall>P. (\<forall>x. (\<forall>y. R y x \<longrightarrow> P y) \<longrightarrow> P x) \<longrightarrow> P x"
@@ -325,15 +296,32 @@
qed
text {*
- There are now two subproofs. The assumptions fall again into two categories (Lines
- 7 to 9). In Line 11, applying the assumption @{text "r1"} generates a goal state
- with the new local assumption @{term "R y x"}, named @{text "r1_prem"} in the
- proof (Line 14). This local assumption is used to solve
- the goal @{term "P y"} with the help of assumption @{text "p1"}.
+ As you can see, there are now two subproofs. The assumptions fall again into
+ two categories (Lines 7 to 9). In Line 11, applying the assumption @{text
+ "r1"} generates a goal state with the new local assumption @{term "R y x"},
+ named @{text "r1_prem"} in the second subproof (Line 14). This local assumption is
+ used to solve the goal @{term "P y"} with the help of assumption @{text
+ "p1"}.
+
- The point of these examples is to get a feeling what the automatic proofs
- should do in order to solve all inductive definitions we throw at them.
- This is usually the first step in writing a package. We next explain
+ \begin{exercise}
+ Give the definition for the freshness predicate for lambda-terms. The rules
+ for this predicate are:
+
+ \begin{center}\small
+ @{prop[mode=Rule] "a\<noteq>b \<Longrightarrow> fresh a (Var b)"}\hspace{5mm}
+ @{prop[mode=Rule] "\<lbrakk>fresh a t; fresh a s\<rbrakk> \<Longrightarrow> fresh a (App t s)"}\\[2mm]
+ @{prop[mode=Axiom] "fresh a (Lam a t)"}\hspace{5mm}
+ @{prop[mode=Rule] "\<lbrakk>a\<noteq>b; fresh a t\<rbrakk> \<Longrightarrow> fresh a (Lam b t)"}
+ \end{center}
+
+ From the definition derive the induction principle and the introduction
+ rules.
+ \end{exercise}
+
+ The point of all these examples is to get a feeling for what the automatic
+ proofs should do in order to solve all inductive definitions we throw at
+ them. This is usually the first step in writing a package. We next explain
the parsing and typing part of the package.
*}
--- a/ProgTutorial/Parsing.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Parsing.thy Thu Apr 02 12:19:11 2009 +0100
@@ -195,7 +195,7 @@
@{ML_response_fake [display,gray] "Scan.error (!! (fn _ => \"foo\") ($$ \"h\"))"
"Exception Error \"foo\" raised"}
- This ``prefixing'' is usually done by wrappers such as @{ML "OuterSyntax.command"}
+ This ``prefixing'' is usually done by wrappers such as @{ML "OuterSyntax.local_theory"}
(see Section~\ref{sec:newcommand} which explains this function in more detail).
Let us now return to our example of parsing @{ML "(p -- q) || r" for p q
@@ -350,8 +350,6 @@
section {* Parsing Theory Syntax *}
text {*
- (FIXME: context parser)
-
Most of the time, however, Isabelle developers have to deal with parsing
tokens, not strings. These token parsers have the type:
*}
@@ -620,12 +618,14 @@
for inductive predicates of the form:
*}
+(*
simple_inductive
even and odd
where
even0: "even 0"
| evenS: "odd n \<Longrightarrow> even (Suc n)"
| oddS: "even n \<Longrightarrow> odd (Suc n)"
+*)
text {*
For this we are going to use the parser:
@@ -680,7 +680,7 @@
val input = filtered_input
\"foo::\\\"int \<Rightarrow> bool\\\" and bar::nat (\\\"BAR\\\" 100) and blonk\"
in
- parse OuterParse.fixes input
+ parse OuterParse.fixes input
end"
"([(foo, SOME \"\\^E\\^Ftoken\\^Eint \<Rightarrow> bool\\^E\\^F\\^E\", NoSyn),
(bar, SOME \"\\^E\\^Ftoken\\^Enat\\^E\\^F\\^E\", Mixfix (\"BAR\", [], 100)),
@@ -700,10 +700,10 @@
\end{readmore}
Lines 3 to 7 in the function @{ML spec_parser} implement the parser for a
- list of introduction rules, that is propositions with theorem
- annotations. The introduction rules are propositions parsed by @{ML
- OuterParse.prop}. However, they can include an optional theorem name plus
- some attributes. For example
+ list of introduction rules, that is propositions with theorem annotations
+ such as rule names and attributes. The introduction rules are propositions
+ parsed by @{ML OuterParse.prop}. However, they can include an optional
+ theorem name plus some attributes. For example
@{ML_response [display,gray] "let
val input = filtered_input \"foo_lemma[intro,dest!]:\"
@@ -753,8 +753,6 @@
section {* New Commands and Keyword Files\label{sec:newcommand} *}
text {*
- (FIXME: update to the right command setup --- is this still needed?)
-
Often new commands, for example for providing new definitional principles,
need to be implemented. While this is not difficult on the ML-level,
new commands, in order to be useful, need to be recognised by
@@ -767,16 +765,16 @@
*}
ML{*let
- val do_nothing = Scan.succeed (Toplevel.theory I)
+ val do_nothing = Scan.succeed (LocalTheory.theory I)
val kind = OuterKeyword.thy_decl
in
- OuterSyntax.command "foobar" "description of foobar" kind do_nothing
+ OuterSyntax.local_theory "foobar" "description of foobar" kind do_nothing
end *}
text {*
- The crucial function @{ML OuterSyntax.command} expects a name for the command, a
- short description, a kind indicator (which we will explain later on more thoroughly) and a
- parser producing a top-level transition function (its purpose will also explained
+ The crucial function @{ML OuterSyntax.local_theory} expects a name for the command, a
+ short description, a kind indicator (which we will explain more thoroughly later) and a
+ parser producing a local theory transition (its purpose will also be explained
later).
While this is everything you have to do on the ML-level, you need a keyword
@@ -804,15 +802,15 @@
\isacommand{ML}~@{text "\<verbopen>"}\\
@{ML
"let
- val do_nothing = Scan.succeed (Toplevel.theory I)
+ val do_nothing = Scan.succeed (LocalTheory.theory I)
val kind = OuterKeyword.thy_decl
in
- OuterSyntax.command \"foobar\" \"description of foobar\" kind do_nothing
+ OuterSyntax.local_theory \"foobar\" \"description of foobar\" kind do_nothing
end"}\\
@{text "\<verbclose>"}\\
\isacommand{end}
\end{graybox}
- \caption{\small The file @{text "Command.thy"} is necessary for generating a log
+ \caption{The file @{text "Command.thy"} is necessary for generating a log
file. This log file enables Isabelle to generate a keyword file containing
the command \isacommand{foobar}.\label{fig:commandtheory}}
\end{figure}
@@ -910,22 +908,22 @@
The crucial part of a command is the function that determines the behaviour
of the command. In the code above we used a ``do-nothing''-function, which
because of @{ML Scan.succeed} does not parse any argument, but immediately
- returns the simple toplevel function @{ML "Toplevel.theory I"}. We can
+ returns the simple function @{ML "LocalTheory.theory I"}. We can
replace this code by a function that first parses a proposition (using the
parser @{ML OuterParse.prop}), then prints out the tracing
- information (using a new top-level function @{text trace_top_lvl}) and
+ information (using a new function @{text trace_prop}) and
finally does nothing. For this you can write:
*}
ML{*let
- fun trace_top_lvl str =
+ fun trace_prop str =
LocalTheory.theory (fn lthy => (tracing str; lthy))
- val trace_prop = OuterParse.prop >> trace_top_lvl
-
+ val trace_prop_parser = OuterParse.prop >> trace_prop
val kind = OuterKeyword.thy_decl
in
- OuterSyntax.local_theory "foobar" "traces a proposition" kind trace_prop
+ OuterSyntax.local_theory "foobar" "traces a proposition"
+ kind trace_prop_parser
end *}
@@ -939,17 +937,19 @@
and see the proposition in the tracing buffer.
- Note that so far we used @{ML thy_decl in OuterKeyword} as the kind indicator
- for the command. This means that the command finishes as soon as the
- arguments are processed. Examples of this kind of commands are
- \isacommand{definition} and \isacommand{declare}. In other cases,
- commands are expected to parse some arguments, for example a proposition,
- and then ``open up'' a proof in order to prove the proposition (for example
+ Note that so far we used @{ML thy_decl in OuterKeyword} as the kind
+ indicator for the command. This means that the command finishes as soon as
+ the arguments are processed. Examples of this kind of command are
+ \isacommand{definition} and \isacommand{declare}. In other cases, commands
+ are expected to parse some arguments, for example a proposition, and then
+ ``open up'' a proof in order to prove the proposition (for example
\isacommand{lemma}) or prove some other properties (for example
- \isacommand{function}). To achieve this kind of behaviour, you have to use the kind
- indicator @{ML thy_goal in OuterKeyword}. Note, however, once you change the
- ``kind'' of a command from @{ML thy_decl in OuterKeyword} to @{ML thy_goal in OuterKeyword}
- then the keyword file needs to be re-created!
+ \isacommand{function}). To achieve this kind of behaviour, you have to use
+ the kind indicator @{ML thy_goal in OuterKeyword} and the function @{ML
+ "local_theory_to_proof" in OuterSyntax} to set up the command. Note,
+ however, that once you change the ``kind'' of a command from @{ML thy_decl in
+ OuterKeyword} to @{ML thy_goal in OuterKeyword}, the keyword file needs
+ to be re-created!
Below we change \isacommand{foobar} so that it takes a proposition as
argument and then starts a proof in order to prove it. Therefore in Line 13,
@@ -957,34 +957,32 @@
*}
ML%linenosgray{*let
- fun set_up_thm str ctxt =
+ fun prove_prop str ctxt =
let
val prop = Syntax.read_prop ctxt str
in
Proof.theorem_i NONE (K I) [[(prop,[])]] ctxt
end;
- val prove_prop = OuterParse.prop >>
- (fn str => Toplevel.print o
- Toplevel.local_theory_to_proof NONE (set_up_thm str))
-
+ val prove_prop_parser = OuterParse.prop >> prove_prop
val kind = OuterKeyword.thy_goal
in
- OuterSyntax.command "foobar" "proving a proposition" kind prove_prop
+ OuterSyntax.local_theory_to_proof "foobar" "proving a proposition"
+ kind prove_prop_parser
end *}
text {*
- The function @{text set_up_thm} in Lines 2 to 7 takes a string (the proposition to be
+ The function @{text prove_prop} in Lines 2 to 7 takes a string (the proposition to be
proved) and a context as argument. The context is necessary in order to be able to use
@{ML Syntax.read_prop}, which converts a string into a proper proposition.
In Line 6 the function @{ML Proof.theorem_i} starts the proof for the
proposition. Its argument @{ML NONE} stands for a locale (which we chose to
omit); the argument @{ML "(K I)"} stands for a function that determines what
should be done with the theorem once it is proved (we chose to just forget
- about it). Lines 9 to 11 contain the parser for the proposition.
+ about it). Line 9 contains the parser for the proposition.
If you now type \isacommand{foobar}~@{text [quotes] "True \<and> True"}, you obtain the following
- proof state:
+ proof state
\begin{isabelle}
\isacommand{foobar}~@{text [quotes] "True \<and> True"}\\
@@ -992,7 +990,7 @@
@{text "1. True \<and> True"}
\end{isabelle}
- and you can build the proof
+ and you can build the following proof
\begin{isabelle}
\isacommand{foobar}~@{text [quotes] "True \<and> True"}\\
@@ -1001,12 +999,6 @@
\isacommand{done}
\end{isabelle}
-
-
- (FIXME What do @{ML "Toplevel.theory"}
- @{ML "Toplevel.print"}
- @{ML Toplevel.local_theory} do?)
-
(FIXME read a name and show how to store theorems)
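+
+  As a first step towards the storing part, the sketch below reads an
+  optional theorem name and, instead of forgetting the proved theorem
+  with @{ML "(K I)"}, registers it with @{ML LocalTheory.note}. We assume
+  here the parser @{ML "opt_thm_name" in SpecParse} and the kind
+  indicator @{ML "Thm.theoremK"}; take the code as a sketch of one
+  possible set-up, not as the definitive one.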
*}
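+
+ML{*let
+   (* a sketch only: we assume the current signatures of
+      SpecParse.opt_thm_name and LocalTheory.note *)
+   fun after_qed thm_name thms lthy =
+     lthy |> LocalTheory.note Thm.theoremK (thm_name, flat thms) |> snd
+
+   fun setup_proof (thm_name, str) lthy =
+     Proof.theorem_i NONE (after_qed thm_name)
+       [[(Syntax.read_prop lthy str, [])]] lthy
+
+   val parser = SpecParse.opt_thm_name ":" -- OuterParse.prop
+   val kind = OuterKeyword.thy_goal
+in
+   OuterSyntax.local_theory_to_proof "foobar" "proving and storing a proposition"
+     kind (parser >> setup_proof)
+end *}
+
+text {*
+  With this, typing \isacommand{foobar}~@{text "bar:"}~@{text [quotes]
+  "True \<and> True"} opens a proof as before and, after it is finished with
+  \isacommand{done}, should make the theorem available under the name
+  @{text "bar"}.
+*}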
--- a/ProgTutorial/ROOT.ML Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/ROOT.ML Thu Apr 02 12:19:11 2009 +0100
@@ -13,6 +13,7 @@
use_thy "Package/Ind_Interface";
use_thy "Package/Ind_General_Scheme";
use_thy "Package/Ind_Code";
+use_thy "Package/Ind_Extensions";
use_thy "Appendix";
use_thy "Recipes/Antiquotes";
--- a/ProgTutorial/Solutions.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Solutions.thy Thu Apr 02 12:19:11 2009 +0100
@@ -187,7 +187,8 @@
*}
ML{*local
- fun mk_tac tac = timing_wrapper (EVERY1 [tac, K (SkipProof.cheat_tac @{theory})])
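+ (* run tac on the first subgoal, then finish the proof by cheating;
+ timing_wrapper records the runtime of the combined tactic *)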
+ fun mk_tac tac =
+ timing_wrapper (EVERY1 [tac, K (SkipProof.cheat_tac @{theory})])
in
val c_tac = mk_tac (add_tac @{context})
val s_tac = mk_tac (simp_tac (HOL_basic_ss addsimprocs [@{simproc add_sp}]))
--- a/ProgTutorial/Tactical.thy Wed Apr 01 14:50:09 2009 +0200
+++ b/ProgTutorial/Tactical.thy Thu Apr 02 12:19:11 2009 +0100
@@ -1652,11 +1652,10 @@
@{text num} (Line 3), and then generates the meta-equation @{text "t \<equiv> num"}
(Line 4), which it proves by the arithmetic tactic in Line 6.
- For our purpose at the moment, proving the meta-equation using
- @{ML Arith_Data.arith_tac} is
- fine, but there is also an alternative employing the simplifier with a very
- restricted simpset. For the kind of lemmas we want to prove, the simpset
- @{text "num_ss"} in the code will suffice.
+ For our purpose at the moment, proving the meta-equation using @{ML
+ arith_tac in Arith_Data} is fine, but there is also an alternative employing
+ the simplifier with a special simpset. For the kind of lemmas we
+ want to prove here, the simpset @{text "num_ss"} should suffice.
*}
ML{*fun get_thm_alt ctxt (t, n) =
@@ -1672,7 +1671,7 @@
text {*
The advantage of @{ML get_thm_alt} is that it leaves very little room for
something to go wrong; in contrast it is much more difficult to predict
- what happens with @{ML Arith_Data.arith_tac}, especially in more complicated
+ what happens with @{ML arith_tac in Arith_Data}, especially in more complicated
circumstances. The disadvantage of @{ML get_thm_alt} is that you have to find a simpset
that is sufficiently powerful to solve every instance of the lemmas
we would like to prove. This requires careful tuning, but is often necessary in
Binary file progtutorial.pdf has changed