made the Ackermann function example safer and included suggestions from MW
authorChristian Urban <urbanc@in.tum.de>
Wed, 18 Mar 2009 18:27:48 +0100
changeset 186 371e4375c994
parent 185 043ef82000b4
child 187 e2e805678fb0
made the Ackermann function example safer and included suggestions from MW
CookBook/FirstSteps.thy
CookBook/Package/Ind_Code.thy
CookBook/Package/Ind_Interface.thy
CookBook/Parsing.thy
CookBook/Recipes/TimeLimit.thy
CookBook/Solutions.thy
CookBook/Tactical.thy
cookbook.pdf
--- a/CookBook/FirstSteps.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/FirstSteps.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -557,32 +557,31 @@
   While antiquotations are very convenient for constructing terms, they can
   only construct fixed terms (remember they are ``linked'' at compile-time).
   However, you often need to construct terms dynamically. For example, a
-  function that returns the implication @{text "\<And>(x::\<tau>). P x \<Longrightarrow> Q x"} taking
-  @{term P}, @{term Q} and the type @{term "\<tau>"} as arguments can only be
-  written as:
+  function that returns the implication @{text "\<And>(x::nat). P x \<Longrightarrow> Q x"} taking
+  @{term P} and @{term Q} as arguments can only be written as:
 
 *}
 
-ML{*fun make_imp P Q tau =
+ML{*fun make_imp P Q =
 let
-  val x = Free ("x", tau)
+  val x = Free ("x", @{typ nat})
 in 
   Logic.all x (Logic.mk_implies (P $ x, Q $ x))
 end *}
 
 text {*
-  The reason is that you cannot pass the arguments @{term P}, @{term Q} and 
-  @{term "tau"} into an antiquotation. For example the following does \emph{not} work.
+  The reason is that you cannot pass the arguments @{term P} and @{term Q} 
+  into an antiquotation. For example the following does \emph{not} work.
 *}
 
-ML{*fun make_wrong_imp P Q tau = @{prop "\<And>x. P x \<Longrightarrow> Q x"} *}
+ML{*fun make_wrong_imp P Q = @{prop "\<And>(x::nat). P x \<Longrightarrow> Q x"} *}
 
 text {*
-  To see this apply @{text "@{term S}"}, @{text "@{term T}"} and @{text "@{typ nat}"} 
+  To see this, apply @{text "@{term S}"} and @{text "@{term T}"} 
   to both functions. With @{ML make_imp} we obtain the intended term involving 
   the given arguments
 
-  @{ML_response [display,gray] "make_imp @{term S} @{term T} @{typ nat}" 
+  @{ML_response [display,gray] "make_imp @{term S} @{term T}" 
 "Const \<dots> $ 
   Abs (\"x\", Type (\"nat\",[]),
     Const \<dots> $ (Free (\"S\",\<dots>) $ \<dots>) $ (Free (\"T\",\<dots>) $ \<dots>))"}
@@ -590,48 +589,17 @@
   whereas with @{ML make_wrong_imp} we obtain a term involving the @{term "P"} 
   and @{text "Q"} from the antiquotation.
 
-  @{ML_response [display,gray] "make_wrong_imp @{term S} @{term T} @{typ nat}" 
+  @{ML_response [display,gray] "make_wrong_imp @{term S} @{term T}" 
 "Const \<dots> $ 
   Abs (\"x\", \<dots>,
     Const \<dots> $ (Const \<dots> $ (Free (\"P\",\<dots>) $ \<dots>)) $ 
                (Const \<dots> $ (Free (\"Q\",\<dots>) $ \<dots>)))"}
 
   (FIXME: handy functions for constructing terms: @{ML list_comb}, @{ML lambda})
-
-
-  Although types of terms can often be inferred, there are many
-  situations where you need to construct types manually, especially  
-  when defining constants. For example the function returning a function 
-  type is as follows:
+*}
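+
+text {*
+  To get a first idea of these two functions, consider the small sketch below:
+  @{ML list_comb} applies a term to a list of arguments, and @{ML lambda}
+  abstracts a variable in a term (the names @{text "P"} and @{text "x"} are
+  just examples).
+*}
+
+ML{*let
+  val x = Free ("x", @{typ nat})
+  val t = list_comb (@{term "P::nat \<Rightarrow> nat \<Rightarrow> bool"}, [x, @{term "0::nat"}])
+in
+  (* t is the term P x 0; the result is the abstraction %x. P x 0 *)
+  lambda x t
+end*}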
 
-*} 
-
-ML{*fun make_fun_type tau1 tau2 = Type ("fun", [tau1, tau2]) *}
-
-text {* This can be equally written with the combinator @{ML "-->"} as: *}
-
-ML{*fun make_fun_type tau1 tau2 = tau1 --> tau2 *}
 
 text {*
-  A handy function for manipulating terms is @{ML map_types}: it takes a 
-  function and applies it to every type in a term. You can, for example,
-  change every @{typ nat} in a term into an @{typ int} using the function:
-*}
-
-ML{*fun nat_to_int t =
-  (case t of
-     @{typ nat} => @{typ int}
-   | Type (s, ts) => Type (s, map nat_to_int ts)
-   | _ => t)*}
-
-text {*
-  An example as follows:
-
-@{ML_response_fake [display,gray] 
-"map_types nat_to_int @{term \"a = (1::nat)\"}" 
-"Const (\"op =\", \"int \<Rightarrow> int \<Rightarrow> bool\")
-           $ Free (\"a\", \"int\") $ Const (\"HOL.one_class.one\", \"int\")"}
-
   \begin{readmore}
   There are many functions in @{ML_file "Pure/term.ML"}, @{ML_file "Pure/logic.ML"} and
   @{ML_file "HOL/Tools/hologic.ML"} that make such manual constructions of terms 
@@ -752,8 +720,42 @@
   \begin{readmore}
   Functions about naming are implemented in @{ML_file "Pure/General/name_space.ML"};
   functions about signatures in @{ML_file "Pure/sign.ML"}.
-  
   \end{readmore}
+
+  Although types of terms can often be inferred, there are many
+  situations where you need to construct types manually, especially  
+  when defining constants. For example the function returning a function 
+  type is as follows:
+
+*} 
+
+ML{*fun make_fun_type tau1 tau2 = Type ("fun", [tau1, tau2]) *}
+
+text {* This can be equally written with the combinator @{ML "-->"} as: *}
+
+ML{*fun make_fun_type tau1 tau2 = tau1 --> tau2 *}
+
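+text {*
+  For example, applying this function to @{typ nat} and @{typ bool} should
+  yield the type @{typ "nat \<Rightarrow> bool"}:
+*}
+
+ML{*make_fun_type @{typ nat} @{typ bool} *}
+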
+text {*
+  A handy function for manipulating terms is @{ML map_types}: it takes a 
+  function and applies it to every type in a term. You can, for example,
+  change every @{typ nat} in a term into an @{typ int} using the function:
+*}
+
+ML{*fun nat_to_int t =
+  (case t of
+     @{typ nat} => @{typ int}
+   | Type (s, ts) => Type (s, map nat_to_int ts)
+   | _ => t)*}
+
+text {*
+  An example is as follows:
+
+@{ML_response_fake [display,gray] 
+"map_types nat_to_int @{term \"a = (1::nat)\"}" 
+"Const (\"op =\", \"int \<Rightarrow> int \<Rightarrow> bool\")
+           $ Free (\"a\", \"int\") $ Const (\"HOL.one_class.one\", \"int\")"}
+
+  (FIXME: readmore about types)
 *}
 
 
@@ -797,7 +799,7 @@
       (Const (@{const_name plus}, natT --> natT --> natT) $ zero $ zero)
 end" "0 + 0"}
 
-  In Isabelle not just types need to be certified, but also types. For example, 
+  In Isabelle not just terms need to be certified, but also types. For example, 
   you obtain the certified type for the Isablle type @{typ "nat \<Rightarrow> bool"} on 
   the ML-level as follows:
 
@@ -1183,23 +1185,23 @@
 text {*
   In the previous section we used \isacommand{setup} in order to make
   a theorem attribute known to Isabelle. What happens behind the scenes
-  is that \isacommand{setup} expects a functions from @{ML_type theory}
-  to @{ML_type theory}: the input theory is the current theory and the 
+  is that \isacommand{setup} expects a function of type 
+  @{ML_type "theory -> theory"}: the input theory is the current theory and the 
   output the theory where the theory attribute has been stored.
   
   This is a fundamental principle in Isabelle. A similar situation occurs 
   for example with declaring constants. The function that declares a 
-  constant on the ML-level is @{ML Sign.add_consts_i}. Recall that ML-code 
-  needs to be \isacommand{ML}~@{text "\<verbopen> \<dots> \<verbclose>"}.
-  If you write 
+  constant on the ML-level is @{ML Sign.add_consts_i}. 
+  If you write\footnote{Recall that ML-code needs to be 
+  enclosed in \isacommand{ML}~@{text "\<verbopen> \<dots> \<verbclose>"}.} 
 *}  
 
-ML{*Sign.add_consts_i [(Binding.name "BAR", @{typ "nat"}, NoSyn)] @{theory} *}
+ML{*Sign.add_consts_i [(@{binding "BAR"}, @{typ "nat"}, NoSyn)] @{theory} *}
 
 text {*
   for declaring the constant @{text "BAR"} with type @{typ nat} and 
   run the code, then you indeed obtain a theory as result. But if you 
-  query the constant with \isacommand{term}
+  query the constant on the Isabelle level using the command \isacommand{term}
 
   \begin{isabelle}
   \isacommand{term}~@{text [quotes] "BAR"}\\
@@ -1212,7 +1214,7 @@
   \isacommand{setup} is for. The constant is properly declared with
 *}
 
-setup %gray {* Sign.add_consts_i [(Binding.name "BAR", @{typ "nat"}, NoSyn)] *}
+setup %gray {* Sign.add_consts_i [(@{binding "BAR"}, @{typ "nat"}, NoSyn)] *}
 
 text {* 
   Now 
@@ -1266,7 +1268,7 @@
 (*
 setup {*   
   Sign.add_consts_i [("foo", @{typ "nat"},NoSyn)] 
- #> PureThy.add_defs false [((Binding.name "foo_def", 
+ #> PureThy.add_defs false [((@{binding "foo_def"}, 
        Logic.mk_equals (Const ("FirstSteps.foo", @{typ "nat"}), @{term "1::nat"})), [])] 
  #> snd
 *}
--- a/CookBook/Package/Ind_Code.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/Package/Ind_Code.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -65,7 +65,7 @@
 
 local_setup %gray {* fn lthy =>
 let
-  val arg =  ((Binding.name "MyTrue", NoSyn), @{term True})
+  val arg =  ((@{binding "MyTrue"}, NoSyn), @{term True})
   val (def, lthy') = make_defs arg lthy 
 in
   warning (str_of_thm lthy' def); lthy'
@@ -190,7 +190,7 @@
                @{prop "\<And>n::nat. odd n \<Longrightarrow> even (Suc n)"},
                @{prop "\<And>n::nat. even n \<Longrightarrow> odd (Suc n)"}] 
   val preds = [@{term "even::nat\<Rightarrow>bool"}, @{term "odd::nat\<Rightarrow>bool"}]
-  val prednames = [Binding.name "even", Binding.name "odd"] 
+  val prednames = [@{binding "even"}, @{binding "odd"}] 
   val syns = [NoSyn, NoSyn] 
   val arg_tyss = [[@{typ "nat"}], [@{typ "nat"}]]
   val (defs, lthy') = definitions rules preds prednames syns arg_tyss lthy
@@ -501,7 +501,7 @@
 
 text {* main internal function *}
 
-ML %linenosgray{*fun add_inductive_i pred_specs rule_specs lthy =
+ML %linenosgray{*fun add_inductive pred_specs rule_specs lthy =
 let
   val syns = map snd pred_specs
   val pred_specs' = map fst pred_specs
@@ -522,10 +522,10 @@
     |> LocalTheory.notes Thm.theoremK (map (fn (((a, atts), _), th) =>
         ((Binding.qualify false mut_name a, atts), [([th], [])])) (rule_specs ~~ intro_rules)) 
     |-> (fn intross => LocalTheory.note Thm.theoremK
-         ((Binding.qualify false mut_name (Binding.name "intros"), []), maps snd intross)) 
+         ((Binding.qualify false mut_name (@{binding "intros"}), []), maps snd intross)) 
     |>> snd 
     ||>> (LocalTheory.notes Thm.theoremK (map (fn (((R, _), _), th) =>
-         ((Binding.qualify false (Binding.name_of R) (Binding.name "induct"),
+         ((Binding.qualify false (Binding.name_of R) (@{binding "induct"}),
           [Attrib.internal (K (RuleCases.case_names case_names)),
            Attrib.internal (K (RuleCases.consumes 1)),
            Attrib.internal (K (Induct.induct_pred ""))]), [([th], [])]))
@@ -533,12 +533,12 @@
     |> snd
 end*}
 
-ML{*fun add_inductive pred_specs rule_specs lthy =
+ML{*fun add_inductive_cmd pred_specs rule_specs lthy =
 let
   val ((pred_specs', rule_specs'), _) = 
          Specification.read_spec pred_specs rule_specs lthy
 in
-  add_inductive_i pred_specs' rule_specs' lthy
+  add_inductive pred_specs' rule_specs' lthy
 end*} 
 
 ML{*val spec_parser = 
@@ -551,7 +551,7 @@
 
 ML{*val specification =
   spec_parser >>
-    (fn ((pred_specs), rule_specs) => add_inductive pred_specs rule_specs)*}
+    (fn ((pred_specs), rule_specs) => add_inductive_cmd pred_specs rule_specs)*}
 
 ML{*val _ = OuterSyntax.local_theory "simple_inductive" 
               "define inductive predicates"
--- a/CookBook/Package/Ind_Interface.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/Package/Ind_Interface.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -139,7 +139,7 @@
 in
   parse spec_parser input
 end"
-"(((NONE, [(even, NONE, NoSyn), (odd, NONE, NoSyn)]),
+"(([(even, NONE, NoSyn), (odd, NONE, NoSyn)],
      [((even0,\<dots>), \"\\^E\\^Ftoken\\^Eeven 0\\^E\\^F\\^E\"),
       ((evenS,\<dots>), \"\\^E\\^Ftoken\\^Eodd n \<Longrightarrow> even (Suc n)\\^E\\^F\\^E\"),
       ((oddS,\<dots>), \"\\^E\\^Ftoken\\^Eeven n \<Longrightarrow> odd (Suc n)\\^E\\^F\\^E\")]), [])"}
--- a/CookBook/Parsing.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/Parsing.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -618,7 +618,6 @@
 *}
 
 ML %linenosgray{*val spec_parser = 
-     OuterParse.opt_target --
      OuterParse.fixes -- 
      Scan.optional 
        (OuterParse.$$$ "where" |--
@@ -646,27 +645,17 @@
 in
   parse spec_parser input
 end"
-"(((NONE, [(even, NONE, NoSyn), (odd, NONE, NoSyn)]),
+"(([(even, NONE, NoSyn), (odd, NONE, NoSyn)],
      [((even0,\<dots>), \"\\^E\\^Ftoken\\^Eeven 0\\^E\\^F\\^E\"),
       ((evenS,\<dots>), \"\\^E\\^Ftoken\\^Eodd n \<Longrightarrow> even (Suc n)\\^E\\^F\\^E\"),
       ((oddS,\<dots>), \"\\^E\\^Ftoken\\^Eeven n \<Longrightarrow> odd (Suc n)\\^E\\^F\\^E\")]), [])"}
 
-  As you see, the result is a ``nested'' four-tuple consisting of an optional
-  locale (in this case @{ML NONE}); a list of variables with optional
-  type-annotation and syntax-annotation; and a list of rules where every rule
-  has optionally a name and an attribute.
-
+  As you see, the result is a pair consisting of a list of
+  variables with optional type-annotation and syntax-annotation, and a list of
+  rules where every rule optionally has a name and an attribute.
 
-  In Line 2 of the parser, the function @{ML OuterParse.opt_target} reads a target 
-  in order to indicate a locale in which the specification is made. For example
-
-@{ML_response [display,gray]
-"parse OuterParse.opt_target (filtered_input \"(in test)\")" "(SOME \"test\",[])"}
-  
-  returns the locale @{text [quotes] "test"}; if no target is given, like in  the
-  case of @{text "even"} and @{text "odd"}, the function returns @{ML NONE}.
-
-  The function @{ML OuterParse.fixes} in Line 3 reads an \isacommand{and}-separated 
+  The function @{ML OuterParse.fixes} in Line 2 of the parser reads an 
+  \isacommand{and}-separated 
   list of variables that can include optional type annotations and syntax translations. 
   For example:\footnote{Note that in the code we need to write 
   @{text "\\\"int \<Rightarrow> bool\\\""} in order to properly escape the double quotes
@@ -696,7 +685,7 @@
   The datastructre for sytax annotations is defined in @{ML_file "Pure/Syntax/mixfix.ML"}.
   \end{readmore}
 
-  Lines 4 to 8 in the function @{ML spec_parser} implement the parser for a
+  Lines 3 to 7 in the function @{ML spec_parser} implement the parser for a
   list of introduction rules, that is propositions with theorem
   annotations. The introduction rules are propositions parsed by @{ML
   OuterParse.prop}. However, they can include an optional theorem name plus
@@ -712,7 +701,7 @@
   The function @{ML opt_thm_name in SpecParse} is the ``optional'' variant of
   @{ML thm_name in SpecParse}. Theorem names can contain attributes. The name 
   has to end with @{text [quotes] ":"}---see the argument of 
-  the function @{ML SpecParse.opt_thm_name} in Line 8.
+  the function @{ML SpecParse.opt_thm_name} in Line 7.
 
   \begin{readmore}
   Attributes and arguments are implemented in the files @{ML_file "Pure/Isar/attrib.ML"} 
@@ -916,7 +905,7 @@
   \isacommand{function}). To achieve this kind of behaviour, you have to use the kind
   indicator @{ML thy_goal in OuterKeyword}.  Note, however, once you change the 
   ``kind'' of a command from @{ML thy_decl in OuterKeyword} to @{ML thy_goal in OuterKeyword} 
-  then the keyword file needs to be re-created.
+  then the keyword file needs to be re-created!
 
   Below we change \isacommand{foobar} so that it takes a proposition as
   argument and then starts a proof in order to prove it. Therefore in Line 13, 
@@ -981,9 +970,9 @@
 section {* Methods *}
 
 text {*
-  Methods are one central concept in Isabelle. Methods can be applied with the command
-  \isacommand{apply}. To print out all currently known methods you can use the Isabelle
-  command. 
+  Methods are a central concept in Isabelle. They are what you use, for example,
+  in \isacommand{apply}. To print out all currently known methods you can use the 
+  Isabelle command:
 *}
 
 print_methods
@@ -998,9 +987,10 @@
          "foobar method"
 
 text {*
-  It defines the method @{text foobar_meth} which takes no arguments and 
+  It defines the method @{text foobar_meth}, which takes no arguments (therefore the
+  parser @{ML Scan.succeed}) and 
   only applies the tactic @{thm [source] conjE} and then @{thm [source] conjI}.
-  It can be applied in the following proof
+  This method can be used in the following proof
 *}
 
 lemma shows "A \<and> B \<Longrightarrow> C \<and> D"
@@ -1013,6 +1003,9 @@
   \end{minipage} *}
 (*<*)oops(*>*)
 
+text {*
+  (FIXME: explain a version of rule-tac)
+*}
 
 (*<*)
 
--- a/CookBook/Recipes/TimeLimit.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/Recipes/TimeLimit.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -27,7 +27,7 @@
   takes a bit of time before it finishes. To avoid this, the call can be encapsulated 
   in a time limit of five seconds. For this you have to write
 
-@{ML_response [display,gray]
+@{ML_response_fake_both [display,gray]
 "TimeLimit.timeLimit (Time.fromSeconds 5) ackermann (4, 12) 
   handle TimeLimit.TimeOut => ~1"
 "~1"}
@@ -35,9 +35,9 @@
   where @{text TimeOut} is the exception raised when the time limit
   is reached.
 
-  Note that @{ML  "timeLimit" in TimeLimit} is only meaningful when you use PolyML, 
-  because PolyML has the infrastructure for multithreaded programming on 
-  which @{ML "timeLimit" in TimeLimit} relies.
+  Note that @{ML  "timeLimit" in TimeLimit} is only meaningful when you use PolyML 5.2.1
+  or later, because this version of PolyML has the infrastructure for multithreaded 
+  programming on which @{ML "timeLimit" in TimeLimit} relies.
 
 \begin{readmore}
    The function @{ML "timeLimit" in TimeLimit} is defined in the structure
--- a/CookBook/Solutions.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/Solutions.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -117,22 +117,18 @@
     | Abs _ => Conv.abs_conv (fn (_, ctxt) => add_conv ctxt) ctxt ctrm
     | _ => Conv.all_conv ctrm)
 
-val add_tac = CSUBGOAL (fn (goal, i) =>
-  let
-    val ctxt = ProofContext.init (Thm.theory_of_cterm goal)
-  in
-    CONVERSION
-      (Conv.params_conv ~1 (fn ctxt =>
-        (Conv.prems_conv ~1 (add_conv ctxt) then_conv
-          Conv.concl_conv ~1 (add_conv ctxt))) ctxt) i
-  end)*}
+fun add_tac ctxt = CSUBGOAL (fn (goal, i) =>
+  CONVERSION
+    (Conv.params_conv ~1 (fn ctxt =>
+       (Conv.prems_conv ~1 (add_conv ctxt) then_conv
+          Conv.concl_conv ~1 (add_conv ctxt))) ctxt) i)*}
 
 text {*
   A test case for this conversion is as follows
 *}
 
 lemma "P (Suc (99 + 1)) ((0 + 0)::nat) (Suc (3 + 3 + 3)) (4 + 1)"
-  apply(tactic {* add_tac 1 *})?
+  apply(tactic {* add_tac @{context} 1 *})?
 txt {* 
   where it produces the goal state
   
@@ -197,7 +193,7 @@
 ML{*local
   fun mk_tac tac = timing_wrapper (EVERY1 [tac, rtac @{thm cheat}])
 in
-val c_tac = mk_tac add_tac
+val c_tac = mk_tac (add_tac @{context}) 
 val s_tac = mk_tac (simp_tac (HOL_basic_ss addsimprocs [@{simproc add_sp}]))
 end*}
 
--- a/CookBook/Tactical.thy	Wed Mar 18 03:27:15 2009 +0100
+++ b/CookBook/Tactical.thy	Wed Mar 18 18:27:48 2009 +0100
@@ -137,14 +137,14 @@
 
   The tactics @{ML foo_tac} and @{ML foo_tac'} are very specific for
   analysing goals being only of the form @{prop "P \<or> Q \<Longrightarrow> Q \<or> P"}. If the goal is not
-  of this form, then they return the error message:
+  of this form, then these tactics return the error message:
 
   \begin{isabelle}
   @{text "*** empty result sequence -- proof command failed"}\\
   @{text "*** At command \"apply\"."}
   \end{isabelle}
 
-  This means the tactics failed. The reason for this error message is that tactics 
+  This means they failed. The reason for this error message is that tactics 
   are functions mapping a goal state to a (lazy) sequence of successor states. 
   Hence the type of a tactic is:
 *}
@@ -423,7 +423,7 @@
 (*<*)oops(*>*)
 
 text {* 
-  Similarl versions taking a list of theorems exist for the tactics 
+  Similar versions taking a list of theorems exist for the tactics 
   @{ML dtac} (@{ML dresolve_tac}), @{ML etac} (@{ML eresolve_tac}) and so on.
 
 
@@ -677,7 +677,7 @@
 
 text {*
   The input of the function is a term representing the subgoal and a number
-  specifying the subgoal of interest. In line 3 you need to descend under the
+  specifying the subgoal of interest. In Line 3 you need to descend under the
   outermost @{term "Trueprop"} in order to get to the connective you like to
   analyse. Otherwise goals like @{prop "A \<and> B"} are not properly
   analysed. Similarly with meta-implications in the next line.  While for the
@@ -1154,9 +1154,9 @@
 \end{figure} *}
 
 text {* 
-  To see how they work, consider the two functions in Figure~\ref{fig:printss}, which
-  print out some parts of a simpset. If you use them to print out the components of the
-  empty simpset, in ML @{ML empty_ss} and the most primitive simpset
+  To see how they work, consider the function in Figure~\ref{fig:printss}, which
+  prints out some parts of a simpset. If you use it to print out the components of the
+  empty simpset, i.e.~@{ML empty_ss}
   
   @{ML_response_fake [display,gray]
   "print_ss @{context} empty_ss"
@@ -1214,7 +1214,12 @@
 
   also produces ``nothing'', the printout is misleading. In fact
   the @{ML HOL_basic_ss} is setup so that it can solve goals of the
-  form  @{thm TrueI}, @{thm refl[no_vars]}, @{term "t \<equiv> t"} and @{thm FalseE[no_vars]}; 
+  form  
+
+  \begin{isabelle}
+  @{thm TrueI}, @{thm refl[no_vars]}, @{term "t \<equiv> t"} and @{thm FalseE[no_vars]}; 
+  \end{isabelle}
+
   and also resolve with assumptions. For example:
 *}
 
@@ -1225,9 +1230,7 @@
 
 text {*
   This behaviour is not because of simplification rules, but how the subgoaler, solver 
-  and looper are set up. @{ML HOL_basic_ss} is usually a good start to build your 
-  own simpsets, because of the low danger of causing loops via interacting simplifiction
-  rules.
+  and looper are set up in @{ML HOL_basic_ss}.
 
   The simpset @{ML HOL_ss} is an extention of @{ML HOL_basic_ss} containing 
   already many useful simplification and congruence rules for the logical 
@@ -1255,10 +1258,14 @@
 
 definition "MyTrue \<equiv> True"
 
+text {*
+  then in the following proof we can unfold this constant
+*}
+
 lemma shows "MyTrue \<Longrightarrow> True \<and> True"
 apply(rule conjI)
 apply(tactic {* rewrite_goals_tac [@{thm MyTrue_def}] *})
-txt{* then the tactic produces the goal state
+txt{* producing the goal state
 
       \begin{minipage}{\textwidth}
       @{subgoals [display]}
@@ -1737,7 +1744,7 @@
   to theorems via tactics. The type for conversions is
 *}
 
-ML{*type conv = Thm.cterm -> Thm.thm*}
+ML{*type conv = cterm -> thm*}
 
 text {*
   whereby the produced theorem is always a meta-equality. A simple conversion
@@ -2009,15 +2016,11 @@
   Here is a tactic doing exactly that:
 *}
 
-ML{*val true1_tac = CSUBGOAL (fn (goal, i) =>
-  let 
-    val ctxt = ProofContext.init (Thm.theory_of_cterm goal)
-  in
-    CONVERSION 
-      (Conv.params_conv ~1 (fn ctxt =>
-        (Conv.prems_conv ~1 (if_true1_conv ctxt) then_conv
-          Conv.concl_conv ~1 (all_true1_conv ctxt))) ctxt) i
-  end)*}
+ML{*fun true1_tac ctxt = CSUBGOAL (fn (goal, i) =>
+  CONVERSION 
+    (Conv.params_conv ~1 (fn ctxt =>
+       (Conv.prems_conv ~1 (if_true1_conv ctxt) then_conv
+          Conv.concl_conv ~1 (all_true1_conv ctxt))) ctxt) i)*}
 
 text {* 
   We call the conversions with the argument @{ML "~1"}. This is to 
@@ -2030,7 +2033,7 @@
 lemma
   "if True \<and> P then P else True \<and> False \<Longrightarrow>
   (if True \<and> Q then True \<and> Q else P) \<longrightarrow> True \<and> (True \<and> Q)"
-apply(tactic {* true1_tac 1 *})
+apply(tactic {* true1_tac @{context} 1 *})
 txt {* where the tactic yields the goal state
 
   \begin{minipage}{\textwidth}
Binary file cookbook.pdf has changed