prefer more result checking in ML antiquotations
author Norbert Schirmer <norbert.schirmer@web.de>
date Tue, 21 May 2019 14:37:39 +0200
changeset 572 438703674711
parent 571 95b42288294e
child 573 321e220a6baa
prefer more result checking in ML antiquotations
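
The gist of the change, sketched below with a made-up example (the \<open>1 upto 3\<close> call and its printed result are purely illustrative and not taken from the tutorial sources): occurrences of the unchecked antiquotation ML_matchresult_fake, whose displayed result is never validated, are replaced by ML_response (or ML_matchresult), which re-evaluates the ML code and checks the stated result against the actual output; in the stated results, \<dots> and _ appear as placeholders for elided output.

  @{ML_matchresult_fake [display, gray]
  \<open>1 upto 3\<close>
  \<open>[1, 2, 3]\<close>}

becomes (illustrative sketch, not a hunk from this changeset)

  @{ML_response [display, gray]
  \<open>1 upto 3\<close>
  \<open>[1, 2, 3]\<close>}

Expected outputs in the affected theories are updated so that they match what the checked antiquotations actually produce.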
ProgTutorial/Advanced.thy
ProgTutorial/Base.thy
ProgTutorial/Essential.thy
ProgTutorial/First_Steps.thy
ProgTutorial/Package/Ind_Code.thy
ProgTutorial/Parsing.thy
ProgTutorial/Readme.thy
ProgTutorial/Recipes/ExternalSolver.thy
ProgTutorial/Recipes/Oracle.thy
ProgTutorial/Recipes/Sat.thy
ProgTutorial/Solutions.thy
ProgTutorial/Tactical.thy
ProgTutorial/antiquote_setup.ML
--- a/ProgTutorial/Advanced.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Advanced.thy	Tue May 21 14:37:39 2019 +0200
@@ -250,19 +250,19 @@
   of fixed variables is actually quite useful. For example it prevents us from 
   fixing a variable twice
 
-  @{ML_matchresult_fake [gray, display]
+  @{ML_response [gray, display]
   \<open>@{context}
 |> Variable.add_fixes ["x", "x"]\<close> 
-  \<open>ERROR: Duplicate fixed variable(s): "x"\<close>}
+  \<open>Duplicate fixed variable(s): "x"\<close>}
 
   More importantly it also allows us to easily create fresh names for
   fixed variables.  For this you have to use the function @{ML_ind
   variant_fixes in Variable} from the structure @{ML_structure Variable}.
 
-  @{ML_matchresult_fake [gray, display]
+  @{ML_response [gray, display]
   \<open>@{context}
 |> Variable.variant_fixes ["y", "y", "z"]\<close> 
-  \<open>(["y", "ya", "z"], ...)\<close>}
+  \<open>(["y", "ya", "z"],\<dots>\<close>}
 
   Now a fresh variant for the second occurence of \<open>y\<close> is created
   avoiding any clash. In this way we can also create fresh free variables
@@ -271,7 +271,7 @@
   create two fresh variables of type @{typ nat} as variants of the
   string @{text [quotes] "x"} (Lines 6 and 7).
 
-  @{ML_matchresult_fake [display, gray, linenos]
+  @{ML_matchresult [display, gray, linenos]
   \<open>let
   val ctxt0 = @{context}
   val (_, ctxt1) = Variable.add_fixes ["x"] ctxt0
@@ -280,8 +280,8 @@
   (Variable.variant_frees ctxt0 [] frees,
    Variable.variant_frees ctxt1 [] frees)
 end\<close>
-  \<open>([("x", "nat"), ("xa", "nat")], 
- [("xa", "nat"), ("xb", "nat")])\<close>}
+  \<open>([("x", _), ("xa", _)], 
+ [("xa", _), ("xb", _)])\<close>}
 
   As you can see, if we create the fresh variables with the context \<open>ctxt0\<close>,
   then we obtain \<open>x\<close> and \<open>xa\<close>; but in \<open>ctxt1\<close> we obtain \<open>xa\<close>
@@ -291,14 +291,14 @@
   avoiding any variable occurring in those terms. For this you can use the
   function @{ML_ind declare_term in Variable} from the structure @{ML_structure Variable}.
 
-  @{ML_matchresult_fake [gray, display]
+  @{ML_matchresult [gray, display]
   \<open>let
   val ctxt1 = Variable.declare_term @{term "(x, xa)"} @{context}
   val frees = replicate 2 ("x", @{typ nat})
 in
   Variable.variant_frees ctxt1 [] frees
 end\<close>
-  \<open>[("xb", "nat"), ("xc", "nat")]\<close>}
+  \<open>[("xb", _), ("xc", _)]\<close>}
 
   The result is \<open>xb\<close> and \<open>xc\<close> for the names of the fresh
   variables, since \<open>x\<close> and \<open>xa\<close> occur in the term we declared. 
@@ -327,7 +327,7 @@
   the function @{ML_ind read_term in Syntax} from the structure
   @{ML_structure Syntax}. Consider the following code:
 
-  @{ML_matchresult_fake [gray, display]
+  @{ML_response [gray, display]
   \<open>let
   val ctxt0 = @{context}
   val ctxt1 = Variable.declare_term @{term "x::nat"} ctxt0
@@ -343,7 +343,7 @@
   type the parsed term receives depends upon the last declaration that
   is made, as the next example illustrates.
 
-  @{ML_matchresult_fake [gray, display]
+  @{ML_response [gray, display]
   \<open>let
   val ctxt1 = Variable.declare_term @{term "x::nat"} @{context}
   val ctxt2 = Variable.declare_term @{term "x::int"} ctxt1
@@ -398,7 +398,7 @@
   term, in our case @{term "P x y z x y z"}, into a theorem (disregarding 
   whether it is actually provable).
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let
   val thy = @{theory}
   val ctxt0 = @{context}
@@ -416,7 +416,7 @@
   function @{ML_ind export in Assumption} from the structure
   @{ML_structure Assumption}. Consider the following code.
 
-  @{ML_matchresult_fake [display, gray, linenos]
+  @{ML_response [display, gray, linenos]
   \<open>let
   val ctxt0 = @{context}
   val ([eq], ctxt1) = Assumption.add_assumes [@{cprop "x \<equiv> y"}] ctxt0
@@ -448,7 +448,7 @@
   @{ML_structure Variable} and @{ML_structure Assumption}. This can be seen
   in the following example.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let
   val ctxt0 = @{context}
   val ((fvs, [eq]), ctxt1) = ctxt0
@@ -458,7 +458,7 @@
 in
   Proof_Context.export ctxt1 ctxt0 [eq']
 end\<close>
-  \<open>[?x \<equiv> ?y \<Longrightarrow> ?y \<equiv> ?x]\<close>}
+  \<open>["?x \<equiv> ?y \<Longrightarrow> ?y \<equiv> ?x"]\<close>}
 \<close>
 
 
--- a/ProgTutorial/Base.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Base.thy	Tue May 21 14:37:39 2019 +0200
@@ -3,12 +3,6 @@
         "~~/src/HOL/Library/LaTeXsugar"
 begin
 
-print_ML_antiquotations
-
-text \<open>
-Why is Base not printed?
-@{cite "isa-imp"}
-\<close>
 notation (latex output)
   Cons ("_ # _" [66,65] 65)
 
@@ -27,10 +21,6 @@
 ML_file "output_tutorial.ML"
 ML_file "antiquote_setup.ML"
 
-
-(*setup {* OutputTutorial.setup *}*) (* Seems to be standard now, we don't need this anymoe *)
 setup \<open>AntiquoteSetup.setup\<close>
 
-print_ML_antiquotations
-
 end
\ No newline at end of file
--- a/ProgTutorial/Essential.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Essential.thy	Tue May 21 14:37:39 2019 +0200
@@ -45,6 +45,15 @@
 | Abs of string * typ * term 
 | $ of term * term\<close>
 
+ML \<open>
+let 
+  val redex = @{term "(\<lambda>(x::int) (y::int). x)"} 
+  val arg1 = @{term "1::int"} 
+  val arg2 = @{term "2::int"}
+in
+  pretty_term @{context} (redex $ arg1 $ arg2)
+end\<close>
+
 text \<open>
   This datatype implements Church-style lambda-terms, where types are
   explicitly recorded in variables, constants and abstractions.  The
@@ -53,7 +62,7 @@
   terms use the usual de Bruijn index mechanism for representing bound
   variables.  For example in
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>@{term "\<lambda>x y. x y"}\<close>
   \<open>Abs ("x", "'a \<Rightarrow> 'b", Abs ("y", "'a", Bound 1 $ Bound 0))\<close>}
 
@@ -69,7 +78,7 @@
   Be careful if you pretty-print terms. Consider pretty-printing the abstraction
   term shown above:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>@{term "\<lambda>x y. x y"}
   |> pretty_term @{context}
   |> pwriteln\<close>
@@ -79,7 +88,7 @@
   tacitly eta-contracted the output. You obtain a similar result 
   with beta-redexes
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>let 
   val redex = @{term "(\<lambda>(x::int) (y::int). x)"} 
   val arg1 = @{term "1::int"} 
@@ -97,7 +106,7 @@
   value @{ML_ind show_abbrevs in Syntax}. For example
 
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let 
   val redex = @{term "(\<lambda>(x::int) (y::int). x)"} 
   val arg1 = @{term "1::int"} 
@@ -114,7 +123,7 @@
   \emph{schematic} variables (term-constructor @{ML Var} and written with a
   leading question mark). Consider the following two examples
   
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>let
   val v1 = Var (("x", 3), @{typ bool})
   val v2 = Var (("x1", 3), @{typ bool})
@@ -144,20 +153,20 @@
   Constructing terms via antiquotations has the advantage that only typable
   terms can be constructed. For example
 
-  @{ML_matchresult_fake_both [display,gray]
+  @{ML_response [display,gray]
   \<open>@{term "x x"}\<close>
   \<open>Type unification failed: Occurs check!\<close>}
 
   raises a typing error, while it is perfectly ok to construct the term
   with the raw ML-constructors:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
 \<open>let
   val omega = Free ("x", @{typ "nat \<Rightarrow> nat"}) $ Free ("x", @{typ nat})
 in 
   pwriteln (pretty_term @{context} omega)
 end\<close>
-  \<open>x x\<close>}
+  "x x"}
   
   Sometimes the internal representation of terms can be surprisingly different
   from what you see at the user-level, because the layers of
@@ -204,7 +213,7 @@
   As already seen above, types can be constructed using the antiquotation 
   \<open>@{typ _}\<close>. For example:
 
-  @{ML_matchresult_fake [display,gray] \<open>@{typ "bool \<Rightarrow> nat"}\<close> \<open>bool \<Rightarrow> nat\<close>}
+  @{ML_response [display,gray] \<open>@{typ "bool \<Rightarrow> nat"}\<close> \<open>bool \<Rightarrow> nat\<close>}
 
   The corresponding datatype is
 \<close>
@@ -294,11 +303,11 @@
   After that the types for booleans, lists and so on are printed out again 
   the standard Isabelle way.
 
-  @{ML_matchresult_fake [display, gray]
-  \<open>@{typ "bool"};
-@{typ "'a list"}\<close>
-  \<open>"bool"
-"'a List.list"\<close>}
+  @{ML_response [display, gray]
+  \<open>(@{typ "bool"},
+@{typ "'a list"})\<close>
+  \<open>("bool",
+"'a list")\<close>}
 
   \begin{readmore}
   Types are described in detail in \isccite{sec:types}. Their
@@ -358,7 +367,7 @@
   term list applied to the term. For example
 
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val trm = @{term "P::bool \<Rightarrow> bool \<Rightarrow> bool"}
   val args = [@{term "True"}, @{term "False"}]
@@ -366,19 +375,19 @@
   list_comb (trm, args)
 end\<close>
 \<open>Free ("P", "bool \<Rightarrow> bool \<Rightarrow> bool") 
-   $ Const ("True", "bool") $ Const ("False", "bool")\<close>}
+   $ Const ("HOL.True", "bool") $ Const ("HOL.False", "bool")\<close>}
 
   Another handy function is @{ML_ind lambda in Term}, which abstracts a variable 
   in a term. For example
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val x_nat = @{term "x::nat"}
   val trm = @{term "(P::nat \<Rightarrow> bool) x"}
 in
   lambda x_nat trm
 end\<close>
-  \<open>Abs ("x", "Nat.nat", Free ("P", "bool \<Rightarrow> bool") $ Bound 0)\<close>}
+  \<open>Abs ("x", "nat", Free ("P", "nat \<Rightarrow> bool") $ Bound 0)\<close>}
 
   In this example, @{ML lambda} produces a de Bruijn index (i.e.\ @{ML \<open>Bound 0\<close>}), 
   and an abstraction, where it also records the type of the abstracted
@@ -387,7 +396,7 @@
   is of the same type as the abstracted variable. If it is of different type,
   as in
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val x_int = @{term "x::int"}
   val trm = @{term "(P::nat \<Rightarrow> bool) x"}
@@ -406,7 +415,7 @@
   "(f::nat \<Rightarrow> nat \<Rightarrow> nat) 0 x"} the subterm @{term "(f::nat \<Rightarrow> nat \<Rightarrow> nat) 0"} by
   @{term y}, and @{term x} by @{term True}.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val sub1 = (@{term "(f::nat \<Rightarrow> nat \<Rightarrow> nat) 0"}, @{term "y::nat \<Rightarrow> nat"})
   val sub2 = (@{term "x::nat"}, @{term "True"})
@@ -414,12 +423,12 @@
 in
   subst_free [sub1, sub2] trm
 end\<close>
-  \<open>Free ("y", "nat \<Rightarrow> nat") $ Const ("True", "bool")\<close>}
+  \<open>Free ("y", "nat \<Rightarrow> nat") $ Const ("HOL.True", "bool")\<close>}
 
   As can be seen, @{ML subst_free} does not take typability into account.
   However it takes alpha-equivalence into account:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>let
   val sub = (@{term "(\<lambda>y::nat. y)"}, @{term "x::nat"})
   val trm = @{term "(\<lambda>x::nat. x)"}
@@ -446,10 +455,10 @@
   The function returns a pair consisting of the stripped off variables and
   the body of the universal quantification. For example
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>strip_alls @{term "\<forall>x y. x = (y::bool)"}\<close>
 \<open>([Free ("x", "bool"), Free ("y", "bool")],
-  Const ("op =", _) $ Bound 1 $ Bound 0)\<close>}
+  Const ("HOL.eq",\<dots>) $ Bound 1 $ Bound 0)\<close>}
 
   Note that we produced in the body two dangling de Bruijn indices. 
   Later on we will also use the inverse function that
@@ -466,7 +475,7 @@
   bound variables. With the function @{ML subst_bounds}, you can replace these
   loose @{ML_ind Bound in Term}s with the stripped off variables.
 
-  @{ML_matchresult_fake [display, gray, linenos]
+  @{ML_response [display, gray, linenos]
   \<open>let
   val (vrs, trm) = strip_alls @{term "\<forall>x y. x = (y::bool)"}
 in 
@@ -488,20 +497,20 @@
   the loose de Bruijn index is replaced by a unique free variable. For example
 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val body = Bound 0 $ Free ("x", @{typ nat})
 in
   Term.dest_abs ("x", @{typ "nat \<Rightarrow> bool"}, body)
 end\<close>
-  \<open>("xa", Free ("xa", "Nat.nat \<Rightarrow> bool") $ Free ("x", "Nat.nat"))\<close>}
+  \<open>("xa", Free ("xa", "nat \<Rightarrow> bool") $ Free ("x", "nat"))\<close>}
 
   Sometimes it is necessary to manipulate de Bruijn indices in terms directly.
   There are many functions to do this. We describe only two. The first,
   @{ML_ind incr_boundvars in Term}, increases by an integer the indices 
   of the loose bound variables in a term. In the code below
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>@{term "\<forall>x y z u. z = u"}
   |> strip_alls
   ||> incr_boundvars 2
@@ -519,7 +528,7 @@
   The second function, @{ML_ind loose_bvar1 in Text}, tests whether a term
   contains a loose bound of a certain index. For example
 
-  @{ML_matchresult_fake [gray,display]
+  @{ML_matchresult [gray,display]
 \<open>let
   val body = snd (strip_alls @{term "\<forall>x y. x = (y::bool)"})
 in
@@ -534,7 +543,7 @@
   HOLogic} constructs an equality out of two terms.  The types needed in this
   equality are calculated from the type of the arguments. For example
 
-@{ML_matchresult_fake [gray,display]
+@{ML_response [gray,display]
 \<open>let
   val eq = HOLogic.mk_eq (@{term "True"}, @{term "False"})
 in
@@ -674,10 +683,10 @@
 text \<open>
   Here is an example:
 
-@{ML_matchresult_fake [display,gray] 
+@{ML_response [display,gray] 
 \<open>map_types nat_to_int @{term "a = (1::nat)"}\<close> 
-\<open>Const ("op =", "int \<Rightarrow> int \<Rightarrow> bool")
-           $ Free ("a", "int") $ Const ("HOL.one_class.one", "int")\<close>}
+\<open>Const ("HOL.eq", "int \<Rightarrow> int \<Rightarrow> bool")
+           $ Free ("a", "int") $ Const ("Groups.one_class.one", "int")\<close>}
 
   If you want to obtain the list of free type-variables of a term, you
   can use the function @{ML_ind  add_tfrees in Term} 
@@ -798,9 +807,9 @@
   @{ML tyenv_unif} with the built-in function @{ML_ind dest in Vartab} from the
   structure @{ML_structure Vartab}.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Vartab.dest tyenv_unif\<close>
-  \<open>[(("'a", 0), (["HOL.type"], "?'b List.list")), 
+  \<open>[(("'a", 0), (["HOL.type"], "?'b list")), 
  (("'b", 0), (["HOL.type"], "nat"))]\<close>} 
 \<close>
 
@@ -839,7 +848,7 @@
   environment in the example this function prints out the more legible:
 
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pretty_tyenv @{context} tyenv_unif\<close>
   \<open>[?'a := ?'b list, ?'b := nat]\<close>}
 
@@ -877,15 +886,21 @@
   Sign.typ_unify @{theory} (ty1, ty2) (Vartab.empty, 0) 
 end\<close>
 
+declare[[show_sorts]]
+
 text \<open>
   To print out the result type environment we switch on the printing 
   of sort information by setting @{ML_ind show_sorts in Syntax} to 
   true. This allows us to inspect the typing environment.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>pretty_tyenv @{context} tyenv\<close>
   \<open>[?'a::s1 := ?'a1::{s1, s2}, ?'b::s2 := ?'a1::{s1, s2}]\<close>}
-
+\<close>
+
+declare[[show_sorts=false]]
+
+text\<open>
   As can be seen, the type variables \<open>?'a\<close> and \<open>?'b\<close> are instantiated
   with a new type variable \<open>?'a1\<close> with sort \<open>{s1, s2}\<close>. Since a new
   type variable has been introduced the @{ML index}, originally being \<open>0\<close>, 
@@ -899,7 +914,7 @@
   \<open>?'b list * nat\<close> from the beginning of this section, and the 
   calculated type environment @{ML tyenv_unif}:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pretty_tyenv @{context} tyenv_unif\<close>
   \<open>[?'a := ?'b list, ?'b := nat]\<close>}
 
@@ -911,9 +926,9 @@
   performance reasons. To apply such a type environment to a type, say \<open>?'a *
   ?'b\<close>, you should use the function @{ML_ind norm_type in Envir}:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>Envir.norm_type tyenv_unif @{typ_pat "?'a * ?'b"}\<close>
-  \<open>nat list * nat\<close>}
+  \<open>nat list \<times> nat\<close>}
 
   Matching of types can be done with the function @{ML_ind typ_match in Sign}
   also from the structure @{ML_structure Sign}. This function returns a @{ML_type
@@ -931,7 +946,7 @@
 text \<open>
   Printing out the calculated matcher gives
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>pretty_tyenv @{context} tyenv_match\<close>
   \<open>[?'a := bool list, ?'b := nat]\<close>}
   
@@ -939,9 +954,9 @@
   applying the matcher to a type needs to be done with the function
   @{ML_ind subst_type in Envir}. For example
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>Envir.subst_type tyenv_match @{typ_pat "?'a * ?'b"}\<close>
-  \<open>bool list * nat\<close>}
+  \<open>bool list \<times> nat\<close>}
 
   Be careful to observe the difference: always use
   @{ML subst_type in Envir} for matchers and @{ML norm_type in Envir} 
@@ -960,16 +975,16 @@
   Now @{ML tyenv_unif} is equal to @{ML tyenv_match'}. If we apply 
   @{ML norm_type in Envir} to the type \<open>?'a * ?'b\<close> we obtain
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>Envir.norm_type tyenv_match' @{typ_pat "?'a * ?'b"}\<close>
-  \<open>nat list * nat\<close>}
+  \<open>nat list \<times> nat\<close>}
 
   which does not solve the matching problem, and if
   we apply @{ML subst_type in Envir} to the same type we obtain
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>Envir.subst_type tyenv_unif @{typ_pat "?'a * ?'b"}\<close>
-  \<open>?'b list * nat\<close>}
+  \<open>?'b list \<times> nat\<close>}
   
   which does not solve the unification problem.
 
@@ -1023,7 +1038,7 @@
   the type environment is empty and can be ignored. The 
   resulting term environment is
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pretty_env @{context} fo_env\<close>
   \<open>[?X := P, ?Y := \<lambda>a b. Q b a]\<close>}
 
@@ -1033,7 +1048,7 @@
   unifiers). The function @{ML subst_term in Envir} expects a type environment,
   which is set to empty in the example below, and a term environment.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let
   val trm = @{term_pat "(?X::(nat\<Rightarrow>nat\<Rightarrow>nat)\<Rightarrow>bool) ?Y"}
 in
@@ -1050,7 +1065,7 @@
   matching \<open>\<lambda>x. P x\<close> against the pattern \<open>\<lambda>y. ?X y\<close>. In this
   case, first-order matching produces \<open>[?X := P]\<close>.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let
   val fo_pat = @{term_pat "\<lambda>y. (?X::nat\<Rightarrow>bool) y"}
   val trm = @{term "\<lambda>x. (P::nat\<Rightarrow>bool) x"} 
@@ -1101,7 +1116,7 @@
   and @{ML_ind unify in Pattern}, both implemented in the structure
   @{ML_structure Pattern}. An example for higher-order pattern unification is
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let
   val trm1 = @{term_pat "\<lambda>x y. g (?X y x) (f (?Y x))"}
   val trm2 = @{term_pat "\<lambda>u v. g u (f u)"}
@@ -1152,13 +1167,15 @@
 
   We can print them out as follows.
 
-  @{ML_matchresult_fake [display, gray]
-  \<open>pretty_env @{context} (Envir.term_env un1);
-pretty_env @{context} (Envir.term_env un2);
-pretty_env @{context} (Envir.term_env un3)\<close>
-  \<open>[?X := \<lambda>a. a, ?Y := f a]
-[?X := f, ?Y := a]
-[?X := \<lambda>b. f a]\<close>}
+  @{ML_response [display, gray]
+  \<open>pretty_env @{context} (Envir.term_env un1)\<close>
+  \<open>[?X := \<lambda>a. a, ?Y := f a]\<close>}
+  @{ML_response [display, gray]
+  \<open>pretty_env @{context} (Envir.term_env un2)\<close>
+  \<open>[?X := f, ?Y := a]\<close>}
+  @{ML_response [display, gray]
+  \<open>pretty_env @{context} (Envir.term_env un3)\<close>
+  \<open>[?X := \<lambda>b. f a]\<close>}
 
 
   In case of failure the function @{ML_ind unifiers in Unify} does not raise
@@ -1181,9 +1198,9 @@
   called. This function has a built-in bound, which can be accessed and
   manipulated as a configuration value. For example
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_matchresult [display,gray]
   \<open>Config.get_global @{theory} (Unify.search_bound)\<close>
-  \<open>Int 60\<close>}
+  \<open>60\<close>}
 
   If this bound is reached during unification, Isabelle prints out the
   warning message @{text [quotes] "Unification bound exceeded"} and
@@ -1266,7 +1283,7 @@
   @{term "Q True"}. 
  
 
-  @{ML_matchresult_fake [gray,display,linenos] 
+  @{ML_response [gray,display,linenos] 
   \<open>let  
   val pat = Logic.strip_imp_concl (Thm.prop_of @{thm spec})
   val trm = @{term "Trueprop (Q True)"}
@@ -1361,9 +1378,9 @@
   detect any typing inconsistency. For example changing the type of the variable 
   @{term "x"} from @{typ "nat"} to @{typ "int"} will result in the error message: 
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>type_of (@{term "f::nat \<Rightarrow> bool"} $ @{term "x::int"})\<close> 
-  \<open>*** Exception- TYPE ("type_of: type mismatch in application" \<dots>\<close>}
+  \<open>exception TYPE raised \<dots>\<close>}
 
   Since the complete traversal might sometimes be too costly and
   not necessary, there is the function @{ML_ind fastype_of in Term}, which 
@@ -1387,7 +1404,7 @@
   complete type. The type inference can be invoked with the function
   @{ML_ind check_term in Syntax}. An example is as follows:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
 \<open>let
   val c = Const (@{const_name "plus"}, dummyT) 
   val o = @{term "1::nat"} 
@@ -1395,8 +1412,8 @@
 in   
   Syntax.check_term @{context} (c $ o $ v)
 end\<close>
-\<open>Const ("HOL.plus_class.plus", "nat \<Rightarrow> nat \<Rightarrow> nat") $
-  Const ("HOL.one_class.one", "nat") $ Free ("x", "nat")\<close>}
+\<open>Const ("Groups.plus_class.plus", "nat \<Rightarrow> nat \<Rightarrow> nat") $
+  Const ("Groups.one_class.one", "nat") $ Free ("x", "nat")\<close>}
 
   Instead of giving explicitly the type for the constant \<open>plus\<close> and the free 
   variable \<open>x\<close>, type-inference fills in the missing information.
@@ -1432,26 +1449,26 @@
   Certification is always relative to a context. For example you can 
   write:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>Thm.cterm_of @{context} @{term "(a::nat) + b = c"}\<close> 
   \<open>a + b = c\<close>}
 
   This can also be written with an antiquotation:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>@{cterm "(a::nat) + b = c"}\<close> 
   \<open>a + b = c\<close>}
 
   Attempting to obtain the certified term for
 
-  @{ML_matchresult_fake_both [display,gray] 
+  @{ML_response [display,gray] 
   \<open>@{cterm "1 + True"}\<close> 
-  \<open>Type unification failed \<dots>\<close>}
+  \<open>Type unification failed\<dots>\<close>}
 
   yields an error (since the term is not typable). A slightly more elaborate
   example that type-checks is:
 
-@{ML_matchresult_fake [display,gray] 
+@{ML_response [display,gray] 
 \<open>let
   val natT = @{typ "nat"}
   val zero = @{term "0::nat"}
@@ -1465,13 +1482,13 @@
   you obtain the certified type for the Isabelle type @{typ "nat \<Rightarrow> bool"} on 
   the ML-level as follows:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Thm.ctyp_of @{context} (@{typ nat} --> @{typ bool})\<close>
   \<open>nat \<Rightarrow> bool\<close>}
 
   or with the antiquotation:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>@{ctyp "nat \<Rightarrow> bool"}\<close>
   \<open>nat \<Rightarrow> bool\<close>}
 
@@ -1479,14 +1496,14 @@
   pattern-match against them. However, we can construct them. For example
   the function @{ML_ind apply in Thm} produces a certified application.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Thm.apply @{cterm "P::nat \<Rightarrow> bool"} @{cterm "3::nat"}\<close>
   \<open>P 3\<close>}
 
   Similarly the function @{ML_ind list_comb in Drule} from the structure @{ML_structure Drule}
   applies a list of @{ML_type cterm}s.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val chead = @{cterm "P::unit \<Rightarrow> nat \<Rightarrow> bool"}
   val cargs = [@{cterm "()"}, @{cterm "3::nat"}]
@@ -1546,7 +1563,7 @@
   If we print out the value of @{ML my_thm} then we see only the 
   final statement of the theorem.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pwriteln (pretty_thm @{context} my_thm)\<close>
   \<open>\<lbrakk>\<And>x. P x \<Longrightarrow> Q x; P t\<rbrakk> \<Longrightarrow> Q t\<close>}
 
@@ -1725,14 +1742,14 @@
   the functions @{ML_ind lhs_of in Thm} and @{ML_ind rhs_of in Thm} return 
   the left and right-hand side, respectively, of a meta-equality.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val eq = @{thm True_def}
 in
   (Thm.lhs_of eq, Thm.rhs_of eq) 
-  |> apply2 (Pretty.string_of o (pretty_cterm @{context}))
+  |> apply2 (YXML.content_of o Pretty.string_of o (pretty_cterm @{context}))
 end\<close>
-  \<open>(True, (\<lambda>x. x) = (\<lambda>x. x))\<close>}
+  \<open>("True", "(\<lambda>x. x) = (\<lambda>x. x)")\<close>}
 
   Other function produce terms that can be pattern-matched. 
   Suppose the following two theorems.
@@ -1778,7 +1795,7 @@
   this, we build the theorem @{term "True \<equiv> True"} (Line 1) and then 
   unfold the constant @{term "True"} according to its definition (Line 2).
 
-  @{ML_matchresult_fake [display,gray,linenos]
+  @{ML_response [display,gray,linenos]
   \<open>Thm.reflexive @{cterm "True"}
   |> Simplifier.rewrite_rule @{context} [@{thm True_def}]
   |> pretty_thm @{context}
@@ -1795,14 +1812,14 @@
   @{ML_ind rulify in Object_Logic} 
   replaces all object connectives by equivalents in the meta logic. For example
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>Object_Logic.rulify @{context} @{thm foo_test2}\<close>
   \<open>\<lbrakk>?A; ?B\<rbrakk> \<Longrightarrow> ?C\<close>}
 
   The transformation in the other direction can be achieved with function
   @{ML_ind atomize in Object_Logic} and the following code.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val thm = @{thm foo_test1}
   val meta_eq = Object_Logic.atomize @{context} (Thm.cprop_of thm)
@@ -1837,16 +1854,16 @@
 text \<open>
   This function produces for the theorem @{thm [source] list.induct}
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>atomize_thm @{context} @{thm list.induct}\<close>
-  \<open>\<forall>P list. P [] \<longrightarrow> (\<forall>a list. P list \<longrightarrow> P (a # list)) \<longrightarrow> P list\<close>}
+  \<open>"\<forall>P list. P [] \<longrightarrow> (\<forall>x1 x2. P x2 \<longrightarrow> P (x1 # x2)) \<longrightarrow> P list"\<close>}
 
   Theorems can also be produced from terms by giving an explicit proof. 
   One way to achieve this is by using the function @{ML_ind prove in Goal}
   in the structure @{ML_structure Goal}. For example below we use this function
   to prove the term @{term "P \<Longrightarrow> P"}.
   
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val trm = @{term "P \<Longrightarrow> P::bool"}
   val tac = K (assume_tac @{context} 1)
@@ -1868,17 +1885,17 @@
 \<close>
 
 ML %grayML\<open>fun rep_goals i = replicate i @{prop "f x = f x"}
-fun rep_tacs i = replicate i (resolve_tac @{context} [@{thm refl}])
+fun rep_tacs ctxt i = replicate i (resolve_tac ctxt [@{thm refl}])
 
 fun multi_test ctxt i =
   Goal.prove_common ctxt NONE ["f", "x"] [] (rep_goals i) 
-    (K ((Goal.conjunction_tac THEN' RANGE (rep_tacs i)) 1))\<close>
+    (K ((Goal.conjunction_tac THEN' RANGE (rep_tacs ctxt i)) 1))\<close>
 
 text \<open>
   With them we can now produce three theorem instances of the 
   proposition.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>multi_test @{context} 3\<close>
   \<open>["?f ?x = ?f ?x", "?f ?x = ?f ?x", "?f ?x = ?f ?x"]\<close>}
 
@@ -1903,7 +1920,7 @@
   purposes, or when explicit proof construction should be omitted due to performace 
   reasons. An example of this function is as follows:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>Skip_Proof.make_thm @{theory} @{prop "True = False"}\<close>
   \<open>True = False\<close>}
 
@@ -1919,16 +1936,16 @@
   Thm}. Assume you prove the following lemma.
 \<close>
 
-lemma foo_data: 
+theorem foo_data: 
   shows "P \<Longrightarrow> P \<Longrightarrow> P" by assumption
 
 text \<open>
   The auxiliary data of this lemma can be retrieved using the function 
   @{ML_ind get_tags in Thm}. So far the the auxiliary data of this lemma is 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_matchresult [display,gray]
   \<open>Thm.get_tags @{thm foo_data}\<close>
-  \<open>[("name", "General.foo_data"), ("kind", "lemma")]\<close>}
+  \<open>[("name", "Essential.foo_data"), ("kind", "theorem")]\<close>}
 
   consisting of a name and a kind.  When we store lemmas in the theorem database, 
   we might want to explicitly extend this data by attaching case names to the 
@@ -1944,10 +1961,10 @@
 text \<open>
   The data of the theorem @{thm [source] foo_data'} is then as follows:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_matchresult [display,gray]
   \<open>Thm.get_tags @{thm foo_data'}\<close>
-  \<open>[("name", "General.foo_data'"), ("kind", "lemma"), 
- ("case_names", "foo_case_one;foo_case_two")]\<close>}
+  \<open>[("name", "Essential.foo_data'"), 
+ ("case_names", "foo_case_one;foo_case_two"), ("kind", "theorem")]\<close>}
 
   You can observe the case names of this lemma on the user level when using 
   the proof methods \<open>cases\<close> and \<open>induct\<close>. In the proof below
@@ -2001,12 +2018,22 @@
 
 ML %grayML\<open>fun pthms_of (PBody {thms, ...}) = map #2 thms 
 val get_names = (map Proofterm.thm_node_name) o pthms_of 
-val get_pbodies = map (Future.join o Proofterm.thm_node_body) o pthms_of\<close>
+val get_pbodies = map (Future.join o Proofterm.thm_node_body) o pthms_of
+fun get_all_names thm =
+let
+  (*proof body with digest*)
+  val body = Proofterm.strip_thm (Thm.proof_body_of thm);
+  (*all theorems used in the graph of nested proofs*)
+in Proofterm.fold_body_thms
+     (fn {name, ...} => insert (op =) name) [body] []
+end\<close>
 
 text \<open>
   To see what their purpose is, consider the following three short proofs.
 \<close>
-
+ML \<open>
+
+\<close>
 lemma my_conjIa:
 shows "A \<and> B \<Longrightarrow> A \<and> B"
 apply(rule conjI)
@@ -2026,7 +2053,9 @@
 apply(auto)
 done
 
-
+ML "Proofterm.proofs"
+ML \<open>@{thm my_conjIa}
+  |> get_all_names\<close>
 text \<open>
   While the information about which theorems are used is obvious in
   the first two proofs, it is not obvious in the third, because of the
@@ -2036,7 +2065,7 @@
   extracting this information.  Let us first extract the name of the
   established theorem. This can be done with
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_matchresult [display,gray]
   \<open>@{thm my_conjIa}
   |> Thm.proof_body_of
   |> get_names\<close>
@@ -2048,61 +2077,44 @@
   theorem.  Notice that the first proof above references
   three theorems, namely @{thm [source] conjI}, @{thm [source] conjunct1} 
   and @{thm [source] conjunct2}. We can obtain them by descending into the
-  first level of the proof-tree, as follows.
-
-  @{ML_matchresult_fake [display,gray]
+  proof-tree. The function @{ML get_all_names} recursively collects the names of all theorems used in the proof.
+
+  @{ML_response [display,gray]
   \<open>@{thm my_conjIa}
-  |> Thm.proof_body_of
-  |> get_pbodies
-  |> map get_names
-  |> List.concat\<close>
-  \<open>["HOL.conjunct2", "HOL.conjunct1", "HOL.conjI", "Pure.protectD", 
-  "Pure.protectI"]\<close>}
+  |> get_all_names |> sort string_ord\<close>
+  \<open>["", "HOL.All_def", "HOL.FalseE", "HOL.False_def", "HOL.TrueI", "HOL.True_def", 
+   "HOL.True_or_False", "HOL.allI", "HOL.and_def", "HOL.conjI",
+   "HOL.conjunct1", "HOL.conjunct2", "HOL.disjE", "HOL.eqTrueE", "HOL.eqTrueI", 
+   "HOL.ext", "HOL.fun_cong", "HOL.iffD1", "HOL.iffD2", "HOL.iffI",
+   "HOL.impI", "HOL.mp", "HOL.or_def", "HOL.refl", "HOL.rev_iffD1", 
+   "HOL.rev_iffD2", "HOL.spec", "HOL.ssubst", "HOL.subst", "HOL.sym",
+   "Pure.protectD", "Pure.protectI"]\<close>}
 
   The theorems @{thm [source] Pure.protectD} and @{thm [source]
   Pure.protectI} that are internal theorems are always part of a
-  proof in Isabelle. Note also that applications of \<open>assumption\<close> do not
+  proof in Isabelle. The other entries are the theorems that are used in the proofs of
+  @{thm [source] conjunct1}, @{thm [source] conjunct2} and @{thm [source] conjI}. 
+
+  Note also that applications of \<open>assumption\<close> do not
   count as a separate theorem, as you can see in the following code.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_matchresult [display,gray]
   \<open>@{thm my_conjIb}
-  |> Thm.proof_body_of
-  |> get_pbodies
-  |> map get_names
-  |> List.concat\<close>
-  \<open>["Pure.protectD", "Pure.protectI"]\<close>}
+  |> get_all_names |> sort string_ord\<close>
+  \<open>["", "Pure.protectD", "Pure.protectI"]\<close>}
+
 
   Interestingly, but not surprisingly, the proof of @{thm [source]
   my_conjIc} procceeds quite differently from @{thm [source] my_conjIa}
   and @{thm [source] my_conjIb}, as can be seen by the theorems that
   are returned for @{thm [source] my_conjIc}.
-
-  @{ML_matchresult_fake [display,gray]
+ 
+  @{ML_response [display,gray]
   \<open>@{thm my_conjIc}
-  |> Thm.proof_body_of
-  |> get_pbodies
-  |> map get_names
-  |> List.concat\<close>
-  \<open>["HOL.Eq_TrueI", "HOL.simp_thms_25", "HOL.eq_reflection",
-  "HOL.conjunct2", "HOL.conjunct1", "HOL.TrueI", "Pure.protectD",
-  "Pure.protectI"]\<close>}
-
-  Of course we can also descend into the second level of the tree 
-  ``behind'' @{thm [source] my_conjIa} say, which
-  means we obtain the theorems that are used in order to prove
-  @{thm [source] conjunct1}, @{thm [source] conjunct2} and @{thm [source] conjI}.
-
-  @{ML_matchresult_fake [display, gray]
-  \<open>@{thm my_conjIa}
-  |> Thm.proof_body_of
-  |> get_pbodies
-  |> map get_pbodies
-  |> (map o map) get_names
-  |> List.concat
-  |> List.concat
-  |> duplicates (op=)\<close>
-  \<open>["HOL.spec", "HOL.and_def", "HOL.mp", "HOL.impI", "Pure.protectD",
-  "Pure.protectI"]\<close>}
+  |> get_all_names\<close>
+  \<open>["HOL.simp_thms_25", "Pure.conjunctionD1", "Pure.conjunctionD2", "Pure.conjunctionI", 
+    "HOL.rev_mp", "HOL.exI", "HOL.allE", "HOL.exE",\<dots>]\<close>}
+
 
   \begin{exercise}
   Have a look at the theorems that are used when a lemma is ``proved''
@@ -2408,7 +2420,7 @@
 text \<open>
   then you can see it is added to the initially empty list.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>MyThms.get @{context}\<close> 
   \<open>["True"]\<close>}
 
@@ -2422,7 +2434,7 @@
   not need to be explicitly given. These three declarations will cause the 
   theorem list to be updated as:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>MyThms.get @{context}\<close>
   \<open>["True", "Suc (Suc 0) = 2"]\<close>}
 
@@ -2435,7 +2447,7 @@
 
 text \<open>After this, the theorem list is again: 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>MyThms.get @{context}\<close>
   \<open>["True"]\<close>}
 
@@ -2482,7 +2494,7 @@
 \<close>
 
 ML %grayML\<open>fun pprint prt = tracing (Pretty.string_of prt)\<close>
-
+ML %invisible\<open>val pprint = YXML.content_of o Pretty.string_of\<close>
 text \<open>
   The point of the pretty-printing infrastructure is to give hints about how to
   layout text and let Isabelle do the actual layout. Let us first explain
@@ -2512,7 +2524,7 @@
 
   We obtain the same if we just use the function @{ML pprint}.
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>pprint (Pretty.str test_str)\<close>
 \<open>fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar foooooooooo
 ooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaa
@@ -2526,7 +2538,7 @@
   list. To print this list of pretty types as a single string, we concatenate
   them with the function @{ML_ind blk in Pretty} as follows:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ptrs = map Pretty.str (space_explode " " test_str)
 in
@@ -2542,7 +2554,7 @@
   indentation of the printed string. You can increase the indentation 
   and obtain
   
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ptrs = map Pretty.str (space_explode " " test_str)
 in
@@ -2557,7 +2569,7 @@
   that every line starts with the same indent, you can use the
   function @{ML_ind  indent in Pretty} as follows:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ptrs = map Pretty.str (space_explode " " test_str)
 in
@@ -2572,7 +2584,7 @@
   have the linebreaks handled properly, you can use the function 
   @{ML_ind  commas in Pretty}. For example
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020)
 in
@@ -2590,7 +2602,7 @@
 
 text \<open>
   
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020)
 in
@@ -2629,39 +2641,48 @@
   can occur. We do the same after the @{text [quotes] "and"}. This gives you
   for example
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ptrs1 = map (Pretty.str o string_of_int) (1 upto 22)
+in
+  pprint (Pretty.blk (0, and_list ptrs1))
+end\<close>
+\<open>1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 
+and 22\<close>}
+@{ML_response [display,gray]
+\<open>let
   val ptrs2 = map (Pretty.str o string_of_int) (10 upto 28)
 in
-  pprint (Pretty.blk (0, and_list ptrs1));
   pprint (Pretty.blk (0, and_list ptrs2))
 end\<close>
-\<open>1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 
-and 22
-10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 and
+\<open>10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 and
 28\<close>}
 
   Like @{ML blk in Pretty}, the function @{ML_ind chunks in Pretty} prints out 
   a list of items, but automatically inserts forced breaks between each item.
   Compare
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val a_and_b = [Pretty.str "a", Pretty.str "b"]
 in
-  pprint (Pretty.blk (0, a_and_b));
+  pprint (Pretty.blk (0, a_and_b))
+end\<close>
+\<open>ab\<close>}
+  @{ML_response [display,gray]
+  \<open>let
+  val a_and_b = [Pretty.str "a", Pretty.str "b"]
+in
   pprint (Pretty.chunks a_and_b)
 end\<close>
-\<open>ab
-a
+\<open>a
 b\<close>}
 
   The function @{ML_ind big_list in Pretty} helps with printing long lists.
   It is used for example in the command \isacommand{print\_theorems}. An
   example is as follows.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>let
   val pstrs = map (Pretty.str o string_of_int) (4 upto 10)
 in
@@ -2681,7 +2702,7 @@
   out the the terms produced by the the function @{ML de_bruijn} from 
   exercise~\ref{ex:debruijn} we obtain the following: 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>pprint (Syntax.pretty_term  @{context} (de_bruijn 4))\<close>
   \<open>(P 3 = P 4 \<longrightarrow> P 4 \<and> P 3 \<and> P 2 \<and> P 1) \<and>
 (P 2 = P 3 \<longrightarrow> P 4 \<and> P 3 \<and> P 2 \<and> P 1) \<and>
--- a/ProgTutorial/First_Steps.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/First_Steps.thy	Tue May 21 14:37:39 2019 +0200
@@ -4,7 +4,6 @@
                                                   
 chapter \<open>First Steps\label{chp:firststeps}\<close>
 
-
 text \<open>
   \begin{flushright}
   {\em ``The most effective debugging tool is still careful thought,\\ 
@@ -109,10 +108,9 @@
   You can print out error messages with the function @{ML_ind error in Library}; 
   for example:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>if 0 = 1 then true else (error "foo")\<close> 
-\<open>*** foo
-***\<close>}
+\<open>foo\<close>}
 
   This function raises the exception \<open>ERROR\<close>, which will then 
   be displayed by the infrastructure indicating that it is an error by
@@ -132,11 +130,14 @@
 
 ML %grayML\<open>val pretty_term = Syntax.pretty_term
 val pwriteln = Pretty.writeln\<close>
-
+(* We redefine pwriteln to return a value rather than merely printing to the output, so that
+the ML_response antiquotation can check the result. *)
+ML %invisible\<open>val pretty_term = Syntax.pretty_term
+val pwriteln = YXML.content_of o Pretty.string_of\<close>
 text \<open>
   They can be used as follows
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>pwriteln (pretty_term @{context} @{term "1::nat"})\<close>
   \<open>"1"\<close>}
 
@@ -159,7 +160,7 @@
 text \<open>
   Now by using this context @{ML pretty_term} prints out
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pwriteln (pretty_term show_types_ctxt @{term "(1::nat, x)"})\<close>
   \<open>(1::nat, x::'a)\<close>}
 
@@ -203,7 +204,7 @@
   @{thm [source] conjI} shown below can be used for any (typable) 
   instantiation of \<open>?P\<close> and \<open>?Q\<close>. 
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pwriteln (pretty_thm @{context} @{thm conjI})\<close>
   \<open>\<lbrakk>?P; ?Q\<rbrakk> \<Longrightarrow> ?P \<and> ?Q\<close>}
 
@@ -221,7 +222,7 @@
 text \<open>
   With this function, theorem @{thm [source] conjI} is now printed as follows:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pwriteln (pretty_thm_no_vars @{context} @{thm conjI})\<close>
   \<open>\<lbrakk>P; Q\<rbrakk> \<Longrightarrow> P \<and> Q\<close>}
   
@@ -267,12 +268,12 @@
 @{ML_matchresult_fake [display,gray]
 \<open>pwriteln (Pretty.str "First half,"); 
 pwriteln (Pretty.str "and second half.")\<close>
-\<open>First half,
-and second half.\<close>}
+\<open>"First half,
+and second half."\<close>}
 
   but as a single string with appropriate formatting. For example
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>pwriteln (Pretty.str ("First half," ^ "\\n" ^ "and second half."))\<close>
 \<open>First half,
 and second half.\<close>}
@@ -282,7 +283,7 @@
   @{ML_ind cat_lines in Library} concatenates a list of strings 
   and inserts newlines in between each element. 
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>pwriteln (Pretty.str (cat_lines ["foo", "bar"]))\<close>
   \<open>foo
 bar\<close>}
@@ -387,7 +388,7 @@
   applied to it. For example, below three variables are applied to the term 
   @{term [show_types] "P::nat \<Rightarrow> int \<Rightarrow> unit \<Rightarrow> bool"}:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val trm = @{term "P::nat \<Rightarrow> int \<Rightarrow> unit \<Rightarrow> bool"}
   val ctxt = @{context}
@@ -414,7 +415,7 @@
   terms involving fresh variables. For this the infrastructure helps
   tremendously to avoid any name clashes. Consider for example:
 
-   @{ML_matchresult_fake [display,gray]
+   @{ML_response [display,gray]
 \<open>let
   val trm = @{term "za::'a \<Rightarrow> 'b \<Rightarrow> 'c"}
   val ctxt = @{context}
@@ -599,7 +600,7 @@
 
   An example where this combinator is useful is as follows
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_matchresult [display, gray]
   \<open>let
   val ((names1, names2), _) =
     @{context}
@@ -639,12 +640,12 @@
   both as pairs. We can use this information for example to print out the definiens and 
   the theorem corresponding to the definitions. For example for the first definition:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let 
   val (one_trm, (_, one_thm)) = one_def
 in
-  pwriteln (pretty_term ctxt' one_trm);
-  pwriteln (pretty_thm ctxt' one_thm)
+  (pwriteln (pretty_term ctxt' one_trm),
+   pwriteln (pretty_thm ctxt' one_thm))
 end\<close>
   \<open>One
 One \<equiv> 1\<close>}
@@ -673,7 +674,7 @@
   @{ML_ind check_terms in Syntax}, whose purpose is to simultaneously type-check 
   a list of terms. Consider the code:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>let
   val ctxt = @{context}
 in
@@ -694,7 +695,7 @@
   Syntax} in the file @{ML_file "Pure/Syntax/syntax.ML"} that is implemented 
   in terms of @{ML singleton} and @{ML check_terms in Syntax}.} For example
 
-  @{ML_matchresult_fake [display, gray, linenos]
+  @{ML_response [display, gray, linenos]
   \<open>let 
   val ctxt = @{context}
 in
@@ -774,7 +775,7 @@
   @{ML_matchresult_fake [display, gray]
   \<open>Proof_Context.print_abbrevs false @{context}\<close>
 \<open>\<dots>
-INTER \<equiv> INFI
+INTER \<equiv> INFIMUM
 Inter \<equiv> Inf
 \<dots>\<close>}
 
@@ -783,21 +784,22 @@
   You can also use antiquotations to refer to proved theorems: 
   \<open>@{thm \<dots>}\<close> for a single theorem
 
-  @{ML_matchresult_fake [display,gray] \<open>@{thm allI}\<close> \<open>(\<And>x. ?P x) \<Longrightarrow> \<forall>x. ?P x\<close>}
+  @{ML_response [display,gray] \<open>@{thm allI}\<close> \<open>(\<And>x. ?P x) \<Longrightarrow> \<forall>x. ?P x\<close>}
 
   and \<open>@{thms \<dots>}\<close> for more than one
 
-@{ML_matchresult_fake [display,gray] 
+
+@{ML_response [display,gray] 
   \<open>@{thms conj_ac}\<close> 
-\<open>(?P \<and> ?Q) = (?Q \<and> ?P)
-(?P \<and> ?Q \<and> ?R) = (?Q \<and> ?P \<and> ?R)
-((?P \<and> ?Q) \<and> ?R) = (?P \<and> ?Q \<and> ?R)\<close>}  
+\<open>["(?P \<and> ?Q) = (?Q \<and> ?P)", 
+  "(?P \<and> ?Q \<and> ?R) = (?Q \<and> ?P \<and> ?R)", 
+  "((?P \<and> ?Q) \<and> ?R) = (?P \<and> ?Q \<and> ?R)"]\<close>}  
 
   The thm-antiquotations can also be used for manipulating theorems. For 
   example, if you need the version of the theorem @{thm [source] refl} that 
   has a meta-equality instead of an equality, you can write
 
-@{ML_matchresult_fake [display,gray] 
+@{ML_response [display,gray] 
   \<open>@{thm refl[THEN eq_reflection]}\<close>
   \<open>?x \<equiv> ?x\<close>}
 
@@ -816,7 +818,7 @@
 text \<open>
   The result can be printed out as follows.
 
-  @{ML_matchresult_fake [gray,display]
+  @{ML_response [gray,display]
 \<open>foo_thms |> pretty_thms_no_vars @{context}
          |> pwriteln\<close>
   \<open>True, False \<Longrightarrow> P\<close>}
@@ -840,9 +842,9 @@
   simp-rules. Now you can feed in the current simpset into this function. 
   The current simpset can be referred to using @{ML_ind simpset_of in Raw_Simplifier}.
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>get_thm_names_from_ss @{context}\<close> 
-  \<open>["Nat.of_nat_eq_id", "Int.of_int_eq_id", "Nat.One_nat_def", \<dots>]\<close>}
+  \<open>["Euclidean_Division.euclidean_size_int_def",\<dots>\<close>}
 
   Again, this way of referencing simpsets makes you independent from additions
   of lemmas to the simpset by the user, which can potentially cause loops in your
@@ -884,9 +886,9 @@
   so that the ML-system can understand it. (All these functions will be explained
   in more detail in later sections.) An example for this antiquotation is:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_matchresult [display,gray]
   \<open>@{term_pat "Suc (?x::nat)"}\<close>
-  \<open>Const ("Suc", "nat \<Rightarrow> nat") $ Var (("x", 0), "nat")\<close>}
+  \<open>Const ("Nat.Suc", _) $ Var (("x", 0), _)\<close>}
 
   which shows the internal representation of the term \<open>Suc ?x\<close>. Similarly
   we can write an antiquotation for type patterns. Its code is
@@ -980,19 +982,19 @@
 text \<open>
   The data can be retrieved with the projection functions defined above.
   
-  @{ML_matchresult_fake [display, gray]
-\<open>project_int (nth foo_list 0); 
-project_bool (nth foo_list 1)\<close> 
-\<open>13
+  @{ML_response [display, gray]
+\<open>(project_int (nth foo_list 0), 
+project_bool (nth foo_list 1))\<close> 
+\<open>13,
 true\<close>}
 
   Notice that we access the integer as an integer and the boolean as
   a boolean. If we attempt to access the integer as a boolean, then we get 
   a runtime error. 
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>project_bool (nth foo_list 0)\<close>  
-\<open>*** exception Match raised\<close>}
+\<open>exception Match raised\<close>}
 
   This runtime error is the reason why ML is still type-sound despite
   containing a universal type.
@@ -1062,7 +1064,7 @@
   \emph{current} theory is updated (this is explained further in 
   Section~\ref{sec:theories}). The lookup can now be performed as follows.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>lookup @{theory} "conj"\<close>
 \<open>SOME "\<lbrakk>?P; ?Q\<rbrakk> \<Longrightarrow> ?P \<and> ?Q"\<close>}
 
@@ -1073,10 +1075,11 @@
 
 setup %gray \<open>update "conj" @{thm TrueI}\<close>
 
+
 text \<open>
   and accordingly, @{ML lookup} now produces the introduction rule for @{term "True"}
   
-@{ML_matchresult_fake [display, gray]
+@{ML_response [display, gray]
 \<open>lookup @{theory} "conj"\<close>
 \<open>SOME "True"\<close>}
 
@@ -1271,9 +1274,9 @@
   The rules in the list can be retrieved using the function 
   @{ML FooRules.get}:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>FooRules.get @{context}\<close> 
-  \<open>["True", "?C","?B"]\<close>}
+  \<open>["True", "?C", "?B"]\<close>}
 
   Note that this function takes a proof context as argument. This might be 
   confusing, since the theorem list is stored as theory data. It becomes clear by knowing
--- a/ProgTutorial/Package/Ind_Code.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Package/Ind_Code.thy	Tue May 21 14:37:39 2019 +0200
@@ -493,7 +493,7 @@
   theorems of the form @{thm [source] all_elims_test}. For example we can instantiate
   the quantifiers in this theorem with @{term a}, @{term b} and @{term c} as follows:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>let
   val ctrms = [@{cterm "a::nat"}, @{cterm "b::nat"}, @{cterm "c::nat"}]
   val new_thm = all_elims ctrms @{thm all_elims_test}
@@ -510,13 +510,13 @@
   For example we can eliminate the preconditions \<open>A\<close> and \<open>B\<close> from
   @{thm [source] imp_elims_test}:
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
 \<open>let
   val res = imp_elims @{thms imp_elims_test'} @{thm imp_elims_test}
 in
   pwriteln (pretty_thm_no_vars @{context} res)
 end\<close>
-  \<open>C\<close>}
+  \<open>Q\<close>}
 
   Now we set up the proof for the introduction rule as follows:
 \<close>
--- a/ProgTutorial/Parsing.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Parsing.thy	Tue May 21 14:37:39 2019 +0200
@@ -69,9 +69,9 @@
   or raise the exception \<open>FAIL\<close> if no string can be consumed. For
   example trying to parse
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>($$ "x") (Symbol.explode "world")\<close> 
-  \<open>Exception FAIL raised\<close>}
+  \<open>exception FAIL NONE raised\<close>}
   
   will raise the exception \<open>FAIL\<close>. The function @{ML_ind "$$" in Scan} will also
   fail if you try to consume more than a single character.
@@ -97,14 +97,13 @@
   sequences, for example \<open>\<foo>\<close>, that have a special meaning in
   Isabelle. To see the difference consider
 
-@{ML_matchresult_fake [display,gray]
-\<open>let 
-  val input = "\<foo> bar"
-in
-  (String.explode input, Symbol.explode input)
-end\<close>
-\<open>(["\\", "<", "f", "o", "o", ">", " ", "b", "a", "r"],
- ["\<foo>", " ", "b", "a", "r"])\<close>}
+@{ML_matchresult [display,gray]
+\<open>String.explode "\<foo> bar"\<close>
+\<open>[#"\\", #"<", #"f", #"o", #"o", #">", #" ", #"b", #"a", #"r"]\<close>}
+
+@{ML_matchresult [display,gray]
+\<open>Symbol.explode "\<foo> bar"\<close>
+\<open>["\<foo>", " ", "b", "a", "r"]\<close>}
 
   Slightly more general than the parser @{ML \<open>$$\<close>} is the function 
   @{ML_ind one in Scan}, in that it takes a predicate as argument and 
@@ -236,16 +235,16 @@
 
   but if you invoke it on @{text [quotes] "world"}
 
-  @{ML_matchresult_fake [display,gray] \<open>(!! (fn _ => fn _ => "foo") ($$ "h")) (Symbol.explode "world")\<close>
-                               \<open>Exception ABORT raised\<close>}
+  @{ML_response [display,gray] \<open>(!! (fn _ => fn _ => "foo") ($$ "h")) (Symbol.explode "world")\<close>
+                               \<open>exception ABORT fn raised\<close>}
 
   then the parsing aborts and the error message \<open>foo\<close> is printed. In order to
   see the error message properly, you need to prefix the parser with the function 
   @{ML_ind error in Scan}. For example:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>Scan.error (!! (fn _ => fn _ => "foo") ($$ "h")) (Symbol.explode "world")\<close>
-  \<open>Exception Error "foo" raised\<close>}
+  \<open>foo\<close>}
 
   This kind of ``prefixing'' to see the correct error message is usually done by wrappers 
   such as @{ML_ind local_theory in Outer_Syntax} 
@@ -269,8 +268,8 @@
   @{text [quotes] "h"}, @{text [quotes] "e"} and @{text [quotes] "w"}, and 
   the input @{text [quotes] "holle"} 
 
-  @{ML_matchresult_fake [display,gray] \<open>Scan.error (p_followed_by_q "h" "e" "w") (Symbol.explode "holle")\<close>
-                               \<open>Exception ERROR "h is not followed by e" raised\<close>} 
+  @{ML_response [display,gray] \<open>Scan.error (p_followed_by_q "h" "e" "w") (Symbol.explode "holle")\<close>
+                               \<open>h is not followed by e\<close>} 
 
   produces the correct error message. Running it with
  
@@ -318,9 +317,9 @@
 
   The function @{ML_ind unless in Scan} takes two parsers: if the first one can 
   parse the input, then the whole parser fails; if not, then the second is tried. Therefore
-  
-  @{ML_matchresult_fake_both [display,gray] \<open>Scan.unless ($$ "h") ($$ "w") (Symbol.explode "hello")\<close>
-                               \<open>Exception FAIL raised\<close>}
+
+  @{ML_response [display,gray] \<open>Scan.unless ($$ "h") ($$ "w") (Symbol.explode "hello")\<close>
+                               \<open>exception FAIL NONE raised\<close>}
 
   fails, while
 
@@ -501,11 +500,11 @@
   messages. The following code
 
 
-@{ML_matchresult_fake [display,gray] \<open>Token.explode 
+@{ML_response [display,gray] \<open>Token.explode 
   (Thy_Header.get_keywords' @{context}) Position.none "hello world"\<close>
-\<open>[Token (_,(Ident, "hello"),_), 
- Token (_,(Space, " "),_), 
- Token (_,(Ident, "world"),_)]\<close>}
+\<open>[Token (\<dots>(Ident, "hello"),\<dots>), 
+ Token (\<dots>(Space, " "),\<dots>), 
+ Token (\<dots>(Ident, "world"),\<dots>)]\<close>}
 
   produces three tokens where the first and the last are identifiers, since
   @{text [quotes] "hello"} and @{text [quotes] "world"} do not match any
@@ -523,24 +522,24 @@
 text \<open>
   then lexing @{text [quotes] "hello world"} will produce
 
-  @{ML_matchresult_fake [display,gray] \<open>Token.explode 
+  @{ML_response [display,gray] \<open>Token.explode
   (Thy_Header.get_keywords' @{context}) Position.none "hello world"\<close> 
-\<open>[Token (_,(Keyword, "hello"),_), 
- Token (_,(Space, " "),_), 
- Token (_,(Ident, "world"),_)]\<close>}
+\<open>[Token (\<dots>(Keyword, "hello"),\<dots>), 
+ Token (\<dots>(Space, " "),\<dots>), 
+ Token (\<dots>(Ident, "world"),\<dots>)]\<close>}
 
   Many parsing functions later on will require white space, comments and the like
   to have already been filtered out.  So from now on we are going to use the 
   functions @{ML filter} and @{ML_ind is_proper in Token} to do this. 
   For example:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
    val input = Token.explode (Thy_Header.get_keywords' @{context}) Position.none "hello world"
 in
    filter Token.is_proper input
 end\<close> 
-\<open>[Token (_,(Ident, "hello"), _), Token (_,(Ident, "world"), _)]\<close>}
+\<open>[Token (\<dots>(Keyword, "hello"), \<dots>), Token (\<dots>(Ident, "world"), \<dots>)]\<close>}
 
   For convenience we define the function:
 \<close>
@@ -552,11 +551,11 @@
 text \<open>
   If you now parse
 
-@{ML_matchresult_fake [display,gray] 
+@{ML_response [display,gray] 
 \<open>filtered_input "inductive | for"\<close> 
-\<open>[Token (_,(Command, "inductive"),_), 
- Token (_,(Keyword, "|"),_), 
- Token (_,(Keyword, "for"),_)]\<close>}
+\<open>[Token (\<dots>(Command, "inductive"),\<dots>), 
+ Token (\<dots>(Keyword, "|"),\<dots>), 
+ Token (\<dots>(Keyword, "for"),\<dots>)]\<close>}
 
   you obtain a list consisting of only one command and two keyword tokens.
   If you want to see which keywords and commands are currently known to Isabelle, 
@@ -637,15 +636,15 @@
   section). A difference, however, is that the error message of @{ML \<open>Parse.!!!\<close>} is fixed to be @{text [quotes] "Outer syntax error"}
   together with a relatively precise description of the failure. For example:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let 
   val input = filtered_input "in |"
   val parse_bar_then_in = Parse.$$$ "|" -- Parse.$$$ "in"
 in 
   parse (Parse.!!! parse_bar_then_in) input 
 end\<close>
-\<open>Exception ERROR "Outer syntax error: keyword "|" expected, 
-but keyword in was found" raised\<close>
+\<open>Outer syntax error: keyword "|" expected, 
+but keyword in was found\<close>
 }
 
   \begin{exercise} (FIXME)
@@ -669,10 +668,10 @@
 text \<open>
   where we pretend the parsed string starts on line 7. An example is
 
-@{ML_matchresult_fake [display,gray]
-\<open>filtered_input' "foo \\n bar"\<close>
-\<open>[Token (("foo", ({line=7, end_line=7}, {line=7})), (Ident, "foo"), _),
- Token (("bar", ({line=8, end_line=8}, {line=8})), (Ident, "bar"), _)]\<close>}
+@{ML_response [display,gray]
+\<open>filtered_input' "foo \n bar"\<close>
+\<open>[Token (("foo", ({line=7, offset=1, end_offset=4}, {line=7, offset=4})), (Ident, "foo"),\<dots>),
+ Token (("bar", ({line=8, offset=7, end_offset=10}, {line=8, offset=10})), (Ident, "bar"),\<dots>)]\<close>}
 
   in which the @{text [quotes] "\\n"} causes the second token to be in 
   line 8.
@@ -680,13 +679,13 @@
   By using the parser @{ML position in Parse} you can access the token 
   position and return it as part of the parser result. For example
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val input = filtered_input' "where"
 in 
   parse (Parse.position (Parse.$$$ "where")) input
 end\<close>
-\<open>(("where", {line=7, end_line=7}), [])\<close>}
+\<open>(("where", {line=7, offset=1, end_offset=6}), [])\<close>}
 
   \begin{readmore}
   The functions related to positions are implemented in the file
@@ -751,7 +750,7 @@
   for terms and  types: you can just call the predefined parsers. Terms can 
   be parsed using the function @{ML_ind term in Parse}. For example:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let 
   val input = Token.explode (Thy_Header.get_keywords' @{context}) Position.none "foo"
 in 
@@ -767,13 +766,13 @@
  The result of the decoding is an XML-tree. You can see more clearly what is going on if
  you replace @{ML Position.none} by @{ML \<open>Position.line 42\<close>}, say:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let 
   val input = Token.explode (Thy_Header.get_keywords' @{context}) (Position.line 42) "foo"
 in 
   YXML.parse (fst (Parse.term input))
 end\<close>
-\<open>Elem ("token", [("line", "42"), ("end_line", "42")], [XML.Text "foo"])\<close>}
+\<open><input delimited="false" line="42" offset="1" end_offset="4">foo</input>\<close>}
 
   The positional information is stored as part of an XML-tree so that code 
   called later on will be able to give more precise error messages. 
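+
+  The plain text can be recovered from such an annotated string by stripping the 
+  markup again; a small sketch using @{ML_ind content_of in YXML}:
+
+@{ML_response [display,gray]
+\<open>let 
+  val input = Token.explode (Thy_Header.get_keywords' @{context}) (Position.line 42) "foo"
+in 
+  YXML.content_of (fst (Parse.term input))
+end\<close>
+\<open>foo\<close>}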
@@ -847,13 +846,6 @@
     ((oddS,_),_)]), [])\<close>}
 \<close>
 
-ML \<open>let
-  val input = filtered_input 
-        "foo::\"int \<Rightarrow> bool\" and bar::nat (\"BAR\" 100) and blonk"
-in
-  parse Parse.vars input
-end\<close>
-
 text \<open>
   As you see, the result is a pair consisting of a list of
   variables with optional type-annotation and syntax-annotation, and a list of
@@ -866,16 +858,16 @@
   \<open>"int \<Rightarrow> bool"\<close> in order to properly escape the double quotes
   in the compound type.}
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val input = filtered_input 
         "foo::\"int \<Rightarrow> bool\" and bar::nat (\"BAR\" 100) and blonk"
 in
   parse Parse.vars input
 end\<close>
-\<open>([(foo, SOME _, NoSyn), 
-  (bar, SOME _, Mixfix (Source {text="BAR",...}, [], 100, _)), 
-  (blonk, NONE, NoSyn)],[])\<close>}  
+\<open>([("foo", SOME \<dots>, NoSyn), 
+  ("bar", SOME \<dots>, Mixfix (Source {\<dots>text = "BAR"}, [], 100, \<dots>)), 
+  ("blonk", NONE, NoSyn)], [])\<close>}  
 \<close>
 
 text \<open>
@@ -953,11 +945,7 @@
 
 text \<open>
   Often new commands, for example for providing new definitional principles,
-  need to be implemented. While this is not difficult on the ML-level and for
-  jEdit, in order to be backwards compatible, new commands need also to be recognised 
-  by Proof-General. This results in some subtle configuration issues, which we will 
-  explain in the next section. Here we just describe how to define new commands
-  to work with jEdit.
+  need to be implemented. 
 
   Let us start with a ``silly'' command that does nothing at all. We
   shall name this command \isacommand{foobar}.  Before you can
@@ -1000,9 +988,7 @@
 
 text \<open>
   but of course you will not see anything since \isacommand{foobar} is
-  not intended to do anything.  Remember, however, that this only
-  works in jEdit. In order to enable also Proof-General recognise this
-  command, a keyword file needs to be generated (see next section).
+  not intended to do anything.
 
   As it stands, the command \isacommand{foobar} is not very useful. Let
   us refine it a bit next by letting it take a proposition as argument
--- a/ProgTutorial/Readme.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Readme.thy	Tue May 21 14:37:39 2019 +0200
@@ -90,7 +90,7 @@
 
   \begin{center}\small
   \begin{tabular}{ll}
-  \<open>@{ML_matchresult_fake\<close> & \<open>\<open>term_of @{theory} @{term "a + b = c"}\<close>}\<close>\\
+  \<open>@{ML_matchresult_fake\<close> & \<open>\<open>term_of @{theory} @{term "a + b = c"}\<close>\<close>\\
                                & \<open>\<open>a + b = c\<close>}\<close>\smallskip\\ 
   \<open>@{ML_matchresult_fake\<close> & \<open>\<open>($$ "x") (explode "world")\<close>\<close>\\ 
                                & \<open>\<open>Exception FAIL raised\<close>}\<close>
@@ -129,14 +129,42 @@
   \end{tabular}
   \end{center}
 
- which produce respectively
+ which produces
+
+  \begin{center}\small
+  @{ML_response \<open>1 upto 10\<close>}
+  \end{center}
+ 
+  You can give a second argument for the expected response. This is matched against the actual
+  response by crude wildcard matching, where whitespace and \<open>\<dots>\<close> are treated as wildcards.
 
   \begin{center}\small
-  \begin{tabular}{p{3cm}p{3cm}}
-  @{ML_response \<open>1 upto 10\<close>}
+  \begin{tabular}{ll}
+  \<open>@{ML_response\<close> & \<open>\<open>1 upto 20\<close>\<close>\\
+                  & \<open>"[1, 2, 3, 4, 5, 6\<dots>\<close>\\
+                  &   \<open>18, 19, 20]"}\<close>\\
   \end{tabular}
   \end{center}
- 
+
+  will produce
+
+  @{ML_response \<open>1 upto 20\<close>
+\<open>[1, 2, 3, 4, 5, 6\<dots> 
+  18, 19, 20]\<close>}
+
+  Note that exceptions are also converted to strings and can thus be checked in the response 
+  string.
+
+  @{ML_response \<open>error "hallo"\<close>
+  \<open>hallo\<close>}
+
+So as a rule of thumb, to facilitate result checking, prefer this order (a small illustration follows the list):
+\begin{enumerate}
+ \item \<open>@{ML_matchresult \<open>expr\<close> \<open>pat\<close>}\<close>
+ \item \<open>@{ML_response \<open>expr\<close> \<open>string\<close>}\<close>
+ \item \<open>@{ML_matchresult_fake \<open>expr\<close> \<open>pat\<close>}\<close> or \<open>@{ML_response \<open>expr\<close>}\<close>
+ \item \<open>@{ML_matchresult_fake_both \<open>expr\<close> \<open>pat\<close>}\<close>
+\end{enumerate}
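+
+As a hypothetical illustration of the first, most preferred form:
+
+  \begin{center}\small
+  @{ML_matchresult \<open>rev [1, 2, 3]\<close> \<open>[3, 2, 1]\<close>}
+  \end{center}
+
+Here the result is checked by genuine ML pattern matching rather than by comparing 
+output strings.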
 
   \item[$\bullet$] \<open>@{ML_file "name"}\<close> should be used when
   referring to a file. It checks whether the file exists.  An example
--- a/ProgTutorial/Recipes/ExternalSolver.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Recipes/ExternalSolver.thy	Tue May 21 14:37:39 2019 +0200
@@ -27,7 +27,7 @@
   properly. For example, the following expression takes only approximately
   one second:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
     \<open>Timeout.apply (Time.fromSeconds 1) Isabelle_System.bash_output "sleep 30"
      handle TIMEOUT => ("timeout", ~1)\<close> \<open>("timeout", ~1)\<close>}
 
@@ -46,7 +46,7 @@
 
   In Isabelle, this application may now be executed by
 
-  @{ML_matchresult_fake [display,gray] \<open>Isabelle_System.bash_output "$FOO"\<close> \<open>\<dots>\<close>}
+  @{ML_response [display,gray] \<open>Isabelle_System.bash_output "$FOO"\<close> \<open>\<dots>\<close>}
 \<close>
 
 
--- a/ProgTutorial/Recipes/Oracle.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Recipes/Oracle.thy	Tue May 21 14:37:39 2019 +0200
@@ -110,7 +110,7 @@
 text \<open>
   Here is what we get when applying the oracle:
 
-  @{ML_matchresult_fake \<open>iff_oracle @{cprop "p = (p::bool)"}\<close> \<open>p = p\<close>}
+  @{ML_response \<open>iff_oracle @{cprop "p = (p::bool)"}\<close> \<open>p = p\<close>}
 
   (FIXME: is there a better way to present the theorem?)
 
--- a/ProgTutorial/Recipes/Sat.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Recipes/Sat.thy	Tue May 21 14:37:39 2019 +0200
@@ -42,7 +42,7 @@
   (appropriately updated) in the result. In the case above the
   input table is empty (i.e.~@{ML Termtab.empty}) and the output table is
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Termtab.dest table\<close>
   \<open>[(Free ("A", "bool"), 1), (Free ("B", "bool"), 2)]\<close>}
 
@@ -58,9 +58,9 @@
   returns @{ML \<open>BoolVar 1\<close> in Prop_Logic} for  @{ML pform'} and the table 
   @{ML table'} is:
 
-  @{ML_matchresult_fake [display,gray]
-  \<open>map (apfst (Syntax.string_of_term @{context})) (Termtab.dest table')\<close>
-  \<open>(\<forall>x. P x, 1)\<close>}
+  @{ML_response [display,gray]
+  \<open>map (apfst (YXML.content_of o Syntax.string_of_term @{context})) (Termtab.dest table')\<close>
+  \<open>("\<forall>x. P x", 1)\<close>}
   
  In the printout of the table, we used some pretty-printing scaffolding 
  to better see which assignment the table contains.
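+  (The function @{ML apfst} used here simply applies a function to the first 
+  component of a pair, leaving the second untouched; for example 
+  @{ML_matchresult \<open>apfst (fn i => i + 1) (1, "a")\<close> \<open>(2, "a")\<close>}.)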
--- a/ProgTutorial/Solutions.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Solutions.thy	Tue May 21 14:37:39 2019 +0200
@@ -55,7 +55,7 @@
   \<open>-1\<close> to account for the deleted quantifier. An example is 
   as follows:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>@{prop "\<forall>x y z. P x = P z"}
   |> kill_trivial_quantifiers
   |> pretty_term @{context} 
@@ -330,9 +330,9 @@
 text \<open>
   This function generates for example:
 
-  @{ML_matchresult_fake [display,gray] 
+  @{ML_response [display,gray] 
   \<open>pwriteln (pretty_term @{context} (term_tree 2))\<close> 
-  \<open>(1 + 2) + (3 + 4)\<close>} 
+  \<open>1 + 2 + (3 + 4)\<close>} 
 
   The next function constructs a goal of the form \<open>P \<dots>\<close> with a term 
   produced by @{ML term_tree} filled in.
--- a/ProgTutorial/Tactical.thy	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/Tactical.thy	Tue May 21 14:37:39 2019 +0200
@@ -47,7 +47,7 @@
 text \<open>
   This proof translates to the following ML-code.
   
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val ctxt = @{context}
   val goal = @{prop "P \<or> Q \<Longrightarrow> Q \<or> P"}
@@ -763,17 +763,19 @@
   constraints is by pre-instantiating theorems with other theorems. 
   The function @{ML_ind RS in Drule}, for example, does this.
   
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>@{thm disjI1} RS @{thm conjI}\<close> \<open>\<lbrakk>?P1; ?Q\<rbrakk> \<Longrightarrow> (?P1 \<or> ?Q1) \<and> ?Q\<close>}
 
   In this example it instantiates the first premise of the \<open>conjI\<close>-theorem 
   with the theorem \<open>disjI1\<close>. If the instantiation is impossible, as in the 
   case of
 
-  @{ML_matchresult_fake_both [display,gray]
+  @{ML_response [display,gray]
   \<open>@{thm conjI} RS @{thm mp}\<close> 
-\<open>*** Exception- THM ("RSN: no unifiers", 1, 
-["\<lbrakk>?P; ?Q\<rbrakk> \<Longrightarrow> ?P \<and> ?Q", "\<lbrakk>?P \<longrightarrow> ?Q; ?P\<rbrakk> \<Longrightarrow> ?Q"]) raised\<close>}
+\<open>exception THM 1 raised\<dots> 
+ RSN: no unifiers 
+ \<lbrakk>?P; ?Q\<rbrakk> \<Longrightarrow> ?P \<and> ?Q 
+ \<lbrakk>?P \<longrightarrow> ?Q; ?P\<rbrakk> \<Longrightarrow> ?Q\<close>}
 
   then the function raises an exception. The function @{ML_ind  RSN in Drule} 
   is similar to @{ML RS}, but takes an additional number as argument. This 
@@ -782,7 +784,7 @@
   If you want to instantiate more than one premise of a theorem, you can use 
   the function @{ML_ind MRS in Drule}:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>[@{thm disjI1}, @{thm disjI2}] MRS @{thm conjI}\<close> 
   \<open>\<lbrakk>?P2; ?Q1\<rbrakk> \<Longrightarrow> (?P2 \<or> ?Q2) \<and> (?P1 \<or> ?Q1)\<close>}
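+
+  For @{ML RSN}, mentioned above, a sketch of the analogous step is as follows 
+  (the exact renaming of the schematic variables is left unchecked here):
+
+  @{ML_response [display,gray]
+  \<open>@{thm disjI1} RSN (2, @{thm conjI})\<close> 
+  \<open>\<lbrakk>?P;\<dots>\<rbrakk> \<Longrightarrow> ?P \<and> (\<dots> \<or> \<dots>)\<close>}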
 
@@ -791,17 +793,17 @@
   example in the code below, every theorem in the second list is 
   instantiated with every theorem in the first.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val list1 = [@{thm impI}, @{thm disjI2}]
   val list2 = [@{thm conjI}, @{thm disjI1}]
 in
   list1 RL list2
 end\<close> 
-\<open>[\<lbrakk>?P1 \<Longrightarrow> ?Q1; ?Q\<rbrakk> \<Longrightarrow> (?P1 \<longrightarrow> ?Q1) \<and> ?Q, 
- \<lbrakk>?Q1; ?Q\<rbrakk> \<Longrightarrow> (?P1 \<or> ?Q1) \<and> ?Q,
- (?P1 \<Longrightarrow> ?Q1) \<Longrightarrow> (?P1 \<longrightarrow> ?Q1) \<or> ?Q, 
- ?Q1 \<Longrightarrow> (?P1 \<or> ?Q1) \<or> ?Q]\<close>}
+\<open>["\<lbrakk>?P1 \<Longrightarrow> ?Q1; ?Q\<rbrakk> \<Longrightarrow> (?P1 \<longrightarrow> ?Q1) \<and> ?Q", 
+ "\<lbrakk>?Q1; ?Q\<rbrakk> \<Longrightarrow> (?P1 \<or> ?Q1) \<and> ?Q",
+ "(?P1 \<Longrightarrow> ?Q1) \<Longrightarrow> (?P1 \<longrightarrow> ?Q1) \<or> ?Q", 
+ "?Q1 \<Longrightarrow> (?P1 \<or> ?Q1) \<or> ?Q"]\<close>}
 
   \begin{readmore}
   The combinators for instantiating theorems with other theorems are 
@@ -1458,7 +1460,7 @@
   prints out some parts of a simpset. If you use it to print out the components of the
   empty simpset, i.e., @{ML_ind empty_ss in Raw_Simplifier}
   
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>print_ss @{context} empty_ss\<close>
 \<open>Simplification rules:
 Congruences rules:
@@ -1474,10 +1476,10 @@
 text \<open>
   Printing then out the components of the simpset gives:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>print_ss @{context} (Raw_Simplifier.simpset_of ss1)\<close>
 \<open>Simplification rules:
-  ??.unknown: A - B \<inter> C \<equiv> A - B \<union> (A - C)
+  ??.unknown: A1 - B1 \<inter> C1 \<equiv> A1 - B1 \<union> (A1 - C1)
 Congruences rules:
 Simproc patterns:\<close>}
 
@@ -1491,12 +1493,12 @@
 text \<open>
   gives
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>print_ss @{context} (Raw_Simplifier.simpset_of ss2)\<close>
 \<open>Simplification rules:
-  ??.unknown: A - B \<inter> C \<equiv> A - B \<union> (A - C)
+  ??.unknown: A1 - B1 \<inter> C1 \<equiv> A1 - B1 \<union> (A1 - C1)
 Congruences rules:
-  Complete_Lattices.Inf_class.INFIMUM: 
+  Complete_Lattices.Inf_class.Inf: 
     \<lbrakk>A1 = B1; \<And>x. x \<in> B1 =simp=> C1 x = D1 x\<rbrakk> \<Longrightarrow> INFIMUM A1 C1 \<equiv> INFIMUM B1 D1
 Simproc patterns:\<close>}
 
@@ -1508,7 +1510,7 @@
   In the context of HOL, the first really useful simpset is @{ML_ind
   HOL_basic_ss in Simpdata}. While printing out the components of this simpset
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>print_ss @{context} HOL_basic_ss\<close>
 \<open>Simplification rules:
 Congruences rules:
@@ -1534,6 +1536,7 @@
 apply(tactic \<open>ALLGOALS (simp_tac (put_simpset HOL_basic_ss @{context}))\<close>)
 done
 
+declare [[ML_print_depth = 200]]
 text \<open>
  This behaviour is not because of simplification rules, but because of how the subgoaler, 
  solver and looper are set up in @{ML HOL_basic_ss}.
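+
+  If a few extra rules are needed on top of @{ML HOL_basic_ss}, they can be added 
+  explicitly; a minimal sketch (the fact collection @{thm [source] simp_thms} is just an 
+  arbitrary example) is
+  @{ML \<open>fn ctxt => simp_tac (put_simpset HOL_basic_ss ctxt addsimps @{thms simp_thms})\<close>}.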
@@ -1542,17 +1545,17 @@
   already many useful simplification and congruence rules for the logical 
   connectives in HOL. 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>print_ss @{context} HOL_ss\<close>
 \<open>Simplification rules:
   Pure.triv_forall_equality: (\<And>x. PROP V) \<equiv> PROP V
   HOL.the_eq_trivial: THE x. x = y \<equiv> y
-  HOL.the_sym_eq_trivial: THE ya. y = ya \<equiv> y
+  HOL.the_sym_eq_trivial: THE y. y = y \<equiv> y
   \<dots>
 Congruences rules:
   HOL.simp_implies: \<dots>
     \<Longrightarrow> (PROP P =simp=> PROP Q) \<equiv> (PROP P' =simp=> PROP Q')
-  op -->: \<lbrakk>P \<equiv> P'; P' \<Longrightarrow> Q \<equiv> Q'\<rbrakk> \<Longrightarrow> P \<longrightarrow> Q \<equiv> P' \<longrightarrow> Q'
+  HOL.implies: \<lbrakk>P \<equiv> P'; P' \<Longrightarrow> Q \<equiv> Q'\<rbrakk> \<Longrightarrow> P \<longrightarrow> Q \<equiv> P' \<longrightarrow> Q'
 Simproc patterns:
   \<dots>\<close>}
 
@@ -2064,16 +2067,16 @@
   is the function @{ML_ind  all_conv in Conv}, which maps a @{ML_type cterm} to an
   instance of the (meta)reflexivity theorem. For example:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Conv.all_conv @{cterm "Foo \<or> Bar"}\<close>
   \<open>Foo \<or> Bar \<equiv> Foo \<or> Bar\<close>}
 
   Another simple conversion is @{ML_ind  no_conv in Conv} which always raises the 
   exception @{ML CTERM}.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Conv.no_conv @{cterm True}\<close> 
-  \<open>*** Exception- CTERM ("no conversion", []) raised\<close>}
+  \<open>exception CTERM \<dots>: no conversion\<close>}
 
   A more interesting conversion is the function @{ML_ind  beta_conversion in Thm}: it 
   produces a meta-equation between a term and its beta-normal form. For example
@@ -2111,7 +2114,7 @@
 
 or in the pretty-printed form
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
    val add = @{cterm "\<lambda>x y. x + (y::nat)"}
    val two = @{cterm "2::nat"}
@@ -2146,7 +2149,7 @@
   It can be used for example to rewrite @{term "True \<and> (Foo \<longrightarrow> Bar)"} 
   to @{term "Foo \<longrightarrow> Bar"}. The code is as follows.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let 
   val ctrm = @{cterm "True \<and> (Foo \<longrightarrow> Bar)"}
 in
@@ -2182,7 +2185,7 @@
   The conversion combinator @{ML_ind else_conv in Conv} tries out the 
   first one, and if it does not apply, tries the second. For example 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val conv = Conv.rewr_conv @{thm true_conj1} else_conv Conv.all_conv
   val ctrm1 = @{cterm "True \<and> Q"}
@@ -2190,7 +2193,7 @@
 in
   (conv ctrm1, conv ctrm2)
 end\<close>
-\<open>(True \<and> Q \<equiv> Q, P \<or> True \<and> Q \<equiv> P \<or> True \<and> Q)\<close>}
+\<open>("True \<and> Q \<equiv> Q", "P \<or> True \<and> Q \<equiv> P \<or> True \<and> Q")\<close>}
 
   Here the conversion @{thm [source] true_conj1} only applies
   in the first case, but fails in the second. The whole conversion
@@ -2199,7 +2202,7 @@
   behaviour can also be achieved with conversion combinator @{ML_ind try_conv in Conv}.
   For example
   
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val conv = Conv.try_conv (Conv.rewr_conv @{thm true_conj1})
   val ctrm = @{cterm "True \<or> P"}
@@ -2220,14 +2223,14 @@
   a conversion to the first, respectively second, argument of an application.  
   For example
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let 
   val conv = Conv.arg_conv (Conv.rewr_conv @{thm true_conj1})
   val ctrm = @{cterm "P \<or> (True \<and> Q)"}
 in
   conv ctrm
 end\<close>
-\<open>P \<or> (True \<and> Q) \<equiv> P \<or> Q\<close>}
+\<open>P \<or> True \<and> Q \<equiv> P \<or> Q\<close>}
 
   The reason for this behaviour is that \<open>(op \<or>)\<close> expects two
   arguments.  Therefore the term must be of the form \<open>(Const \<dots> $ t1) $ t2\<close>. The
@@ -2238,14 +2241,14 @@
   The function @{ML_ind  abs_conv in Conv} applies a conversion under an 
   abstraction. For example:
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let 
   val conv = Conv.rewr_conv @{thm true_conj1} 
   val ctrm = @{cterm "\<lambda>P. True \<and> (P \<and> Foo)"}
 in
   Conv.abs_conv (K conv) @{context} ctrm
 end\<close>
-  \<open>\<lambda>P. True \<and> (P \<and> Foo) \<equiv> \<lambda>P. P \<and> Foo\<close>}
+  \<open>"\<lambda>P. True \<and> P \<and> Foo \<equiv> \<lambda>P. P \<and> Foo"\<close>}
   
   Note that this conversion needs a context as an argument. We also give the
   conversion as \<open>(K conv)\<close>, which is a function that ignores its
@@ -2279,7 +2282,7 @@
   to be able to pattern-match the term. To see this 
   conversion in action, consider the following example:
 
-@{ML_matchresult_fake [display,gray]
+@{ML_response [display,gray]
 \<open>let
   val conv = true_conj1_conv @{context}
   val ctrm = @{cterm "distinct [1::nat, x] \<longrightarrow> True \<and> 1 \<noteq> x"}
@@ -2358,7 +2361,7 @@
   soon as it found one redex. Here is an example for this conversion:
 
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val ctrm = @{cterm "if P (True \<and> 1 \<noteq> (2::nat)) 
                         then True \<and> True else True \<and> False"}
@@ -2382,7 +2385,7 @@
   Using the conversion you can transform this theorem into a 
   new theorem as follows
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
 \<open>let
   val conv = Conv.fconv_rule (true_conj1_conv @{context}) 
   val thm = @{thm foo_test}
@@ -2465,7 +2468,7 @@
   into @{thm [source] id2_def} by abstracting all variables on the 
   left-hand side in the right-hand side.
 
-  @{ML_matchresult_fake [display,gray]
+  @{ML_response [display,gray]
   \<open>Drule.abs_def @{thm id1_def}\<close>
   \<open>id1 \<equiv> \<lambda>x. x\<close>}
 
@@ -2513,7 +2516,7 @@
   export the context \<open>ctxt'\<close> back to \<open>ctxt\<close> in order to 
   produce meta-variables for the theorem.  An example is as follows.
 
-  @{ML_matchresult_fake [display, gray]
+  @{ML_response [display, gray]
   \<open>unabs_def @{context} @{thm id2_def}\<close>
   \<open>id2 ?x1 \<equiv> ?x1\<close>}
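+
+  The export step presumably amounts to the usual idiom, something like
+  @{ML \<open>fn ctxt' => fn ctxt => singleton (Proof_Context.export ctxt' ctxt)\<close>},
+  which turns the variables fixed in \<open>ctxt'\<close> into schematic variables.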
 \<close>
--- a/ProgTutorial/antiquote_setup.ML	Fri May 17 11:21:09 2019 +0200
+++ b/ProgTutorial/antiquote_setup.ML	Tue May 21 14:37:39 2019 +0200
@@ -12,6 +12,27 @@
 fun prefix_lines prfx txt = 
   map (fn s => prfx ^ s) (split_lines txt)
 
+fun is_sep "\<dots>" = true
+  | is_sep s = Symbol.is_ascii_blank s; 
+
+fun scan_word sep =
+  Scan.many1 sep >> K NONE ||
+  Scan.many1 (fn s => not (sep s) andalso Symbol.not_eof s) >> (SOME o implode);
+
+fun split_words sep = Symbol.scanner "Bad text" (Scan.repeat (scan_word sep) >> map_filter I);
+
+fun explode_words sep = split_words sep o Symbol.explode;
+
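+(*crude wildcard matching: the pattern is split into words at blanks and \<dots>; 
+  it matches if all pattern words occur in the string in the given order*)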
+fun match_string sep pat str =
+  let
+    fun match [] _ = true
+      | match (p :: ps) s =
+          size p <= size s andalso
+            (case try (unprefix p) s of
+              SOME s' => match ps s'
+            | NONE => match (p :: ps) (String.substring (s, 1, size s - 1)));
+  in match (explode_words sep pat) str end;
+
 fun ml_with_vars' ys txt = 
     implode ["fn ", (case ys of [] => "_" | _ => enclose "(" ")" (commas ys)), " => (", txt, ")"]
 
@@ -70,14 +91,20 @@
   (Scan.optional (Args.$$$ "for" |-- Parse.!!! (Scan.repeat1 Args.name)) [] --
    Scan.option (Args.$$$ "in"  |-- Parse.!!! Args.name))) 
 
-fun output_ml_response ctxt src =
+fun output_ml_response ignore_pat ctxt (src, opat) =
 let 
   val res = eval_response ctxt src
+  val _ = writeln res
+  val cnt = YXML.content_of res
+  val pat = case opat of NONE => cnt 
+            | SOME p => p |>  Input.source_content 
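+  (*accept if the pattern is ignored, if we are merely generating LaTeX, or if 
+    the crude wildcard match of the pattern against the actual output succeeds*)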
+  val _ = if ignore_pat orelse Print_Mode.print_mode_active Latex.latexN orelse match_string is_sep pat cnt then () 
+          else error (cat_lines ["Substring:", pat, "not contained in:", cnt])
+  val out = if ignore_pat then cnt else pat
 in 
-  OutputTutorial.output ctxt ([Input.source_content src] @ [Library.prefix_lines "> " res])
+  OutputTutorial.output ctxt ([Input.source_content src] @ [Library.prefix_lines "> " out])
 end
 
-
 (* checks and prints a single ML-item and produces an index entry *)
 fun output_ml_ind ctxt (src, stru) =
 let 
@@ -149,11 +176,13 @@
 
 val single_arg = Scan.lift (Args.text_input)
 val two_args   = Scan.lift (Args.text_input -- Args.text_input)
+val maybe_two_args = Scan.lift (Args.text_input -- Scan.option Args.text_input)
 val test = Scan.lift (Args.name -- Args.name -- Scan.option (Args.$$$ "with"  |-- Args.name))
 
 val ml_setup = 
   Thy_Output.antiquotation_raw @{binding "ML"} parser_ml output_ml
-  #> Thy_Output.antiquotation_raw @{binding "ML_response"} single_arg output_ml_response
+  #> Thy_Output.antiquotation_raw @{binding "ML_response"} maybe_two_args (output_ml_response false)
+  #> Thy_Output.antiquotation_raw @{binding "ML_response_ignore"} maybe_two_args (output_ml_response true)
   #> Thy_Output.antiquotation_raw @{binding "ML_ind"} parser_ml_ind output_ml_ind
   #> Thy_Output.antiquotation_raw @{binding "ML_type_ind"} single_arg output_type_ind
   #> Thy_Output.antiquotation_raw @{binding "ML_structure_ind"} single_arg output_struct_ind