# HG changeset patch # User Norbert Schirmer # Date 1558442259 -7200 # Node ID 438703674711f11fbf545e8f513970f25cccc70b # Parent 95b42288294e201e6fc60f0700b10e9a6eac3c09 prefer more result checking in ML antiquotations diff -r 95b42288294e -r 438703674711 ProgTutorial/Advanced.thy --- a/ProgTutorial/Advanced.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Advanced.thy Tue May 21 14:37:39 2019 +0200 @@ -250,19 +250,19 @@ of fixed variables is actually quite useful. For example it prevents us from fixing a variable twice - @{ML_matchresult_fake [gray, display] + @{ML_response [gray, display] \@{context} |> Variable.add_fixes ["x", "x"]\ - \ERROR: Duplicate fixed variable(s): "x"\} + \Duplicate fixed variable(s): "x"\} More importantly it also allows us to easily create fresh names for fixed variables. For this you have to use the function @{ML_ind variant_fixes in Variable} from the structure @{ML_structure Variable}. - @{ML_matchresult_fake [gray, display] + @{ML_response [gray, display] \@{context} |> Variable.variant_fixes ["y", "y", "z"]\ - \(["y", "ya", "z"], ...)\} + \(["y", "ya", "z"],\\} Now a fresh variant for the second occurence of \y\ is created avoiding any clash. In this way we can also create fresh free variables @@ -271,7 +271,7 @@ create two fresh variables of type @{typ nat} as variants of the string @{text [quotes] "x"} (Lines 6 and 7). - @{ML_matchresult_fake [display, gray, linenos] + @{ML_matchresult [display, gray, linenos] \let val ctxt0 = @{context} val (_, ctxt1) = Variable.add_fixes ["x"] ctxt0 @@ -280,8 +280,8 @@ (Variable.variant_frees ctxt0 [] frees, Variable.variant_frees ctxt1 [] frees) end\ - \([("x", "nat"), ("xa", "nat")], - [("xa", "nat"), ("xb", "nat")])\} + \([("x", _), ("xa", _)], + [("xa", _), ("xb", _)])\} As you can see, if we create the fresh variables with the context \ctxt0\, then we obtain \x\ and \xa\; but in \ctxt1\ we obtain \xa\ @@ -291,14 +291,14 @@ avoiding any variable occurring in those terms. For this you can use the function @{ML_ind declare_term in Variable} from the structure @{ML_structure Variable}. - @{ML_matchresult_fake [gray, display] + @{ML_matchresult [gray, display] \let val ctxt1 = Variable.declare_term @{term "(x, xa)"} @{context} val frees = replicate 2 ("x", @{typ nat}) in Variable.variant_frees ctxt1 [] frees end\ - \[("xb", "nat"), ("xc", "nat")]\} + \[("xb", _), ("xc", _)]\} The result is \xb\ and \xc\ for the names of the fresh variables, since \x\ and \xa\ occur in the term we declared. @@ -327,7 +327,7 @@ the function @{ML_ind read_term in Syntax} from the structure @{ML_structure Syntax}. Consider the following code: - @{ML_matchresult_fake [gray, display] + @{ML_response [gray, display] \let val ctxt0 = @{context} val ctxt1 = Variable.declare_term @{term "x::nat"} ctxt0 @@ -343,7 +343,7 @@ type the parsed term receives depends upon the last declaration that is made, as the next example illustrates. - @{ML_matchresult_fake [gray, display] + @{ML_response [gray, display] \let val ctxt1 = Variable.declare_term @{term "x::nat"} @{context} val ctxt2 = Variable.declare_term @{term "x::int"} ctxt1 @@ -398,7 +398,7 @@ term, in our case @{term "P x y z x y z"}, into a theorem (disregarding whether it is actually provable). - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val thy = @{theory} val ctxt0 = @{context} @@ -416,7 +416,7 @@ function @{ML_ind export in Assumption} from the structure @{ML_structure Assumption}. Consider the following code. 
- @{ML_matchresult_fake [display, gray, linenos] + @{ML_response [display, gray, linenos] \let val ctxt0 = @{context} val ([eq], ctxt1) = Assumption.add_assumes [@{cprop "x \ y"}] ctxt0 @@ -448,7 +448,7 @@ @{ML_structure Variable} and @{ML_structure Assumption}. This can be seen in the following example. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val ctxt0 = @{context} val ((fvs, [eq]), ctxt1) = ctxt0 @@ -458,7 +458,7 @@ in Proof_Context.export ctxt1 ctxt0 [eq'] end\ - \[?x \ ?y \ ?y \ ?x]\} + \["?x \ ?y \ ?y \ ?x"]\} \ diff -r 95b42288294e -r 438703674711 ProgTutorial/Base.thy --- a/ProgTutorial/Base.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Base.thy Tue May 21 14:37:39 2019 +0200 @@ -3,12 +3,6 @@ "~~/src/HOL/Library/LaTeXsugar" begin -print_ML_antiquotations - -text \ -Why is Base not printed? -@{cite "isa-imp"} -\ notation (latex output) Cons ("_ # _" [66,65] 65) @@ -27,10 +21,6 @@ ML_file "output_tutorial.ML" ML_file "antiquote_setup.ML" - -(*setup {* OutputTutorial.setup *}*) (* Seems to be standard now, we don't need this anymoe *) setup \AntiquoteSetup.setup\ -print_ML_antiquotations - end \ No newline at end of file diff -r 95b42288294e -r 438703674711 ProgTutorial/Essential.thy --- a/ProgTutorial/Essential.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Essential.thy Tue May 21 14:37:39 2019 +0200 @@ -45,6 +45,15 @@ | Abs of string * typ * term | $ of term * term\ +ML \ +let + val redex = @{term "(\(x::int) (y::int). x)"} + val arg1 = @{term "1::int"} + val arg2 = @{term "2::int"} +in + pretty_term @{context} (redex $ arg1 $ arg2) +end\ + text \ This datatype implements Church-style lambda-terms, where types are explicitly recorded in variables, constants and abstractions. The @@ -53,7 +62,7 @@ terms use the usual de Bruijn index mechanism for representing bound variables. For example in - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \@{term "\x y. x y"}\ \Abs ("x", "'a \ 'b", Abs ("y", "'a", Bound 1 $ Bound 0))\} @@ -69,7 +78,7 @@ Be careful if you pretty-print terms. Consider pretty-printing the abstraction term shown above: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \@{term "\x y. x y"} |> pretty_term @{context} |> pwriteln\ @@ -79,7 +88,7 @@ tacitly eta-contracted the output. You obtain a similar result with beta-redexes - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val redex = @{term "(\(x::int) (y::int). x)"} val arg1 = @{term "1::int"} @@ -97,7 +106,7 @@ value @{ML_ind show_abbrevs in Syntax}. For example - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val redex = @{term "(\(x::int) (y::int). x)"} val arg1 = @{term "1::int"} @@ -114,7 +123,7 @@ \emph{schematic} variables (term-constructor @{ML Var} and written with a leading question mark). Consider the following two examples - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val v1 = Var (("x", 3), @{typ bool}) val v2 = Var (("x1", 3), @{typ bool}) @@ -144,20 +153,20 @@ Constructing terms via antiquotations has the advantage that only typable terms can be constructed. 
For example - @{ML_matchresult_fake_both [display,gray] + @{ML_response [display,gray] \@{term "x x"}\ \Type unification failed: Occurs check!\} raises a typing error, while it is perfectly ok to construct the term with the raw ML-constructors: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val omega = Free ("x", @{typ "nat \ nat"}) $ Free ("x", @{typ nat}) in pwriteln (pretty_term @{context} omega) end\ - \x x\} + "x x"} Sometimes the internal representation of terms can be surprisingly different from what you see at the user-level, because the layers of @@ -204,7 +213,7 @@ As already seen above, types can be constructed using the antiquotation \@{typ _}\. For example: - @{ML_matchresult_fake [display,gray] \@{typ "bool \ nat"}\ \bool \ nat\} + @{ML_response [display,gray] \@{typ "bool \ nat"}\ \bool \ nat\} The corresponding datatype is \ @@ -294,11 +303,11 @@ After that the types for booleans, lists and so on are printed out again the standard Isabelle way. - @{ML_matchresult_fake [display, gray] - \@{typ "bool"}; -@{typ "'a list"}\ - \"bool" -"'a List.list"\} + @{ML_response [display, gray] + \(@{typ "bool"}, +@{typ "'a list"})\ + \("bool", +"'a list")\} \begin{readmore} Types are described in detail in \isccite{sec:types}. Their @@ -358,7 +367,7 @@ term list applied to the term. For example -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val trm = @{term "P::bool \ bool \ bool"} val args = [@{term "True"}, @{term "False"}] @@ -366,19 +375,19 @@ list_comb (trm, args) end\ \Free ("P", "bool \ bool \ bool") - $ Const ("True", "bool") $ Const ("False", "bool")\} + $ Const ("HOL.True", "bool") $ Const ("HOL.False", "bool")\} Another handy function is @{ML_ind lambda in Term}, which abstracts a variable in a term. For example - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val x_nat = @{term "x::nat"} val trm = @{term "(P::nat \ bool) x"} in lambda x_nat trm end\ - \Abs ("x", "Nat.nat", Free ("P", "bool \ bool") $ Bound 0)\} + \Abs ("x", "nat", Free ("P", "nat \ bool") $ Bound 0)\} In this example, @{ML lambda} produces a de Bruijn index (i.e.\ @{ML \Bound 0\}), and an abstraction, where it also records the type of the abstracted @@ -387,7 +396,7 @@ is of the same type as the abstracted variable. If it is of different type, as in - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val x_int = @{term "x::int"} val trm = @{term "(P::nat \ bool) x"} @@ -406,7 +415,7 @@ "(f::nat \ nat \ nat) 0 x"} the subterm @{term "(f::nat \ nat \ nat) 0"} by @{term y}, and @{term x} by @{term True}. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val sub1 = (@{term "(f::nat \ nat \ nat) 0"}, @{term "y::nat \ nat"}) val sub2 = (@{term "x::nat"}, @{term "True"}) @@ -414,12 +423,12 @@ in subst_free [sub1, sub2] trm end\ - \Free ("y", "nat \ nat") $ Const ("True", "bool")\} + \Free ("y", "nat \ nat") $ Const ("HOL.True", "bool")\} As can be seen, @{ML subst_free} does not take typability into account. However it takes alpha-equivalence into account: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val sub = (@{term "(\y::nat. y)"}, @{term "x::nat"}) val trm = @{term "(\x::nat. x)"} @@ -446,10 +455,10 @@ The function returns a pair consisting of the stripped off variables and the body of the universal quantification. For example - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \strip_alls @{term "\x y. 
x = (y::bool)"}\ \([Free ("x", "bool"), Free ("y", "bool")], - Const ("op =", _) $ Bound 1 $ Bound 0)\} + Const ("HOL.eq",\) $ Bound 1 $ Bound 0)\} Note that we produced in the body two dangling de Bruijn indices. Later on we will also use the inverse function that @@ -466,7 +475,7 @@ bound variables. With the function @{ML subst_bounds}, you can replace these loose @{ML_ind Bound in Term}s with the stripped off variables. - @{ML_matchresult_fake [display, gray, linenos] + @{ML_response [display, gray, linenos] \let val (vrs, trm) = strip_alls @{term "\x y. x = (y::bool)"} in @@ -488,20 +497,20 @@ the loose de Bruijn index is replaced by a unique free variable. For example - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val body = Bound 0 $ Free ("x", @{typ nat}) in Term.dest_abs ("x", @{typ "nat \ bool"}, body) end\ - \("xa", Free ("xa", "Nat.nat \ bool") $ Free ("x", "Nat.nat"))\} + \("xa", Free ("xa", "nat \ bool") $ Free ("x", "nat"))\} Sometimes it is necessary to manipulate de Bruijn indices in terms directly. There are many functions to do this. We describe only two. The first, @{ML_ind incr_boundvars in Term}, increases by an integer the indices of the loose bound variables in a term. In the code below -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \@{term "\x y z u. z = u"} |> strip_alls ||> incr_boundvars 2 @@ -519,7 +528,7 @@ The second function, @{ML_ind loose_bvar1 in Text}, tests whether a term contains a loose bound of a certain index. For example - @{ML_matchresult_fake [gray,display] + @{ML_matchresult [gray,display] \let val body = snd (strip_alls @{term "\x y. x = (y::bool)"}) in @@ -534,7 +543,7 @@ HOLogic} constructs an equality out of two terms. The types needed in this equality are calculated from the type of the arguments. For example -@{ML_matchresult_fake [gray,display] +@{ML_response [gray,display] \let val eq = HOLogic.mk_eq (@{term "True"}, @{term "False"}) in @@ -674,10 +683,10 @@ text \ Here is an example: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \map_types nat_to_int @{term "a = (1::nat)"}\ -\Const ("op =", "int \ int \ bool") - $ Free ("a", "int") $ Const ("HOL.one_class.one", "int")\} +\Const ("HOL.eq", "int \ int \ bool") + $ Free ("a", "int") $ Const ("Groups.one_class.one", "int")\} If you want to obtain the list of free type-variables of a term, you can use the function @{ML_ind add_tfrees in Term} @@ -798,9 +807,9 @@ @{ML tyenv_unif} with the built-in function @{ML_ind dest in Vartab} from the structure @{ML_structure Vartab}. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Vartab.dest tyenv_unif\ - \[(("'a", 0), (["HOL.type"], "?'b List.list")), + \[(("'a", 0), (["HOL.type"], "?'b list")), (("'b", 0), (["HOL.type"], "nat"))]\} \ @@ -839,7 +848,7 @@ environment in the example this function prints out the more legible: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pretty_tyenv @{context} tyenv_unif\ \[?'a := ?'b list, ?'b := nat]\} @@ -877,15 +886,21 @@ Sign.typ_unify @{theory} (ty1, ty2) (Vartab.empty, 0) end\ +declare[[show_sorts]] + text \ To print out the result type environment we switch on the printing of sort information by setting @{ML_ind show_sorts in Syntax} to true. This allows us to inspect the typing environment. 
- @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \pretty_tyenv @{context} tyenv\ \[?'a::s1 := ?'a1::{s1, s2}, ?'b::s2 := ?'a1::{s1, s2}]\} - +\ + +declare[[show_sorts=false]] + +text\ As can be seen, the type variables \?'a\ and \?'b\ are instantiated with a new type variable \?'a1\ with sort \{s1, s2}\. Since a new type variable has been introduced the @{ML index}, originally being \0\, @@ -899,7 +914,7 @@ \?'b list * nat\ from the beginning of this section, and the calculated type environment @{ML tyenv_unif}: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pretty_tyenv @{context} tyenv_unif\ \[?'a := ?'b list, ?'b := nat]\} @@ -911,9 +926,9 @@ performance reasons. To apply such a type environment to a type, say \?'a * ?'b\, you should use the function @{ML_ind norm_type in Envir}: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \Envir.norm_type tyenv_unif @{typ_pat "?'a * ?'b"}\ - \nat list * nat\} + \nat list \ nat\} Matching of types can be done with the function @{ML_ind typ_match in Sign} also from the structure @{ML_structure Sign}. This function returns a @{ML_type @@ -931,7 +946,7 @@ text \ Printing out the calculated matcher gives - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \pretty_tyenv @{context} tyenv_match\ \[?'a := bool list, ?'b := nat]\} @@ -939,9 +954,9 @@ applying the matcher to a type needs to be done with the function @{ML_ind subst_type in Envir}. For example - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \Envir.subst_type tyenv_match @{typ_pat "?'a * ?'b"}\ - \bool list * nat\} + \bool list \ nat\} Be careful to observe the difference: always use @{ML subst_type in Envir} for matchers and @{ML norm_type in Envir} @@ -960,16 +975,16 @@ Now @{ML tyenv_unif} is equal to @{ML tyenv_match'}. If we apply @{ML norm_type in Envir} to the type \?'a * ?'b\ we obtain - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \Envir.norm_type tyenv_match' @{typ_pat "?'a * ?'b"}\ - \nat list * nat\} + \nat list \ nat\} which does not solve the matching problem, and if we apply @{ML subst_type in Envir} to the same type we obtain - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \Envir.subst_type tyenv_unif @{typ_pat "?'a * ?'b"}\ - \?'b list * nat\} + \?'b list \ nat\} which does not solve the unification problem. @@ -1023,7 +1038,7 @@ the type environment is empty and can be ignored. The resulting term environment is - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pretty_env @{context} fo_env\ \[?X := P, ?Y := \a b. Q b a]\} @@ -1033,7 +1048,7 @@ unifiers). The function @{ML subst_term in Envir} expects a type environment, which is set to empty in the example below, and a term environment. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val trm = @{term_pat "(?X::(nat\nat\nat)\bool) ?Y"} in @@ -1050,7 +1065,7 @@ matching \\x. P x\ against the pattern \\y. ?X y\. In this case, first-order matching produces \[?X := P]\. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val fo_pat = @{term_pat "\y. (?X::nat\bool) y"} val trm = @{term "\x. (P::nat\bool) x"} @@ -1101,7 +1116,7 @@ and @{ML_ind unify in Pattern}, both implemented in the structure @{ML_structure Pattern}. An example for higher-order pattern unification is - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val trm1 = @{term_pat "\x y. 
g (?X y x) (f (?Y x))"} val trm2 = @{term_pat "\u v. g u (f u)"} @@ -1152,13 +1167,15 @@ We can print them out as follows. - @{ML_matchresult_fake [display, gray] - \pretty_env @{context} (Envir.term_env un1); -pretty_env @{context} (Envir.term_env un2); -pretty_env @{context} (Envir.term_env un3)\ - \[?X := \a. a, ?Y := f a] -[?X := f, ?Y := a] -[?X := \b. f a]\} + @{ML_response [display, gray] + \pretty_env @{context} (Envir.term_env un1)\ + \[?X := \a. a, ?Y := f a]\} + @{ML_response [display, gray] + \pretty_env @{context} (Envir.term_env un2)\ + \[?X := f, ?Y := a]\} + @{ML_response [display, gray] + \pretty_env @{context} (Envir.term_env un3)\ + \[?X := \b. f a]\} In case of failure the function @{ML_ind unifiers in Unify} does not raise @@ -1181,9 +1198,9 @@ called. This function has a built-in bound, which can be accessed and manipulated as a configuration value. For example - @{ML_matchresult_fake [display,gray] + @{ML_matchresult [display,gray] \Config.get_global @{theory} (Unify.search_bound)\ - \Int 60\} + \60\} If this bound is reached during unification, Isabelle prints out the warning message @{text [quotes] "Unification bound exceeded"} and @@ -1266,7 +1283,7 @@ @{term "Q True"}. - @{ML_matchresult_fake [gray,display,linenos] + @{ML_response [gray,display,linenos] \let val pat = Logic.strip_imp_concl (Thm.prop_of @{thm spec}) val trm = @{term "Trueprop (Q True)"} @@ -1361,9 +1378,9 @@ detect any typing inconsistency. For example changing the type of the variable @{term "x"} from @{typ "nat"} to @{typ "int"} will result in the error message: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \type_of (@{term "f::nat \ bool"} $ @{term "x::int"})\ - \*** Exception- TYPE ("type_of: type mismatch in application" \\} + \exception TYPE raised \\} Since the complete traversal might sometimes be too costly and not necessary, there is the function @{ML_ind fastype_of in Term}, which @@ -1387,7 +1404,7 @@ complete type. The type inference can be invoked with the function @{ML_ind check_term in Syntax}. An example is as follows: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val c = Const (@{const_name "plus"}, dummyT) val o = @{term "1::nat"} @@ -1395,8 +1412,8 @@ in Syntax.check_term @{context} (c $ o $ v) end\ -\Const ("HOL.plus_class.plus", "nat \ nat \ nat") $ - Const ("HOL.one_class.one", "nat") $ Free ("x", "nat")\} +\Const ("Groups.plus_class.plus", "nat \ nat \ nat") $ + Const ("Groups.one_class.one", "nat") $ Free ("x", "nat")\} Instead of giving explicitly the type for the constant \plus\ and the free variable \x\, type-inference fills in the missing information. @@ -1432,26 +1449,26 @@ Certification is always relative to a context. For example you can write: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Thm.cterm_of @{context} @{term "(a::nat) + b = c"}\ \a + b = c\} This can also be written with an antiquotation: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \@{cterm "(a::nat) + b = c"}\ \a + b = c\} Attempting to obtain the certified term for - @{ML_matchresult_fake_both [display,gray] + @{ML_response [display,gray] \@{cterm "1 + True"}\ - \Type unification failed \\} + \Type unification failed\\} yields an error (since the term is not typable). 
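text ‹
  A small side remark: the same kind of failure shows up if an ill-typed term is
  first put together with the raw ML-constructors and then certified. The code
  below is only a sketch illustrating this; it uses the standard library function
  @{ML try} so that the raised exception shows up as @{ML NONE} instead of
  aborting the evaluation.
›

ML ‹
  (* an ill-typed application: a function expecting a nat applied to an int *)
  val bad_trm = Free ("f", @{typ "nat => bool"}) $ @{term "x::int"}

  (* certification type-checks the term, so it fails here;
     try turns the exception into NONE *)
  val certified = try (Thm.cterm_of @{context}) bad_trm
›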
A slightly more elaborate example that type-checks is: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val natT = @{typ "nat"} val zero = @{term "0::nat"} @@ -1465,13 +1482,13 @@ you obtain the certified type for the Isabelle type @{typ "nat \ bool"} on the ML-level as follows: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Thm.ctyp_of @{context} (@{typ nat} --> @{typ bool})\ \nat \ bool\} or with the antiquotation: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \@{ctyp "nat \ bool"}\ \nat \ bool\} @@ -1479,14 +1496,14 @@ pattern-match against them. However, we can construct them. For example the function @{ML_ind apply in Thm} produces a certified application. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Thm.apply @{cterm "P::nat \ bool"} @{cterm "3::nat"}\ \P 3\} Similarly the function @{ML_ind list_comb in Drule} from the structure @{ML_structure Drule} applies a list of @{ML_type cterm}s. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val chead = @{cterm "P::unit \ nat \ bool"} val cargs = [@{cterm "()"}, @{cterm "3::nat"}] @@ -1546,7 +1563,7 @@ If we print out the value of @{ML my_thm} then we see only the final statement of the theorem. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pwriteln (pretty_thm @{context} my_thm)\ \\\x. P x \ Q x; P t\ \ Q t\} @@ -1725,14 +1742,14 @@ the functions @{ML_ind lhs_of in Thm} and @{ML_ind rhs_of in Thm} return the left and right-hand side, respectively, of a meta-equality. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val eq = @{thm True_def} in (Thm.lhs_of eq, Thm.rhs_of eq) - |> apply2 (Pretty.string_of o (pretty_cterm @{context})) + |> apply2 (YXML.content_of o Pretty.string_of o (pretty_cterm @{context})) end\ - \(True, (\x. x) = (\x. x))\} + \("True", "(\x. x) = (\x. x)")\} Other function produce terms that can be pattern-matched. Suppose the following two theorems. @@ -1778,7 +1795,7 @@ this, we build the theorem @{term "True \ True"} (Line 1) and then unfold the constant @{term "True"} according to its definition (Line 2). - @{ML_matchresult_fake [display,gray,linenos] + @{ML_response [display,gray,linenos] \Thm.reflexive @{cterm "True"} |> Simplifier.rewrite_rule @{context} [@{thm True_def}] |> pretty_thm @{context} @@ -1795,14 +1812,14 @@ @{ML_ind rulify in Object_Logic} replaces all object connectives by equivalents in the meta logic. For example - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \Object_Logic.rulify @{context} @{thm foo_test2}\ \\?A; ?B\ \ ?C\} The transformation in the other direction can be achieved with function @{ML_ind atomize in Object_Logic} and the following code. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val thm = @{thm foo_test1} val meta_eq = Object_Logic.atomize @{context} (Thm.cprop_of thm) @@ -1837,16 +1854,16 @@ text \ This function produces for the theorem @{thm [source] list.induct} - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \atomize_thm @{context} @{thm list.induct}\ - \\P list. P [] \ (\a list. P list \ P (a # list)) \ P list\} + \"\P list. P [] \ (\x1 x2. P x2 \ P (x1 # x2)) \ P list"\} Theorems can also be produced from terms by giving an explicit proof. One way to achieve this is by using the function @{ML_ind prove in Goal} in the structure @{ML_structure Goal}. For example below we use this function to prove the term @{term "P \ P"}. 
- @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val trm = @{term "P \ P::bool"} val tac = K (assume_tac @{context} 1) @@ -1868,17 +1885,17 @@ \ ML %grayML\fun rep_goals i = replicate i @{prop "f x = f x"} -fun rep_tacs i = replicate i (resolve_tac @{context} [@{thm refl}]) +fun rep_tacs ctxt i = replicate i (resolve_tac ctxt [@{thm refl}]) fun multi_test ctxt i = Goal.prove_common ctxt NONE ["f", "x"] [] (rep_goals i) - (K ((Goal.conjunction_tac THEN' RANGE (rep_tacs i)) 1))\ + (K ((Goal.conjunction_tac THEN' RANGE (rep_tacs ctxt i)) 1))\ text \ With them we can now produce three theorem instances of the proposition. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \multi_test @{context} 3\ \["?f ?x = ?f ?x", "?f ?x = ?f ?x", "?f ?x = ?f ?x"]\} @@ -1903,7 +1920,7 @@ purposes, or when explicit proof construction should be omitted due to performace reasons. An example of this function is as follows: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \Skip_Proof.make_thm @{theory} @{prop "True = False"}\ \True = False\} @@ -1919,16 +1936,16 @@ Thm}. Assume you prove the following lemma. \ -lemma foo_data: +theorem foo_data: shows "P \ P \ P" by assumption text \ The auxiliary data of this lemma can be retrieved using the function @{ML_ind get_tags in Thm}. So far the the auxiliary data of this lemma is - @{ML_matchresult_fake [display,gray] + @{ML_matchresult [display,gray] \Thm.get_tags @{thm foo_data}\ - \[("name", "General.foo_data"), ("kind", "lemma")]\} + \[("name", "Essential.foo_data"), ("kind", "theorem")]\} consisting of a name and a kind. When we store lemmas in the theorem database, we might want to explicitly extend this data by attaching case names to the @@ -1944,10 +1961,10 @@ text \ The data of the theorem @{thm [source] foo_data'} is then as follows: - @{ML_matchresult_fake [display,gray] + @{ML_matchresult [display,gray] \Thm.get_tags @{thm foo_data'}\ - \[("name", "General.foo_data'"), ("kind", "lemma"), - ("case_names", "foo_case_one;foo_case_two")]\} + \[("name", "Essential.foo_data'"), + ("case_names", "foo_case_one;foo_case_two"), ("kind", "theorem")]\} You can observe the case names of this lemma on the user level when using the proof methods \cases\ and \induct\. In the proof below @@ -2001,12 +2018,22 @@ ML %grayML\fun pthms_of (PBody {thms, ...}) = map #2 thms val get_names = (map Proofterm.thm_node_name) o pthms_of -val get_pbodies = map (Future.join o Proofterm.thm_node_body) o pthms_of\ +val get_pbodies = map (Future.join o Proofterm.thm_node_body) o pthms_of +fun get_all_names thm = +let + (*proof body with digest*) + val body = Proofterm.strip_thm (Thm.proof_body_of thm); + (*all theorems used in the graph of nested proofs*) +in Proofterm.fold_body_thms + (fn {name, ...} => insert (op =) name) [body] [] +end\ text \ To see what their purpose is, consider the following three short proofs. \ - +ML \ + +\ lemma my_conjIa: shows "A \ B \ A \ B" apply(rule conjI) @@ -2026,7 +2053,9 @@ apply(auto) done - +ML "Proofterm.proofs" +ML \@{thm my_conjIa} + |> get_all_names\ text \ While the information about which theorems are used is obvious in the first two proofs, it is not obvious in the third, because of the @@ -2036,7 +2065,7 @@ extracting this information. Let us first extract the name of the established theorem. This can be done with - @{ML_matchresult_fake [display,gray] + @{ML_matchresult [display,gray] \@{thm my_conjIa} |> Thm.proof_body_of |> get_names\ @@ -2048,61 +2077,44 @@ theorem. 
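text ‹
  The same one-liner works for the other two theorems; only the returned names
  differ. The following sketch (whose output we do not show here) applies it to
  all three proofs at once.
›

ML ‹
  (* the names of the three established theorems, each extracted from its proof body *)
  map (get_names o Thm.proof_body_of) [@{thm my_conjIa}, @{thm my_conjIb}, @{thm my_conjIc}]
›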
Notice that the first proof above references three theorems, namely @{thm [source] conjI}, @{thm [source] conjunct1} and @{thm [source] conjunct2}. We can obtain them by descending into the - first level of the proof-tree, as follows. - - @{ML_matchresult_fake [display,gray] + proof-tree. The function @{ML get_all_names} recursively selects all names. + + @{ML_response [display,gray] \@{thm my_conjIa} - |> Thm.proof_body_of - |> get_pbodies - |> map get_names - |> List.concat\ - \["HOL.conjunct2", "HOL.conjunct1", "HOL.conjI", "Pure.protectD", - "Pure.protectI"]\} + |> get_all_names |> sort string_ord\ + \["", "HOL.All_def", "HOL.FalseE", "HOL.False_def", "HOL.TrueI", "HOL.True_def", + "HOL.True_or_False", "HOL.allI", "HOL.and_def", "HOL.conjI", + "HOL.conjunct1", "HOL.conjunct2", "HOL.disjE", "HOL.eqTrueE", "HOL.eqTrueI", + "HOL.ext", "HOL.fun_cong", "HOL.iffD1", "HOL.iffD2", "HOL.iffI", + "HOL.impI", "HOL.mp", "HOL.or_def", "HOL.refl", "HOL.rev_iffD1", + "HOL.rev_iffD2", "HOL.spec", "HOL.ssubst", "HOL.subst", "HOL.sym", + "Pure.protectD", "Pure.protectI"]\} The theorems @{thm [source] Pure.protectD} and @{thm [source] Pure.protectI} that are internal theorems are always part of a - proof in Isabelle. Note also that applications of \assumption\ do not + proof in Isabelle. The other theorems are the theorems used in the proofs of the theorems + @{thm [source] conjunct1}, @{thm [source] conjunct2} and @{thm [source] conjI}. + + Note also that applications of \assumption\ do not count as a separate theorem, as you can see in the following code. - @{ML_matchresult_fake [display,gray] + @{ML_matchresult [display,gray] \@{thm my_conjIb} - |> Thm.proof_body_of - |> get_pbodies - |> map get_names - |> List.concat\ - \["Pure.protectD", "Pure.protectI"]\} + |> get_all_names |> sort string_ord\ + \["", "Pure.protectD", "Pure.protectI"]\} + Interestingly, but not surprisingly, the proof of @{thm [source] my_conjIc} procceeds quite differently from @{thm [source] my_conjIa} and @{thm [source] my_conjIb}, as can be seen by the theorems that are returned for @{thm [source] my_conjIc}. - - @{ML_matchresult_fake [display,gray] + + @{ML_response [display,gray] \@{thm my_conjIc} - |> Thm.proof_body_of - |> get_pbodies - |> map get_names - |> List.concat\ - \["HOL.Eq_TrueI", "HOL.simp_thms_25", "HOL.eq_reflection", - "HOL.conjunct2", "HOL.conjunct1", "HOL.TrueI", "Pure.protectD", - "Pure.protectI"]\} - - Of course we can also descend into the second level of the tree - ``behind'' @{thm [source] my_conjIa} say, which - means we obtain the theorems that are used in order to prove - @{thm [source] conjunct1}, @{thm [source] conjunct2} and @{thm [source] conjI}. - - @{ML_matchresult_fake [display, gray] - \@{thm my_conjIa} - |> Thm.proof_body_of - |> get_pbodies - |> map get_pbodies - |> (map o map) get_names - |> List.concat - |> List.concat - |> duplicates (op=)\ - \["HOL.spec", "HOL.and_def", "HOL.mp", "HOL.impI", "Pure.protectD", - "Pure.protectI"]\} + |> get_all_names\ + \["HOL.simp_thms_25", "Pure.conjunctionD1", "Pure.conjunctionD2", "Pure.conjunctionI", + "HOL.rev_mp", "HOL.exI", "HOL.allE", "HOL.exE",\]\} + \begin{exercise} Have a look at the theorems that are used when a lemma is ``proved'' @@ -2408,7 +2420,7 @@ text \ then you can see it is added to the initially empty list. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \MyThms.get @{context}\ \["True"]\} @@ -2422,7 +2434,7 @@ not need to be explicitly given. 
These three declarations will cause the theorem list to be updated as: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \MyThms.get @{context}\ \["True", "Suc (Suc 0) = 2"]\} @@ -2435,7 +2447,7 @@ text \After this, the theorem list is again: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \MyThms.get @{context}\ \["True"]\} @@ -2482,7 +2494,7 @@ \ ML %grayML\fun pprint prt = tracing (Pretty.string_of prt)\ - +ML %invisible\val pprint = YXML.content_of o Pretty.string_of\ text \ The point of the pretty-printing infrastructure is to give hints about how to layout text and let Isabelle do the actual layout. Let us first explain @@ -2512,7 +2524,7 @@ We obtain the same if we just use the function @{ML pprint}. -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \pprint (Pretty.str test_str)\ \fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar foooooooooo ooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaa @@ -2526,7 +2538,7 @@ list. To print this list of pretty types as a single string, we concatenate them with the function @{ML_ind blk in Pretty} as follows: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ptrs = map Pretty.str (space_explode " " test_str) in @@ -2542,7 +2554,7 @@ indentation of the printed string. You can increase the indentation and obtain -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ptrs = map Pretty.str (space_explode " " test_str) in @@ -2557,7 +2569,7 @@ that every line starts with the same indent, you can use the function @{ML_ind indent in Pretty} as follows: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ptrs = map Pretty.str (space_explode " " test_str) in @@ -2572,7 +2584,7 @@ have the linebreaks handled properly, you can use the function @{ML_ind commas in Pretty}. For example -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020) in @@ -2590,7 +2602,7 @@ text \ -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020) in @@ -2629,39 +2641,48 @@ can occur. We do the same after the @{text [quotes] "and"}. This gives you for example -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ptrs1 = map (Pretty.str o string_of_int) (1 upto 22) +in + pprint (Pretty.blk (0, and_list ptrs1)) +end\ +\1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 +and 22\} +@{ML_response [display,gray] +\let val ptrs2 = map (Pretty.str o string_of_int) (10 upto 28) in - pprint (Pretty.blk (0, and_list ptrs1)); pprint (Pretty.blk (0, and_list ptrs2)) end\ -\1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21 -and 22 -10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 and +\10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 and 28\} Like @{ML blk in Pretty}, the function @{ML_ind chunks in Pretty} prints out a list of items, but automatically inserts forced breaks between each item. 
Compare - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val a_and_b = [Pretty.str "a", Pretty.str "b"] in - pprint (Pretty.blk (0, a_and_b)); + pprint (Pretty.blk (0, a_and_b)) +end\ +\ab\} + @{ML_response [display,gray] + \let + val a_and_b = [Pretty.str "a", Pretty.str "b"] +in pprint (Pretty.chunks a_and_b) end\ -\ab -a +\a b\} The function @{ML_ind big_list in Pretty} helps with printing long lists. It is used for example in the command \isacommand{print\_theorems}. An example is as follows. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val pstrs = map (Pretty.str o string_of_int) (4 upto 10) in @@ -2681,7 +2702,7 @@ out the the terms produced by the the function @{ML de_bruijn} from exercise~\ref{ex:debruijn} we obtain the following: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \pprint (Syntax.pretty_term @{context} (de_bruijn 4))\ \(P 3 = P 4 \ P 4 \ P 3 \ P 2 \ P 1) \ (P 2 = P 3 \ P 4 \ P 3 \ P 2 \ P 1) \ diff -r 95b42288294e -r 438703674711 ProgTutorial/First_Steps.thy --- a/ProgTutorial/First_Steps.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/First_Steps.thy Tue May 21 14:37:39 2019 +0200 @@ -4,7 +4,6 @@ chapter \First Steps\label{chp:firststeps}\ - text \ \begin{flushright} {\em ``The most effective debugging tool is still careful thought,\\ @@ -109,10 +108,9 @@ You can print out error messages with the function @{ML_ind error in Library}; for example: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \if 0 = 1 then true else (error "foo")\ -\*** foo -***\} +\foo\} This function raises the exception \ERROR\, which will then be displayed by the infrastructure indicating that it is an error by @@ -132,11 +130,14 @@ ML %grayML\val pretty_term = Syntax.pretty_term val pwriteln = Pretty.writeln\ - +(* We redfine pwriteln to return a value not just a side effect on the output in order to +use some checking of output with ML_response antiquotation. *) +ML %invisible\val pretty_term = Syntax.pretty_term +val pwriteln = YXML.content_of o Pretty.string_of\ text \ They can be used as follows - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \pwriteln (pretty_term @{context} @{term "1::nat"})\ \"1"\} @@ -159,7 +160,7 @@ text \ Now by using this context @{ML pretty_term} prints out - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pwriteln (pretty_term show_types_ctxt @{term "(1::nat, x)"})\ \(1::nat, x::'a)\} @@ -203,7 +204,7 @@ @{thm [source] conjI} shown below can be used for any (typable) instantiation of \?P\ and \?Q\. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pwriteln (pretty_thm @{context} @{thm conjI})\ \\?P; ?Q\ \ ?P \ ?Q\} @@ -221,7 +222,7 @@ text \ With this function, theorem @{thm [source] conjI} is now printed as follows: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pwriteln (pretty_thm_no_vars @{context} @{thm conjI})\ \\P; Q\ \ P \ Q\} @@ -267,12 +268,12 @@ @{ML_matchresult_fake [display,gray] \pwriteln (Pretty.str "First half,"); pwriteln (Pretty.str "and second half.")\ -\First half, -and second half.\} +\"First half, +and second half."\} but as a single string with appropriate formatting. 
For example -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \pwriteln (Pretty.str ("First half," ^ "\\n" ^ "and second half."))\ \First half, and second half.\} @@ -282,7 +283,7 @@ @{ML_ind cat_lines in Library} concatenates a list of strings and inserts newlines in between each element. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \pwriteln (Pretty.str (cat_lines ["foo", "bar"]))\ \foo bar\} @@ -387,7 +388,7 @@ applied to it. For example, below three variables are applied to the term @{term [show_types] "P::nat \ int \ unit \ bool"}: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val trm = @{term "P::nat \ int \ unit \ bool"} val ctxt = @{context} @@ -414,7 +415,7 @@ terms involving fresh variables. For this the infrastructure helps tremendously to avoid any name clashes. Consider for example: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val trm = @{term "za::'a \ 'b \ 'c"} val ctxt = @{context} @@ -599,7 +600,7 @@ An example where this combinator is useful is as follows - @{ML_matchresult_fake [display, gray] + @{ML_matchresult [display, gray] \let val ((names1, names2), _) = @{context} @@ -639,12 +640,12 @@ both as pairs. We can use this information for example to print out the definiens and the theorem corresponding to the definitions. For example for the first definition: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val (one_trm, (_, one_thm)) = one_def in - pwriteln (pretty_term ctxt' one_trm); - pwriteln (pretty_thm ctxt' one_thm) + (pwriteln (pretty_term ctxt' one_trm), + pwriteln (pretty_thm ctxt' one_thm)) end\ \One One \ 1\} @@ -673,7 +674,7 @@ @{ML_ind check_terms in Syntax}, whose purpose is to simultaneously type-check a list of terms. Consider the code: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val ctxt = @{context} in @@ -694,7 +695,7 @@ Syntax} in the file @{ML_file "Pure/Syntax/syntax.ML"} that is implemented in terms of @{ML singleton} and @{ML check_terms in Syntax}.} For example - @{ML_matchresult_fake [display, gray, linenos] + @{ML_response [display, gray, linenos] \let val ctxt = @{context} in @@ -774,7 +775,7 @@ @{ML_matchresult_fake [display, gray] \Proof_Context.print_abbrevs false @{context}\ \\ -INTER \ INFI +INTER \ INFIMUM Inter \ Inf \\} @@ -783,21 +784,22 @@ You can also use antiquotations to refer to proved theorems: \@{thm \}\ for a single theorem - @{ML_matchresult_fake [display,gray] \@{thm allI}\ \(\x. ?P x) \ \x. ?P x\} + @{ML_response [display,gray] \@{thm allI}\ \(\x. ?P x) \ \x. ?P x\} and \@{thms \}\ for more than one -@{ML_matchresult_fake [display,gray] + +@{ML_response [display,gray] \@{thms conj_ac}\ -\(?P \ ?Q) = (?Q \ ?P) -(?P \ ?Q \ ?R) = (?Q \ ?P \ ?R) -((?P \ ?Q) \ ?R) = (?P \ ?Q \ ?R)\} +\["(?P \ ?Q) = (?Q \ ?P)", + "(?P \ ?Q \ ?R) = (?Q \ ?P \ ?R)", + "((?P \ ?Q) \ ?R) = (?P \ ?Q \ ?R)"]\} The thm-antiquotations can also be used for manipulating theorems. For example, if you need the version of the theorem @{thm [source] refl} that has a meta-equality instead of an equality, you can write -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \@{thm refl[THEN eq_reflection]}\ \?x \ ?x\} @@ -816,7 +818,7 @@ text \ The result can be printed out as follows. - @{ML_matchresult_fake [gray,display] + @{ML_response [gray,display] \foo_thms |> pretty_thms_no_vars @{context} |> pwriteln\ \True, False \ P\} @@ -840,9 +842,9 @@ simp-rules. 
Now you can feed in the current simpset into this function. The current simpset can be referred to using @{ML_ind simpset_of in Raw_Simplifier}. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \get_thm_names_from_ss @{context}\ - \["Nat.of_nat_eq_id", "Int.of_int_eq_id", "Nat.One_nat_def", \]\} + \["Euclidean_Division.euclidean_size_int_def",\\} Again, this way of referencing simpsets makes you independent from additions of lemmas to the simpset by the user, which can potentially cause loops in your @@ -884,9 +886,9 @@ so that the ML-system can understand it. (All these functions will be explained in more detail in later sections.) An example for this antiquotation is: - @{ML_matchresult_fake [display,gray] + @{ML_matchresult [display,gray] \@{term_pat "Suc (?x::nat)"}\ - \Const ("Suc", "nat \ nat") $ Var (("x", 0), "nat")\} + \Const ("Nat.Suc", _) $ Var (("x", 0), _)\} which shows the internal representation of the term \Suc ?x\. Similarly we can write an antiquotation for type patterns. Its code is @@ -980,19 +982,19 @@ text \ The data can be retrieved with the projection functions defined above. - @{ML_matchresult_fake [display, gray] -\project_int (nth foo_list 0); -project_bool (nth foo_list 1)\ -\13 + @{ML_response [display, gray] +\(project_int (nth foo_list 0), +project_bool (nth foo_list 1))\ +\13, true\} Notice that we access the integer as an integer and the boolean as a boolean. If we attempt to access the integer as a boolean, then we get a runtime error. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \project_bool (nth foo_list 0)\ -\*** exception Match raised\} +\exception Match raised\} This runtime error is the reason why ML is still type-sound despite containing a universal type. @@ -1062,7 +1064,7 @@ \emph{current} theory is updated (this is explained further in Section~\ref{sec:theories}). The lookup can now be performed as follows. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \lookup @{theory} "conj"\ \SOME "\?P; ?Q\ \ ?P \ ?Q"\} @@ -1073,10 +1075,11 @@ setup %gray \update "conj" @{thm TrueI}\ + text \ and accordingly, @{ML lookup} now produces the introduction rule for @{term "True"} -@{ML_matchresult_fake [display, gray] +@{ML_response [display, gray] \lookup @{theory} "conj"\ \SOME "True"\} @@ -1271,9 +1274,9 @@ The rules in the list can be retrieved using the function @{ML FooRules.get}: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \FooRules.get @{context}\ - \["True", "?C","?B"]\} + \["True", "?C", "?B"]\} Note that this function takes a proof context as argument. This might be confusing, since the theorem list is stored as theory data. It becomes clear by knowing diff -r 95b42288294e -r 438703674711 ProgTutorial/Package/Ind_Code.thy --- a/ProgTutorial/Package/Ind_Code.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Package/Ind_Code.thy Tue May 21 14:37:39 2019 +0200 @@ -493,7 +493,7 @@ theorems of the form @{thm [source] all_elims_test}. 
For example we can instantiate the quantifiers in this theorem with @{term a}, @{term b} and @{term c} as follows: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val ctrms = [@{cterm "a::nat"}, @{cterm "b::nat"}, @{cterm "c::nat"}] val new_thm = all_elims ctrms @{thm all_elims_test} @@ -510,13 +510,13 @@ For example we can eliminate the preconditions \A\ and \B\ from @{thm [source] imp_elims_test}: - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \let val res = imp_elims @{thms imp_elims_test'} @{thm imp_elims_test} in pwriteln (pretty_thm_no_vars @{context} res) end\ - \C\} + \Q\} Now we set up the proof for the introduction rule as follows: \ diff -r 95b42288294e -r 438703674711 ProgTutorial/Parsing.thy --- a/ProgTutorial/Parsing.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Parsing.thy Tue May 21 14:37:39 2019 +0200 @@ -69,9 +69,9 @@ or raise the exception \FAIL\ if no string can be consumed. For example trying to parse - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \($$ "x") (Symbol.explode "world")\ - \Exception FAIL raised\} + \exception FAIL NONE raised\} will raise the exception \FAIL\. The function @{ML_ind "$$" in Scan} will also fail if you try to consume more than a single character. @@ -97,14 +97,13 @@ sequences, for example \\\, that have a special meaning in Isabelle. To see the difference consider -@{ML_matchresult_fake [display,gray] -\let - val input = "\ bar" -in - (String.explode input, Symbol.explode input) -end\ -\(["\\", "<", "f", "o", "o", ">", " ", "b", "a", "r"], - ["\", " ", "b", "a", "r"])\} +@{ML_matchresult [display,gray] +\String.explode "\ bar"\ +\[#"\\", #"<", #"f", #"o", #"o", #">", #" ", #"b", #"a", #"r"]\} + +@{ML_matchresult [display,gray] +\Symbol.explode "\ bar"\ +\["\", " ", "b", "a", "r"]\} Slightly more general than the parser @{ML \$$\} is the function @{ML_ind one in Scan}, in that it takes a predicate as argument and @@ -236,16 +235,16 @@ but if you invoke it on @{text [quotes] "world"} - @{ML_matchresult_fake [display,gray] \(!! (fn _ => fn _ => "foo") ($$ "h")) (Symbol.explode "world")\ - \Exception ABORT raised\} + @{ML_response [display,gray] \(!! (fn _ => fn _ => "foo") ($$ "h")) (Symbol.explode "world")\ + \exception ABORT fn raised\} then the parsing aborts and the error message \foo\ is printed. In order to see the error message properly, you need to prefix the parser with the function @{ML_ind error in Scan}. For example: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Scan.error (!! (fn _ => fn _ => "foo") ($$ "h")) (Symbol.explode "world")\ - \Exception Error "foo" raised\} + \foo\} This kind of ``prefixing'' to see the correct error message is usually done by wrappers such as @{ML_ind local_theory in Outer_Syntax} @@ -269,8 +268,8 @@ @{text [quotes] "h"}, @{text [quotes] "e"} and @{text [quotes] "w"}, and the input @{text [quotes] "holle"} - @{ML_matchresult_fake [display,gray] \Scan.error (p_followed_by_q "h" "e" "w") (Symbol.explode "holle")\ - \Exception ERROR "h is not followed by e" raised\} + @{ML_response [display,gray] \Scan.error (p_followed_by_q "h" "e" "w") (Symbol.explode "holle")\ + \h is not followed by e\} produces the correct error message. Running it with @@ -318,9 +317,9 @@ The function @{ML_ind unless in Scan} takes two parsers: if the first one can parse the input, then the whole parser fails; if not, then the second is tried. 
Therefore - - @{ML_matchresult_fake_both [display,gray] \Scan.unless ($$ "h") ($$ "w") (Symbol.explode "hello")\ - \Exception FAIL raised\} + + @{ML_response [display,gray] \Scan.unless ($$ "h") ($$ "w") (Symbol.explode "hello")\ + "exception FAIL NONE raised"} fails, while @@ -501,11 +500,11 @@ messages. The following code -@{ML_matchresult_fake [display,gray] \Token.explode +@{ML_response [display,gray] \Token.explode (Thy_Header.get_keywords' @{context}) Position.none "hello world"\ -\[Token (_,(Ident, "hello"),_), - Token (_,(Space, " "),_), - Token (_,(Ident, "world"),_)]\} +\[Token (\(Ident, "hello"),\), + Token (\(Space, " "),\), + Token (\(Ident, "world"),\)]\} produces three tokens where the first and the last are identifiers, since @{text [quotes] "hello"} and @{text [quotes] "world"} do not match any @@ -523,24 +522,24 @@ text \ then lexing @{text [quotes] "hello world"} will produce - @{ML_matchresult_fake [display,gray] \Token.explode + @{ML_response [display,gray] \Token.explode (Thy_Header.get_keywords' @{context}) Position.none "hello world"\ -\[Token (_,(Keyword, "hello"),_), - Token (_,(Space, " "),_), - Token (_,(Ident, "world"),_)]\} +\[Token (\(Keyword, "hello"),\), + Token (\(Space, " "),\), + Token (\(Ident, "world"),\)]\} Many parsing functions later on will require white space, comments and the like to have already been filtered out. So from now on we are going to use the functions @{ML filter} and @{ML_ind is_proper in Token} to do this. For example: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val input = Token.explode (Thy_Header.get_keywords' @{context}) Position.none "hello world" in filter Token.is_proper input end\ -\[Token (_,(Ident, "hello"), _), Token (_,(Ident, "world"), _)]\} +\[Token (\(Keyword, "hello"), \), Token (\(Ident, "world"), \)]\} For convenience we define the function: \ @@ -552,11 +551,11 @@ text \ If you now parse -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \filtered_input "inductive | for"\ -\[Token (_,(Command, "inductive"),_), - Token (_,(Keyword, "|"),_), - Token (_,(Keyword, "for"),_)]\} +\[Token (\(Command, "inductive"),\), + Token (\(Keyword, "|"),\), + Token (\(Keyword, "for"),\)]\} you obtain a list consisting of only one command and two keyword tokens. If you want to see which keywords and commands are currently known to Isabelle, @@ -637,15 +636,15 @@ section). A difference, however, is that the error message of @{ML \Parse.!!!\} is fixed to be @{text [quotes] "Outer syntax error"} together with a relatively precise description of the failure. For example: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val input = filtered_input "in |" val parse_bar_then_in = Parse.$$$ "|" -- Parse.$$$ "in" in parse (Parse.!!! parse_bar_then_in) input end\ -\Exception ERROR "Outer syntax error: keyword "|" expected, -but keyword in was found" raised\ +\Outer syntax error: keyword "|" expected, +but keyword in was found\ } \begin{exercise} (FIXME) @@ -669,10 +668,10 @@ text \ where we pretend the parsed string starts on line 7. 
An example is -@{ML_matchresult_fake [display,gray] -\filtered_input' "foo \\n bar"\ -\[Token (("foo", ({line=7, end_line=7}, {line=7})), (Ident, "foo"), _), - Token (("bar", ({line=8, end_line=8}, {line=8})), (Ident, "bar"), _)]\} +@{ML_response [display,gray] +\filtered_input' "foo \n bar"\ +\[Token (("foo", ({line=7, offset=1, end_offset=4}, {line=7, offset=4})), (Ident, "foo"),\), + Token (("bar", ({line=8, offset=7, end_offset=10}, {line=8, offset=10})), (Ident, "bar"),\)]\} in which the @{text [quotes] "\\n"} causes the second token to be in line 8. @@ -680,13 +679,13 @@ By using the parser @{ML position in Parse} you can access the token position and return it as part of the parser result. For example -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val input = filtered_input' "where" in parse (Parse.position (Parse.$$$ "where")) input end\ -\(("where", {line=7, end_line=7}), [])\} +\(("where", {line=7, offset=1, end_offset=6}), [])\} \begin{readmore} The functions related to positions are implemented in the file @@ -751,7 +750,7 @@ for terms and types: you can just call the predefined parsers. Terms can be parsed using the function @{ML_ind term in Parse}. For example: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val input = Token.explode (Thy_Header.get_keywords' @{context}) Position.none "foo" in @@ -767,13 +766,13 @@ The result of the decoding is an XML-tree. You can see better what is going on if you replace @{ML Position.none} by @{ML \Position.line 42\}, say: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val input = Token.explode (Thy_Header.get_keywords' @{context}) (Position.line 42) "foo" in YXML.parse (fst (Parse.term input)) end\ -\Elem ("token", [("line", "42"), ("end_line", "42")], [XML.Text "foo"])\} +\foo\} The positional information is stored as part of an XML-tree so that code called later on will be able to give more precise error messages. @@ -847,13 +846,6 @@ ((oddS,_),_)]), [])\} \ -ML \let - val input = filtered_input - "foo::\"int \ bool\" and bar::nat (\"BAR\" 100) and blonk" -in - parse Parse.vars input -end\ - text \ As you see, the result is a pair consisting of a list of variables with optional type-annotation and syntax-annotation, and a list of @@ -866,16 +858,16 @@ \"int \ bool"\ in order to properly escape the double quotes in the compound type.} -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val input = filtered_input "foo::\"int \ bool\" and bar::nat (\"BAR\" 100) and blonk" in parse Parse.vars input end\ -\([(foo, SOME _, NoSyn), - (bar, SOME _, Mixfix (Source {text="BAR",...}, [], 100, _)), - (blonk, NONE, NoSyn)],[])\} +\([("foo", SOME \, NoSyn), + ("bar", SOME \, Mixfix (Source {\text = "BAR"}, [], 100, \)), + ("blonk", NONE, NoSyn)], [])\} \ text \ @@ -953,11 +945,7 @@ text \ Often new commands, for example for providing new definitional principles, - need to be implemented. While this is not difficult on the ML-level and for - jEdit, in order to be backwards compatible, new commands need also to be recognised - by Proof-General. This results in some subtle configuration issues, which we will - explain in the next section. Here we just describe how to define new commands - to work with jEdit. + need to be implemented. Let us start with a ``silly'' command that does nothing at all. We shall name this command \isacommand{foobar}. 
Before you can @@ -1000,9 +988,7 @@ text \ but of course you will not see anything since \isacommand{foobar} is - not intended to do anything. Remember, however, that this only - works in jEdit. In order to enable also Proof-General recognise this - command, a keyword file needs to be generated (see next section). + not intended to do anything. As it stands, the command \isacommand{foobar} is not very useful. Let us refine it a bit next by letting it take a proposition as argument diff -r 95b42288294e -r 438703674711 ProgTutorial/Readme.thy --- a/ProgTutorial/Readme.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Readme.thy Tue May 21 14:37:39 2019 +0200 @@ -90,7 +90,7 @@ \begin{center}\small \begin{tabular}{ll} - \@{ML_matchresult_fake\ & \\term_of @{theory} @{term "a + b = c"}\}\\\ + \@{ML_matchresult_fake\ & \\term_of @{theory} @{term "a + b = c"}\\\\ & \\a + b = c\}\\smallskip\\ \@{ML_matchresult_fake\ & \\($$ "x") (explode "world")\\\\ & \\Exception FAIL raised\}\ @@ -129,14 +129,42 @@ \end{tabular} \end{center} - which produce respectively + which produces + + \begin{center}\small + @{ML_response \1 upto 10\} + \end{center} + + You can give a second argument for the expected response. This is matched against the actual + response by crude wildcard matching where whitespace and \\\ are treated as wildcard. \begin{center}\small - \begin{tabular}{p{3cm}p{3cm}} - @{ML_response \1 upto 10\} + \begin{tabular}{ll} + \@{ML_response\ & \\1 upto 20\\\\ + & \"[1, 2, 3, 4, 5, 6\\\\ + & \18, 19, 20]"}\\\ \end{tabular} \end{center} - + + will produce + + @{ML_response \1 upto 20\ +\[1, 2, 3, 4, 5, 6\ + 18, 19, 20]\} + + Note that exceptions are also converted to strings and can thus be checked in the response + string. + + @{ML_response \error "hallo"\ + \hallo\} + +So as a rule of thumb, to facilitate result checking use prefer this order: +\begin{enumerate} + \item \@{ML_matchresult \expr\ \pat\}\ + \item \@{ML_response \expr\ \string\}\ + \item \@{ML_matchresult_fake \expr\ \pat\}\ or \@{ML_response \expr\}\ + \item \@{ML_matchresult_fake_both \expr\ \pat\}\ +\end{enumerate} \item[$\bullet$] \@{ML_file "name"}\ should be used when referring to a file. It checks whether the file exists. An example diff -r 95b42288294e -r 438703674711 ProgTutorial/Recipes/ExternalSolver.thy --- a/ProgTutorial/Recipes/ExternalSolver.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Recipes/ExternalSolver.thy Tue May 21 14:37:39 2019 +0200 @@ -27,7 +27,7 @@ properly. For example, the following expression takes only approximately one second: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Timeout.apply (Time.fromSeconds 1) Isabelle_System.bash_output "sleep 30" handle TIMEOUT => ("timeout", ~1)\ \("timeout", ~1)\} @@ -46,7 +46,7 @@ In Isabelle, this application may now be executed by - @{ML_matchresult_fake [display,gray] \Isabelle_System.bash_output "$FOO"\ \\\} + @{ML_response [display,gray] \Isabelle_System.bash_output "$FOO"\ \\\} \ diff -r 95b42288294e -r 438703674711 ProgTutorial/Recipes/Oracle.thy --- a/ProgTutorial/Recipes/Oracle.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Recipes/Oracle.thy Tue May 21 14:37:39 2019 +0200 @@ -110,7 +110,7 @@ text \ Here is what we get when applying the oracle: - @{ML_matchresult_fake \iff_oracle @{cprop "p = (p::bool)"}\ \p = p\} + @{ML_response \iff_oracle @{cprop "p = (p::bool)"}\ \p = p\} (FIXME: is there a better way to present the theorem?) 
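text ‹
  Regarding the question above, one possibility is to pretty-print the resulting
  theorem explicitly. The following sketch assumes that the helper functions
  pretty_thm and pwriteln from the First Steps chapter are in scope here.
›

ML ‹
  (* pretty-print the theorem produced by the oracle *)
  iff_oracle @{cprop "p = (p::bool)"}
  |> pretty_thm @{context}
  |> pwriteln
›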
diff -r 95b42288294e -r 438703674711 ProgTutorial/Recipes/Sat.thy --- a/ProgTutorial/Recipes/Sat.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Recipes/Sat.thy Tue May 21 14:37:39 2019 +0200 @@ -42,7 +42,7 @@ (appropriately updated) in the result. In the case above the input table is empty (i.e.~@{ML Termtab.empty}) and the output table is - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Termtab.dest table\ \[(Free ("A", "bool"), 1), (Free ("B", "bool"), 2)]\} @@ -58,9 +58,9 @@ returns @{ML \BoolVar 1\ in Prop_Logic} for @{ML pform'} and the table @{ML table'} is: - @{ML_matchresult_fake [display,gray] - \map (apfst (Syntax.string_of_term @{context})) (Termtab.dest table')\ - \(\x. P x, 1)\} + @{ML_response [display,gray] + \map (apfst (YXML.content_of o Syntax.string_of_term @{context})) (Termtab.dest table')\ + \("\x. P x", 1)\} In the print out of the tabel, we used some pretty printing scaffolding to see better which assignment the table contains. diff -r 95b42288294e -r 438703674711 ProgTutorial/Solutions.thy --- a/ProgTutorial/Solutions.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Solutions.thy Tue May 21 14:37:39 2019 +0200 @@ -55,7 +55,7 @@ \-1\ to account for the deleted quantifier. An example is as follows: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \@{prop "\x y z. P x = P z"} |> kill_trivial_quantifiers |> pretty_term @{context} @@ -330,9 +330,9 @@ text \ This function generates for example: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \pwriteln (pretty_term @{context} (term_tree 2))\ - \(1 + 2) + (3 + 4)\} + \1 + 2 + (3 + 4)\} The next function constructs a goal of the form \P \\ with a term produced by @{ML term_tree} filled in. diff -r 95b42288294e -r 438703674711 ProgTutorial/Tactical.thy --- a/ProgTutorial/Tactical.thy Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/Tactical.thy Tue May 21 14:37:39 2019 +0200 @@ -47,7 +47,7 @@ text \ This proof translates to the following ML-code. -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val ctxt = @{context} val goal = @{prop "P \ Q \ Q \ P"} @@ -763,17 +763,19 @@ constraints is by pre-instantiating theorems with other theorems. The function @{ML_ind RS in Drule}, for example, does this. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \@{thm disjI1} RS @{thm conjI}\ \\?P1; ?Q\ \ (?P1 \ ?Q1) \ ?Q\} In this example it instantiates the first premise of the \conjI\-theorem with the theorem \disjI1\. If the instantiation is impossible, as in the case of - @{ML_matchresult_fake_both [display,gray] + @{ML_response [display,gray] \@{thm conjI} RS @{thm mp}\ -\*** Exception- THM ("RSN: no unifiers", 1, -["\?P; ?Q\ \ ?P \ ?Q", "\?P \ ?Q; ?P\ \ ?Q"]) raised\} +\exception THM 1 raised\ + RSN: no unifiers + \?P; ?Q\ \ ?P \ ?Q + \?P \ ?Q; ?P\ \ ?Q\} then the function raises an exception. The function @{ML_ind RSN in Drule} is similar to @{ML RS}, but takes an additional number as argument. This @@ -782,7 +784,7 @@ If you want to instantiate more than one premise of a theorem, you can use the function @{ML_ind MRS in Drule}: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \[@{thm disjI1}, @{thm disjI2}] MRS @{thm conjI}\ \\?P2; ?Q1\ \ (?P2 \ ?Q2) \ (?P1 \ ?Q1)\} @@ -791,17 +793,17 @@ example in the code below, every theorem in the second list is instantiated with every theorem in the first. 
- @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val list1 = [@{thm impI}, @{thm disjI2}] val list2 = [@{thm conjI}, @{thm disjI1}] in list1 RL list2 end\ -\[\?P1 \ ?Q1; ?Q\ \ (?P1 \ ?Q1) \ ?Q, - \?Q1; ?Q\ \ (?P1 \ ?Q1) \ ?Q, - (?P1 \ ?Q1) \ (?P1 \ ?Q1) \ ?Q, - ?Q1 \ (?P1 \ ?Q1) \ ?Q]\} +\["\?P1 \ ?Q1; ?Q\ \ (?P1 \ ?Q1) \ ?Q", + "\?Q1; ?Q\ \ (?P1 \ ?Q1) \ ?Q", + "(?P1 \ ?Q1) \ (?P1 \ ?Q1) \ ?Q", + "?Q1 \ (?P1 \ ?Q1) \ ?Q"]\} \begin{readmore} The combinators for instantiating theorems with other theorems are @@ -1458,7 +1460,7 @@ prints out some parts of a simpset. If you use it to print out the components of the empty simpset, i.e., @{ML_ind empty_ss in Raw_Simplifier} - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \print_ss @{context} empty_ss\ \Simplification rules: Congruences rules: @@ -1474,10 +1476,10 @@ text \ Printing then out the components of the simpset gives: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \print_ss @{context} (Raw_Simplifier.simpset_of ss1)\ \Simplification rules: - ??.unknown: A - B \ C \ A - B \ (A - C) + ??.unknown: A1 - B1 \ C1 \ A1 - B1 \ (A1 - C1) Congruences rules: Simproc patterns:\} @@ -1491,12 +1493,12 @@ text \ gives - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \print_ss @{context} (Raw_Simplifier.simpset_of ss2)\ \Simplification rules: - ??.unknown: A - B \ C \ A - B \ (A - C) + ??.unknown: A1 - B1 \ C1 \ A1 - B1 \ (A1 - C1) Congruences rules: - Complete_Lattices.Inf_class.INFIMUM: + Complete_Lattices.Inf_class.Inf: \A1 = B1; \x. x \ B1 =simp=> C1 x = D1 x\ \ INFIMUM A1 C1 \ INFIMUM B1 D1 Simproc patterns:\} @@ -1508,7 +1510,7 @@ In the context of HOL, the first really useful simpset is @{ML_ind HOL_basic_ss in Simpdata}. While printing out the components of this simpset - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \print_ss @{context} HOL_basic_ss\ \Simplification rules: Congruences rules: @@ -1534,6 +1536,7 @@ apply(tactic \ALLGOALS (simp_tac (put_simpset HOL_basic_ss @{context}))\) done +declare [[ML_print_depth = 200]] text \ This behaviour is not because of simplification rules, but how the subgoaler, solver and looper are set up in @{ML HOL_basic_ss}. @@ -1542,17 +1545,17 @@ already many useful simplification and congruence rules for the logical connectives in HOL. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \print_ss @{context} HOL_ss\ \Simplification rules: Pure.triv_forall_equality: (\x. PROP V) \ PROP V HOL.the_eq_trivial: THE x. x = y \ y - HOL.the_sym_eq_trivial: THE ya. y = ya \ y + HOL.the_sym_eq_trivial: THE y. y = y \ y \ Congruences rules: HOL.simp_implies: \ \ (PROP P =simp=> PROP Q) \ (PROP P' =simp=> PROP Q') - op -->: \P \ P'; P' \ Q \ Q'\ \ P \ Q \ P' \ Q' + HOL.implies: \P \ P'; P' \ Q \ Q'\ \ P \ Q \ P' \ Q' Simproc patterns: \\} @@ -2064,16 +2067,16 @@ is the function @{ML_ind all_conv in Conv}, which maps a @{ML_type cterm} to an instance of the (meta)reflexivity theorem. For example: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Conv.all_conv @{cterm "Foo \ Bar"}\ \Foo \ Bar \ Foo \ Bar\} Another simple conversion is @{ML_ind no_conv in Conv} which always raises the exception @{ML CTERM}. 
- @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Conv.no_conv @{cterm True}\ - \*** Exception- CTERM ("no conversion", []) raised\} + \exception CTERM \: no conversion\} A more interesting conversion is the function @{ML_ind beta_conversion in Thm}: it produces a meta-equation between a term and its beta-normal form. For example @@ -2111,7 +2114,7 @@ or in the pretty-printed form - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val add = @{cterm "\x y. x + (y::nat)"} val two = @{cterm "2::nat"} @@ -2146,7 +2149,7 @@ It can be used for example to rewrite @{term "True \ (Foo \ Bar)"} to @{term "Foo \ Bar"}. The code is as follows. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val ctrm = @{cterm "True \ (Foo \ Bar)"} in @@ -2182,7 +2185,7 @@ The conversion combinator @{ML_ind else_conv in Conv} tries out the first one, and if it does not apply, tries the second. For example - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val conv = Conv.rewr_conv @{thm true_conj1} else_conv Conv.all_conv val ctrm1 = @{cterm "True \ Q"} @@ -2190,7 +2193,7 @@ in (conv ctrm1, conv ctrm2) end\ -\(True \ Q \ Q, P \ True \ Q \ P \ True \ Q)\} +\("True \ Q \ Q", "P \ True \ Q \ P \ True \ Q")\} Here the conversion @{thm [source] true_conj1} only applies in the first case, but fails in the second. The whole conversion @@ -2199,7 +2202,7 @@ behaviour can also be achieved with conversion combinator @{ML_ind try_conv in Conv}. For example - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val conv = Conv.try_conv (Conv.rewr_conv @{thm true_conj1}) val ctrm = @{cterm "True \ P"} @@ -2220,14 +2223,14 @@ a conversion to the first, respectively second, argument of an application. For example - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val conv = Conv.arg_conv (Conv.rewr_conv @{thm true_conj1}) val ctrm = @{cterm "P \ (True \ Q)"} in conv ctrm end\ -\P \ (True \ Q) \ P \ Q\} +\P \ True \ Q \ P \ Q\} The reason for this behaviour is that \(op \)\ expects two arguments. Therefore the term must be of the form \(Const \ $ t1) $ t2\. The @@ -2238,14 +2241,14 @@ The function @{ML_ind abs_conv in Conv} applies a conversion under an abstraction. For example: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val conv = Conv.rewr_conv @{thm true_conj1} val ctrm = @{cterm "\P. True \ (P \ Foo)"} in Conv.abs_conv (K conv) @{context} ctrm end\ - \\P. True \ (P \ Foo) \ \P. P \ Foo\} + \"\P. True \ P \ Foo \ \P. P \ Foo"\} Note that this conversion needs a context as an argument. We also give the conversion as \(K conv)\, which is a function that ignores its @@ -2279,7 +2282,7 @@ to be able to pattern-match the term. To see this conversion in action, consider the following example: -@{ML_matchresult_fake [display,gray] +@{ML_response [display,gray] \let val conv = true_conj1_conv @{context} val ctrm = @{cterm "distinct [1::nat, x] \ True \ 1 \ x"} @@ -2358,7 +2361,7 @@ soon as it found one redex. 
Here is an example for this conversion: - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val ctrm = @{cterm "if P (True \ 1 \ (2::nat)) then True \ True else True \ False"} @@ -2382,7 +2385,7 @@ Using the conversion you can transform this theorem into a new theorem as follows - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \let val conv = Conv.fconv_rule (true_conj1_conv @{context}) val thm = @{thm foo_test} @@ -2465,7 +2468,7 @@ into @{thm [source] id2_def} by abstracting all variables on the left-hand side in the right-hand side. - @{ML_matchresult_fake [display,gray] + @{ML_response [display,gray] \Drule.abs_def @{thm id1_def}\ \id1 \ \x. x\} @@ -2513,7 +2516,7 @@ export the context \ctxt'\ back to \ctxt\ in order to produce meta-variables for the theorem. An example is as follows. - @{ML_matchresult_fake [display, gray] + @{ML_response [display, gray] \unabs_def @{context} @{thm id2_def}\ \id2 ?x1 \ ?x1\} \ diff -r 95b42288294e -r 438703674711 ProgTutorial/antiquote_setup.ML --- a/ProgTutorial/antiquote_setup.ML Fri May 17 11:21:09 2019 +0200 +++ b/ProgTutorial/antiquote_setup.ML Tue May 21 14:37:39 2019 +0200 @@ -12,6 +12,27 @@ fun prefix_lines prfx txt = map (fn s => prfx ^ s) (split_lines txt) +fun is_sep "\" = true + | is_sep s = Symbol.is_ascii_blank s; + +fun scan_word sep = + Scan.many1 sep >> K NONE || + Scan.many1 (fn s => not (sep s) andalso Symbol.not_eof s) >> (SOME o implode); + +fun split_words sep = Symbol.scanner "Bad text" (Scan.repeat (scan_word sep) >> map_filter I); + +fun explode_words sep = split_words sep o Symbol.explode; + +fun match_string sep pat str = + let + fun match [] _ = true + | match (p :: ps) s = + size p <= size s andalso + (case try (unprefix p) s of + SOME s' => match ps s' + | NONE => match (p :: ps) (String.substring (s, 1, size s - 1))); + in match (explode_words sep pat) str end; + fun ml_with_vars' ys txt = implode ["fn ", (case ys of [] => "_" | _ => enclose "(" ")" (commas ys)), " => (", txt, ")"] @@ -70,14 +91,20 @@ (Scan.optional (Args.$$$ "for" |-- Parse.!!! (Scan.repeat1 Args.name)) [] -- Scan.option (Args.$$$ "in" |-- Parse.!!! 
Args.name))) -fun output_ml_response ctxt src = +fun output_ml_response ignore_pat ctxt (src, opat) = let val res = eval_response ctxt src + val _ = writeln res + val cnt = YXML.content_of res + val pat = case opat of NONE => cnt + | SOME p => p |> Input.source_content + val _ = if ignore_pat orelse Print_Mode.print_mode_active Latex.latexN orelse match_string is_sep pat cnt then () + else error (cat_lines ["Substring:", pat, "not contained in:", cnt]) + val out = if ignore_pat then cnt else pat in - OutputTutorial.output ctxt ([Input.source_content src] @ [Library.prefix_lines "> " res]) + OutputTutorial.output ctxt ([Input.source_content src] @ [Library.prefix_lines "> " out]) end - (* checks and prints a single ML-item and produces an index entry *) fun output_ml_ind ctxt (src, stru) = let @@ -149,11 +176,13 @@ val single_arg = Scan.lift (Args.text_input) val two_args = Scan.lift (Args.text_input -- Args.text_input) +val maybe_two_args = Scan.lift (Args.text_input -- Scan.option Args.text_input) val test = Scan.lift (Args.name -- Args.name -- Scan.option (Args.$$$ "with" |-- Args.name)) val ml_setup = Thy_Output.antiquotation_raw @{binding "ML"} parser_ml output_ml - #> Thy_Output.antiquotation_raw @{binding "ML_response"} single_arg output_ml_response + #> Thy_Output.antiquotation_raw @{binding "ML_response"} maybe_two_args (output_ml_response false) + #> Thy_Output.antiquotation_raw @{binding "ML_response_ignore"} maybe_two_args (output_ml_response true) #> Thy_Output.antiquotation_raw @{binding "ML_ind"} parser_ml_ind output_ml_ind #> Thy_Output.antiquotation_raw @{binding "ML_type_ind"} single_arg output_type_ind #> Thy_Output.antiquotation_raw @{binding "ML_structure_ind"} single_arg output_struct_ind
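To see the wildcard matching of the revised @{text "ML_response"} antiquotation in action, here is a small usage sketch. It assumes that @{text "match_string"} and @{text "is_sep"} as defined above are accessible at the ML toplevel (for example by evaluating their definitions in an ML block); whitespace and \<dots> in the pattern act as wildcards, so the pattern words merely have to occur in this order in the response string.

ML \<open>
(* sketch only: the pattern is split at whitespace and \<dots>; each resulting
   word must occur, in the given order, somewhere in the response string *)
val _ = @{assert} (match_string is_sep "[1, 2, 3 \<dots> 20]" "[1, 2, 3, 4, 5, 19, 20]")
val _ = @{assert} (not (match_string is_sep "foo bar" "foo baz"))
\<close>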