diff -r 14c3dd5ee2ad -r 7b8c4fe235aa CookBook/FirstSteps.thy
--- a/CookBook/FirstSteps.thy	Wed Jan 14 21:46:04 2009 +0000
+++ b/CookBook/FirstSteps.thy	Wed Jan 14 23:44:14 2009 +0000
@@ -68,14 +68,14 @@
   in your code. This can be done in a ``quick-and-dirty'' fashion using
   the function @{ML "warning"}. For example

-  @{ML_response_fake [display] "warning \"any string\"" "\"any string\""}
+  @{ML_response_fake [display,gray] "warning \"any string\"" "\"any string\""}

   will print out @{text [quotes] "any string"} inside the response buffer
   of Isabelle. This function expects a string as argument. If you develop under PolyML,
   then there is a convenient, though again ``quick-and-dirty'', method for
   converting values into strings, namely @{ML makestring}:

-  @{ML_response_fake [display] "warning (makestring 1)" "\"1\""}
+  @{ML_response_fake [display,gray] "warning (makestring 1)" "\"1\""}

   However @{ML makestring} only works if the type of what is converted is monomorphic
   and is not a function.
@@ -86,7 +86,7 @@
   function @{ML tracing} is more appropriate. This function writes all output into
   a separate tracing buffer. For example

-  @{ML_response_fake [display] "tracing \"foo\"" "\"foo\""}
+  @{ML_response_fake [display,gray] "tracing \"foo\"" "\"foo\""}

   It is also possible to redirect the ``channel'' where the string @{text "foo"} is
   printed to a separate file, e.g. to prevent ProofGeneral from choking on massive
@@ -107,10 +107,8 @@
      TextIO.flushOut stream))
 *}

 text {*
-
   Calling @{ML "redirect_tracing"} with @{ML "(TextIO.openOut \"foo.bar\")"}
   will cause that all tracing information is printed into the file @{text "foo.bar"}.
-
 *}
@@ -124,7 +122,7 @@
   theory by typing

-  @{ML_response [display] "Context.theory_name @{theory}" "\"FirstSteps\""}
+  @{ML_response [display,gray] "Context.theory_name @{theory}" "\"FirstSteps\""}

   where @{text "@{theory}"} is an antiquotation that is substituted with the
   current theory (remember that we assumed we are inside the theory
@@ -149,8 +147,8 @@
   In a similar way you can use antiquotations to refer to theorems or simpsets:

-  @{ML_response_fake [display] "@{thm allI}" "(\x. ?P x) \ \x. ?P x"}
-  @{ML_response_fake [display]
+  @{ML_response_fake [display,gray] "@{thm allI}" "(\x. ?P x) \ \x. ?P x"}
+  @{ML_response_fake [display,gray]
 "let
   val ({rules,...},_) = MetaSimplifier.rep_ss @{simpset}
 in
@@ -186,7 +184,7 @@
   One way to construct terms of Isabelle on the ML-level is by using the antiquotation
   \mbox{@{text "@{term \}"}}. For example

-  @{ML_response [display]
+  @{ML_response [display,gray]
 "@{term \"(a::nat) + b = c\"}"
 "Const (\"op =\", \) $ (Const (\"HOL.plus_class.plus\", \) $ \ $ \) $ \"}
@@ -223,27 +221,27 @@
   may omit parts of it by default. If you want to see all of it, you can use
   the following ML function to set the limit to a value high enough:

-  \end{exercise}

   @{ML [display] "print_depth 50"}
+  \end{exercise}

   The antiquotation @{text "@{prop \}"} constructs terms of propositional type,
   inserting the invisible @{text "Trueprop"}-coercions whenever necessary.
   Consider for example the pairs

-  @{ML_response [display] "(@{term \"P x\"}, @{prop \"P x\"})" "(Free (\"P\", \) $ Free (\"x\", \),
+  @{ML_response [display,gray] "(@{term \"P x\"}, @{prop \"P x\"})" "(Free (\"P\", \) $ Free (\"x\", \),
   Const (\"Trueprop\", \) $ (Free (\"P\", \) $ Free (\"x\", \)))"}

   where an coercion is inserted in the second component and

-  @{ML_response [display] "(@{term \"P x \ Q x\"}, @{prop \"P x \ Q x\"})"
+  @{ML_response [display,gray] "(@{term \"P x \ Q x\"}, @{prop \"P x \ Q x\"})"
   "(Const (\"==>\", \) $ \ $ \, Const (\"==>\", \) $ \ $ \)"}

-  where it is not (since it is already constructed using the meta-implication).
+  where it is not (since it is already constructed by a meta-implication).

   Types can be constructed using the antiquotation @{text "@{typ \}"}. For example

-  @{ML_response_fake [display] "@{typ \"bool \ nat\"}" "bool \ nat"}
+  @{ML_response_fake [display,gray] "@{typ \"bool \ nat\"}" "bool \ nat"}

   \begin{readmore}
   Types are described in detail in \isccite{sec:types}. Their
@@ -283,7 +281,7 @@
   to both functions. With @{ML make_imp} we obtain the correct term involving
   @{term "S"}, @{text "T"} and @{text "@{typ nat}"}

-  @{ML_response [display] "make_imp @{term S} @{term T} @{typ nat}"
+  @{ML_response [display,gray] "make_imp @{term S} @{term T} @{typ nat}"
   "Const \ $
     Abs (\"x\", Type (\"nat\",[]),
       Const \ $ (Free (\"S\",\) $ \) $
@@ -292,7 +290,7 @@
   With @{ML make_wrong_imp} we obtain an incorrect term involving the @{term "P"}
   and @{text "Q"} from the antiquotation.

-  @{ML_response [display] "make_wrong_imp @{term S} @{term T} @{typ nat}"
+  @{ML_response [display,gray] "make_wrong_imp @{term S} @{term T} @{typ nat}"
   "Const \ $
     Abs (\"x\", \,
       Const \ $ (Const \ $ (Free (\"P\",\) $ \)) $
@@ -314,7 +312,7 @@
   antiquotation @{text "@{const_name \}"} which does the expansion
   automatically, for example:

-  @{ML_response_fake [display] "@{const_name \"Nil\"}" "List.list.Nil"}
+  @{ML_response_fake [display,gray] "@{const_name \"Nil\"}" "List.list.Nil"}

   (FIXME: Is it useful to explain @{text "@{const_syntax}"}?)
@@ -377,19 +375,19 @@
   the @{ML "@{theory}"} antiquotation to get hold of the current theory.
   For example we can write

-  @{ML_response_fake [display] "cterm_of @{theory} @{term \"a + b = c\"}" "a + b = c"}
+  @{ML_response_fake [display,gray] "cterm_of @{theory} @{term \"a + b = c\"}" "a + b = c"}

   or use the antiquotation

-  @{ML_response_fake [display] "@{cterm \"(a::nat) + b = c\"}" "a + b = c"}
+  @{ML_response_fake [display,gray] "@{cterm \"(a::nat) + b = c\"}" "a + b = c"}

   Attempting

-  @{ML_response_fake_both [display] "@{cterm \"1 + True\"}" "Type unification failed \"}
+  @{ML_response_fake_both [display,gray] "@{cterm \"1 + True\"}" "Type unification failed \"}

   yields an error. A slightly more elaborate example is

-@{ML_response_fake [display]
+@{ML_response_fake [display,gray]
 "let
   val natT = @{typ \"nat\"}
   val zero = @{term \"0::nat\"}
 in
@@ -426,7 +424,7 @@
   application. This combinator, and several variants are defined in
   @{ML_file "Pure/General/basics.ML"}.}

-@{ML_response_fake [display]
+@{ML_response_fake [display,gray]
 "let
   val thy = @{theory}
@@ -497,6 +495,7 @@
   Tactics are functions that map a goal state to a (lazy) sequence of successor
   states, hence the type of a tactic is

+  @{ML_type[display] "thm -> thm Seq.seq"}

   \begin{readmore}
@@ -531,7 +530,7 @@
   goal is proved. The @{text "tac"} is the tactic which proves the goal and
   which can make use of the local assumptions (there are none in this example).
-@{ML_response_fake [display]
+@{ML_response_fake [display,gray]
 "let
   val ctxt = @{context}
   val goal = @{prop \"P \ Q \ Q \ P\"}
@@ -552,7 +551,7 @@
   An alternative way to transcribe this proof is as follows

-@{ML_response_fake [display]
+@{ML_response_fake [display,gray]
 "let
   val ctxt = @{context}
   val goal = @{prop \"P \ Q \ Q \ P\"}