# HG changeset patch # User griff # Date 1248173366 -7200 # Node ID 2ff4867c00cf3d791cf94046d35c6f329c28d95e # Parent c354e45d80d2b97382198d7bedc5b320331bc013# Parent 949957531f63ebf1bad6dad9bab2b71e1bd39a3d merged diff -r 949957531f63 -r 2ff4867c00cf IsaMakefile --- a/IsaMakefile Tue Jul 21 12:27:12 2009 +0200 +++ b/IsaMakefile Tue Jul 21 12:49:26 2009 +0200 @@ -2,8 +2,6 @@ ## targets default: tutorial -images: -test: all: tutorial @@ -26,10 +24,11 @@ ProgTutorial/*.ML \ ProgTutorial/Recipes/*.thy \ ProgTutorial/Package/*.thy \ - ProgTutorial/Package/*.ML + ProgTutorial/Package/*.ML + @rm -rf ProgTutorial/generated/* $(USEDIR) HOL ProgTutorial $(ISATOOL) version > ProgTutorial/generated/version - poly -v > ProgTutorial/generated/pversion + $(ML_HOME)/poly -v > ProgTutorial/generated/pversion $(ISATOOL) document -o pdf ProgTutorial/generated @cp ProgTutorial/document.pdf progtutorial.pdf diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Base.thy --- a/ProgTutorial/Base.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Base.thy Tue Jul 21 12:49:26 2009 +0200 @@ -1,6 +1,7 @@ theory Base -imports Main +imports Main LaTeXsugar uses + "output_tutorial.ML" "chunks.ML" "antiquote_setup.ML" begin @@ -8,38 +9,33 @@ (* to have a special tag for text enclosed in ML *) ML {* -fun inherit_env (context as Context.Proof lthy) = - Context.Proof (LocalTheory.map_contexts (ML_Context.inherit_env context) lthy) - | inherit_env context = context; +fun propagate_env (context as Context.Proof lthy) = + Context.Proof (LocalTheory.map_contexts (ML_Env.inherit context) lthy) + | propagate_env context = context -fun inherit_env_prf prf = Proof.map_contexts - (Context.proof_map (ML_Context.inherit_env (Context.Proof (Proof.context_of prf)))) prf +fun propagate_env_prf prf = Proof.map_contexts + (Context.proof_map (ML_Env.inherit (Context.Proof (Proof.context_of prf)))) prf val _ = OuterSyntax.command "ML" "eval ML text within theory" (OuterKeyword.tag "TutorialML" OuterKeyword.thy_decl) (OuterParse.ML_source >> (fn (txt, pos) => Toplevel.generic_theory - (ML_Context.exec (fn () => ML_Context.eval true pos txt) #> inherit_env))); - + (ML_Context.exec (fn () => ML_Context.eval true pos txt) #> propagate_env))) val _ = OuterSyntax.command "ML_prf" "ML text within proof" (OuterKeyword.tag "TutorialML" OuterKeyword.prf_decl) (OuterParse.ML_source >> (fn (txt, pos) => Toplevel.proof (Proof.map_context (Context.proof_map - (ML_Context.exec (fn () => ML_Context.eval true pos txt))) #> inherit_env_prf))) + (ML_Context.exec (fn () => ML_Context.eval true pos txt))) #> propagate_env_prf))) val _ = OuterSyntax.command "ML_val" "diagnostic ML text" (OuterKeyword.tag "TutorialML" OuterKeyword.diag) - (OuterParse.ML_source >> IsarCmd.ml_diag true); + (OuterParse.ML_source >> IsarCmd.ml_diag true) *} -(* -ML {* - fun warning str = str - fun tracing str = str -*} -*) -end + + +end \ No newline at end of file diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/FirstSteps.thy --- a/ProgTutorial/FirstSteps.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/FirstSteps.thy Tue Jul 21 12:49:26 2009 +0200 @@ -4,20 +4,20 @@ chapter {* First Steps *} + text {* - - Isabelle programming is done in ML. Just like lemmas and proofs, ML-code + Isabelle programming is done in ML. Just like lemmas and proofs, ML-code in Isabelle is part of a theory. 
If you want to follow the code given in this chapter, we assume you are working inside the theory starting with - \begin{center} + \begin{quote} \begin{tabular}{@ {}l} \isacommand{theory} FirstSteps\\ \isacommand{imports} Main\\ \isacommand{begin}\\ \ldots \end{tabular} - \end{center} + \end{quote} We also generally assume you are working with HOL. The given examples might need to be adapted if you work in a different logic. @@ -55,12 +55,29 @@ \isacommand{ML}~@{text "\ \ \"} scaffolding whenever we show code. The lines prefixed with @{text [quotes] ">"} are not part of the code, rather they indicate what the response is when the code is evaluated. + There are also the commands \isacommand{ML\_val} and \isacommand{ML\_prf} for + including ML-code. The first evaluates the given code, but any effect on the + theory, in which the code is embedded, is suppressed. The second needs to + be used if ML-code is defined + inside a proof. For example + + \begin{quote} + \begin{isabelle} + \isacommand{lemma}~@{text "test:"}\isanewline + \isacommand{shows}~@{text [quotes] "True"}\isanewline + \isacommand{ML\_prf}~@{text "\"}~@{ML "writeln \"Trivial!\""}~@{text "\"}\isanewline + \isacommand{oops} + \end{isabelle} + \end{quote} + + However, both commands will only play minor roles in this tutorial (we will always + arrange that the ML-code is defined outside of proofs, for example). Once a portion of code is relatively stable, you usually want to export it to a separate ML-file. Such files can then be included somewhere inside a theory by using the command \isacommand{use}. For example - \begin{center} + \begin{quote} \begin{tabular}{@ {}l} \isacommand{theory} FirstSteps\\ \isacommand{imports} Main\\ @@ -70,13 +87,13 @@ \isacommand{use}~@{text "\"file_to_be_included.ML\""}\\ \ldots \end{tabular} - \end{center} + \end{quote} The \isacommand{uses}-command in the header of the theory is needed in order to indicate the dependency of the theory on the ML-file. Alternatively, the file can be included by just writing in the header - \begin{center} + \begin{quote} \begin{tabular}{@ {}l} \isacommand{theory} FirstSteps\\ \isacommand{imports} Main\\ @@ -84,9 +101,13 @@ \isacommand{begin}\\ \ldots \end{tabular} - \end{center} - - Note that no parentheses are given this time. + \end{quote} + + Note that no parentheses are given this time. Note also that the + included ML-file should not contain any + \isacommand{use} itself. Otherwise Isabelle is unable to record all + file dependencies, which is a nuisance if you have to track down + errors. *} section {* Debugging and Printing\label{sec:printing} *} @@ -95,24 +116,24 @@ During development you might find it necessary to inspect some data in your code. This can be done in a ``quick-and-dirty'' fashion using - the function @{ML "warning"}. For example - - @{ML_response_fake [display,gray] "warning \"any string\"" "\"any string\""} + the function @{ML [index] "writeln"}. For example + + @{ML_response_fake [display,gray] "writeln \"any string\"" "\"any string\""} will print out @{text [quotes] "any string"} inside the response buffer of Isabelle. This function expects a string as argument. 
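  Anything you want to inspect this way therefore has to be assembled into a
  single string first. As a throwaway probe (our own example, not part of the
  tutorial's code base), one can glue pieces together with @{text "^"} and
  @{ML string_of_int}:
*}

ML{*(* assemble one message string by hand and print it;
   string_of_int converts an integer into a string *)
writeln ("length of the list: " ^ string_of_int (length [1, 2, 3]))*}

text {*
  Doing such conversions by hand quickly becomes tedious.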
If you develop under PolyML, then there is a convenient, though again ``quick-and-dirty'', method for - converting values into strings, namely the function @{ML makestring}: - - @{ML_response_fake [display,gray] "warning (makestring 1)" "\"1\""} - - However, @{ML makestring} only works if the type of what is converted is monomorphic + converting values into strings, namely the function @{ML PolyML.makestring}: + + @{ML_response_fake [display,gray] "writeln (PolyML.makestring 1)" "\"1\""} + + However, @{ML [index] makestring in PolyML} only works if the type of what is converted is monomorphic and not a function. - The function @{ML "warning"} should only be used for testing purposes, because any + The function @{ML "writeln"} should only be used for testing purposes, because any output this function generates will be overwritten as soon as an error is raised. For printing anything more serious and elaborate, the - function @{ML tracing} is more appropriate. This function writes all output into + function @{ML [index] tracing} is more appropriate. This function writes all output into a separate tracing buffer. For example: @{ML_response_fake [display,gray] "tracing \"foo\"" "\"foo\""} @@ -139,13 +160,14 @@ Calling @{ML "redirect_tracing"} with @{ML "(TextIO.openOut \"foo.bar\")"} will cause that all tracing information is printed into the file @{text "foo.bar"}. - You can print out error messages with the function @{ML error}; for example: + You can print out error messages with the function @{ML [index] error}; for example: @{ML_response_fake [display,gray] "if 0=1 then true else (error \"foo\")" "Exception- ERROR \"foo\" raised At command \"ML\"."} - (FIXME @{ML Toplevel.debug} @{ML Toplevel.profiling}) + (FIXME Mention how to work with @{ML [index] debug in Toplevel} and + @{ML [index] profiling in Toplevel}.) *} (* @@ -164,108 +186,107 @@ text {* Most often you want to inspect data of type @{ML_type term}, @{ML_type cterm} - or @{ML_type thm}. Isabelle contains elaborate pretty-printing functions for printing them, + or @{ML_type thm}. Isabelle contains elaborate pretty-printing functions for printing + them (see Section \ref{sec:pretty}), but for quick-and-dirty solutions they are far too unwieldy. A simple way to transform - a term into a string is to use the function @{ML Syntax.string_of_term}. + a term into a string is to use the function @{ML [index] string_of_term in Syntax}. @{ML_response_fake [display,gray] "Syntax.string_of_term @{context} @{term \"1::nat\"}" "\"\\^E\\^Fterm\\^E\\^E\\^Fconst\\^Fname=HOL.one_class.one\\^E1\\^E\\^F\\^E\\^E\\^F\\^E\""} This produces a string with some additional information encoded in it. The string - can be properly printed by using the function @{ML warning}. + can be properly printed by using the function @{ML [index] writeln}. @{ML_response_fake [display,gray] - "warning (Syntax.string_of_term @{context} @{term \"1::nat\"})" + "writeln (Syntax.string_of_term @{context} @{term \"1::nat\"})" "\"1\""} A @{ML_type cterm} can be transformed into a string by the following function. *} -ML{*fun str_of_cterm ctxt t = +ML{*fun string_of_cterm ctxt t = Syntax.string_of_term ctxt (term_of t)*} text {* - In this example the function @{ML term_of} extracts the @{ML_type term} from + In this example the function @{ML [index] term_of} extracts the @{ML_type term} from a @{ML_type cterm}. If there are more than one @{ML_type cterm}s to be - printed, you can use the function @{ML commas} to separate them. 
+ printed, you can use the function @{ML [index] commas} to separate them. *} -ML{*fun str_of_cterms ctxt ts = - commas (map (str_of_cterm ctxt) ts)*} +ML{*fun string_of_cterms ctxt ts = + commas (map (string_of_cterm ctxt) ts)*} text {* The easiest way to get the string of a theorem is to transform it - into a @{ML_type cterm} using the function @{ML crep_thm}. + into a @{ML_type cterm} using the function @{ML [index] crep_thm}. *} -ML{*fun str_of_thm ctxt thm = - str_of_cterm ctxt (#prop (crep_thm thm))*} +ML{*fun string_of_thm ctxt thm = + string_of_cterm ctxt (#prop (crep_thm thm))*} text {* Theorems also include schematic variables, such as @{text "?P"}, @{text "?Q"} and so on. @{ML_response_fake [display, gray] - "warning (str_of_thm @{context} @{thm conjI})" + "writeln (string_of_thm @{context} @{thm conjI})" "\?P; ?Q\ \ ?P \ ?Q"} In order to improve the readability of theorems we convert these schematic variables into free variables using the - function @{ML Variable.import_thms}. + function @{ML [index] import in Variable}. *} ML{*fun no_vars ctxt thm = let - val ((_, [thm']), _) = Variable.import_thms true [thm] ctxt + val ((_, [thm']), _) = Variable.import true [thm] ctxt in thm' end -fun str_of_thm_no_vars ctxt thm = - str_of_cterm ctxt (#prop (crep_thm (no_vars ctxt thm)))*} +fun string_of_thm_no_vars ctxt thm = + string_of_cterm ctxt (#prop (crep_thm (no_vars ctxt thm)))*} text {* Theorem @{thm [source] conjI} is now printed as follows: @{ML_response_fake [display, gray] - "warning (str_of_thm_no_vars @{context} @{thm conjI})" + "writeln (string_of_thm_no_vars @{context} @{thm conjI})" "\P; Q\ \ P \ Q"} Again the function @{ML commas} helps with printing more than one theorem. *} -ML{*fun str_of_thms ctxt thms = - commas (map (str_of_thm ctxt) thms) - -fun str_of_thms_no_vars ctxt thms = - commas (map (str_of_thm_no_vars ctxt) thms) *} +ML{*fun string_of_thms ctxt thms = + commas (map (string_of_thm ctxt) thms) + +fun string_of_thms_no_vars ctxt thms = + commas (map (string_of_thm_no_vars ctxt) thms) *} section {* Combinators\label{sec:combinators} *} text {* - (FIXME: Calling convention) - For beginners perhaps the most puzzling parts in the existing code of Isabelle are the combinators. At first they seem to greatly obstruct the comprehension of the code, but after getting familiar with them, they actually ease the understanding and also the programming. - The simplest combinator is @{ML I}, which is just the identity function defined as + The simplest combinator is @{ML [index] I}, which is just the identity function defined as *} ML{*fun I x = x*} -text {* Another simple combinator is @{ML K}, defined as *} +text {* Another simple combinator is @{ML [index] K}, defined as *} ML{*fun K x = fn _ => x*} text {* - @{ML K} ``wraps'' a function around the argument @{text "x"}. However, this + @{ML [index] K} ``wraps'' a function around the argument @{text "x"}. However, this function ignores its argument. As a result, @{ML K} defines a constant function always returning @{text x}. - The next combinator is reverse application, @{ML "|>"}, defined as: + The next combinator is reverse application, @{ML [index] "|>"}, defined as: *} ML{*fun x |> f = f x*} @@ -288,8 +309,8 @@ common when dealing with theories (for example by adding a definition, followed by lemmas and so on). The reverse application allows you to read what happens in a top-down manner. This kind of coding should also be familiar, - if you have been exposed to Haskell's do-notation. 
Writing the function @{ML inc_by_five} using - the reverse application is much clearer than writing + if you have been exposed to Haskell's {\it do}-notation. Writing the function + @{ML inc_by_five} using the reverse application is much clearer than writing *} ML{*fun inc_by_five x = fst ((fn x => (x, x)) (x + 1)) + 4*} @@ -302,11 +323,11 @@ text {* and typographically more economical than *} ML{*fun inc_by_five x = - let val y1 = x + 1 - val y2 = (y1, y1) - val y3 = fst y2 - val y4 = y3 + 4 - in y4 end*} +let val y1 = x + 1 + val y2 = (y1, y1) + val y3 = fst y2 + val y4 = y3 + 4 +in y4 end*} text {* Another reason why the let-bindings in the code above are better to be @@ -323,7 +344,7 @@ |> map (pair "z") |> Variable.variant_frees ctxt [f] |> map Free - |> (curry list_comb) f *} + |> curry list_comb f *} text {* This code extracts the argument types of a given function @{text "f"} and then generates @@ -333,22 +354,30 @@ @{ML_response_fake [display,gray] "apply_fresh_args @{term \"P::nat \ int \ unit \ bool\"} @{context} |> Syntax.string_of_term @{context} - |> warning" + |> writeln" "P z za zb"} You can read off this behaviour from how @{ML apply_fresh_args} is - coded: in Line 2, the function @{ML fastype_of} calculates the type of the - function; @{ML binder_types} in the next line produces the list of argument + coded: in Line 2, the function @{ML [index] fastype_of} calculates the type of the + function; @{ML [index] binder_types} in the next line produces the list of argument types (in the case above the list @{text "[nat, int, unit]"}); Line 4 pairs up each type with the string @{text "z"}; the - function @{ML variant_frees in Variable} generates for each @{text "z"} a + function @{ML [index] variant_frees in Variable} generates for each @{text "z"} a unique name avoiding the given @{text f}; the list of name-type pairs is turned into a list of variable terms in Line 6, which in the last line is applied - by the function @{ML list_comb} to the function. In this last step we have to - use the function @{ML curry}, because @{ML list_comb} expects the function and the - variables list as a pair. + by the function @{ML [index] list_comb} to the function. In this last step we have to + use the function @{ML [index] curry}, because @{ML [index] list_comb} expects the + function and the variables list as a pair. This kind of functions is often needed when + constructing terms and the infrastructure helps tremendously to avoid + any name clashes. Consider for example: + + @{ML_response_fake [display,gray] +"apply_fresh_args @{term \"za::'a \ 'b \ 'c\"} @{context} + |> Syntax.string_of_term @{context} + |> writeln" + "za z zb"} - The combinator @{ML "#>"} is the reverse function composition. It can be + The combinator @{ML [index] "#>"} is the reverse function composition. It can be used to define the following function *} @@ -363,7 +392,7 @@ composition allows you to read the code top-down. The remaining combinators described in this section add convenience for the - ``waterfall method'' of writing functions. The combinator @{ML tap} allows + ``waterfall method'' of writing functions. The combinator @{ML [index] tap} allows you to get hold of an intermediate result (to do some side-calculations for instance). The function @@ -371,18 +400,18 @@ ML %linenosgray{*fun inc_by_three x = x |> (fn x => x + 1) - |> tap (fn x => tracing (makestring x)) + |> tap (fn x => tracing (PolyML.makestring x)) |> (fn x => x + 2)*} text {* increments the argument first by @{text "1"} and then by @{text "2"}. 
In the - middle (Line 3), however, it uses @{ML tap} for printing the ``plus-one'' - intermediate result inside the tracing buffer. The function @{ML tap} can + middle (Line 3), however, it uses @{ML [index] tap} for printing the ``plus-one'' + intermediate result inside the tracing buffer. The function @{ML [index] tap} can only be used for side-calculations, because any value that is computed cannot be merged back into the ``main waterfall''. To do this, you can use the next combinator. - The combinator @{ML "`"} (a backtick) is similar to @{ML tap}, but applies a + The combinator @{ML [index] "`"} (a backtick) is similar to @{ML tap}, but applies a function to the value and returns the result together with the value (as a pair). For example the function *} @@ -397,7 +426,7 @@ for x}. After that, the function increments the right-hand component of the pair. So finally the result will be @{ML "(x + 1, x + 1)" for x}. - The combinators @{ML "|>>"} and @{ML "||>"} are defined for + The combinators @{ML [index] "|>>"} and @{ML [index] "||>"} are defined for functions manipulating pairs. The first applies the function to the first component of the pair, defined as *} @@ -411,7 +440,7 @@ ML{*fun (x, y) ||> f = (x, f y)*} text {* - With the combinator @{ML "|->"} you can re-combine the elements from a pair. + With the combinator @{ML [index] "|->"} you can re-combine the elements from a pair. This combinator is defined as *} @@ -427,10 +456,10 @@ |-> (fn x => fn y => x + y)*} text {* - The combinator @{ML ||>>} plays a central rôle whenever your task is to update a + The combinator @{ML [index] ||>>} plays a central rôle whenever your task is to update a theory and the update also produces a side-result (for example a theorem). Functions for such tasks return a pair whose second component is the theory and the fist - component is the side-result. Using @{ML ||>>}, you can do conveniently the update + component is the side-result. Using @{ML [index] ||>>}, you can do conveniently the update and also accumulate the side-results. Considder the following simple function. *} @@ -442,7 +471,7 @@ text {* The purpose of Line 2 is to just pair up the argument with a dummy value (since - @{ML "||>>"} operates on pairs). Each of the next three lines just increment + @{ML [index] "||>>"} operates on pairs). Each of the next three lines just increment the value by one, but also nest the intrermediate results to the left. For example @{ML_response [display,gray] @@ -459,12 +488,14 @@ *} text {* - Recall that @{ML "|>"} is the reverse function application. Recall also that + Recall that @{ML [index] "|>"} is the reverse function application. Recall also that the related - reverse function composition is @{ML "#>"}. In fact all the combinators @{ML "|->"}, - @{ML "|>>"} , @{ML "||>"} and @{ML "||>>"} described above have related combinators for - function composition, namely @{ML "#->"}, @{ML "#>>"}, @{ML "##>"} and @{ML "##>>"}. - Using @{ML "#->"}, for example, the function @{text double} can also be written as: + reverse function composition is @{ML [index] "#>"}. In fact all the combinators + @{ML [index] "|->"}, @{ML [index] "|>>"} , @{ML [index] "||>"} and @{ML [index] + "||>>"} described above have related combinators for + function composition, namely @{ML [index] "#->"}, @{ML [index] "#>>"}, + @{ML [index] "##>"} and @{ML [index] "##>>"}. 
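  Each of these is definable in a single line. For instance, a possible
  definition of @{ML "#->"} is the following sketch (the actual definition in
  the Isabelle sources might differ slightly):
*}

ML{*(* apply f first, then feed the two components of the
   resulting pair to the two-argument function g *)
fun f #-> g = f #> (fn (x, y) => g x y)*}

text {*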
+ Using @{ML [index] "#->"}, for example, the function @{text double} can also be written as: *} ML{*val double = @@ -482,6 +513,7 @@ contains further information about combinators. \end{readmore} + (FIXME: say something about calling conventions) *} @@ -500,7 +532,7 @@ where @{text "@{theory}"} is an antiquotation that is substituted with the current theory (remember that we assumed we are inside the theory @{text FirstSteps}). The name of this theory can be extracted using - the function @{ML "Context.theory_name"}. + the function @{ML [index] theory_name in Context}. Note, however, that antiquotations are statically linked, that is their value is determined at ``compile-time'', not ``run-time''. For example the function @@ -542,9 +574,9 @@ end*} text {* - The function @{ML dest_ss in MetaSimplifier} returns a record containing all + The function @{ML [index] dest_ss in MetaSimplifier} returns a record containing all information stored in the simpset, but we are only interested in the names of the - simp-rules. So now you can feed in the current simpset into this function. + simp-rules. Now you can feed in the current simpset into this function. The current simpset can be referred to using the antiquotation @{text "@{simpset}"}. @{ML_response_fake [display,gray] @@ -554,17 +586,17 @@ Again, this way of referencing simpsets makes you independent from additions of lemmas to the simpset by the user that potentially cause loops. - On the ML-level of Isabelle, you often have to work with qualified names; - these are strings with some additional information, such as positional information - and qualifiers. Such bindings can be generated with the antiquotation + On the ML-level of Isabelle, you often have to work with qualified names. + These are strings with some additional information, such as positional information + and qualifiers. Such qualified names can be generated with the antiquotation @{text "@{binding \}"}. @{ML_response [display,gray] "@{binding \"name\"}" "name"} - An example where a binding is needed is the function @{ML define in - LocalTheory}. Below, this function is used to define the constant @{term + An example where a binding is needed is the function @{ML [index] define in + LocalTheory}. This function is below used to define the constant @{term "TrueConj"} as the conjunction @{term "True \ True"}. *} @@ -583,7 +615,8 @@ \end{isabelle} (FIXME give a better example why bindings are important; maybe - give a pointer to \isacommand{local\_setup}) + give a pointer to \isacommand{local\_setup}; if not, then explain + why @{ML snd} is needed) While antiquotations have many applications, they were originally introduced in order to avoid explicit bindings of theorems such as: @@ -602,11 +635,28 @@ kinds of logical elements from the ML-level. *} +text {* FIXME: give an example of a user defined antiquotation *} + +ML{*ML_Antiquote.inline "term_pat" + (Args.context -- Scan.lift Args.name_source >> + (fn (ctxt, s) => + s |> ProofContext.read_term_pattern ctxt + |> ML_Syntax.print_term + |> ML_Syntax.atomic))*} + +ML{*@{term_pat "?x + ?y"}*} + +text {* + \begin{readmore} + @{ML_file "Pure/ML/ml_antiquote.ML"} + \end{readmore} +*} + section {* Terms and Types *} text {* One way to construct Isabelle terms, is by using the antiquotation - \mbox{@{text "@{term \}"}}. For example: + \mbox{@{text "@{term \}"}}. 
For example @{ML_response [display,gray] "@{term \"(a::nat) + b = c\"}" @@ -614,33 +664,85 @@ (Const (\"HOL.plus_class.plus\", \) $ \ $ \) $ \"} will show the term @{term "(a::nat) + b = c"}, but printed using the internal - representation corresponding to the data type @{ML_type "term"}. - - This internal representation uses the usual de Bruijn index mechanism---where - bound variables are represented by the constructor @{ML Bound}. The index in - @{ML Bound} refers to the number of Abstractions (@{ML Abs}) we have to skip + representation corresponding to the datatype @{ML_type [index] "term"} defined as follows: +*} + +ML_val %linenosgray{*datatype term = + Const of string * typ +| Free of string * typ +| Var of indexname * typ +| Bound of int +| Abs of string * typ * term +| $ of term * term *} + +text {* + As can be seen in Line 5, terms use the usual de Bruijn index mechanism + for representing bound variables. For + example in + + @{ML_response_fake [display, gray] + "@{term \"\x y. x y\"}" + "Abs (\"x\", \"'a \ 'b\", Abs (\"y\", \"'a\", Bound 1 $ Bound 0))"} + + the indices refer to the number of Abstractions (@{ML Abs}) that we need to skip until we hit the @{ML Abs} that binds the corresponding variable. Note that the names of bound variables are kept at abstractions for printing purposes, and so should be treated only as ``comments''. Application in Isabelle is - realised with the term-constructor @{ML $}. + realised with the term-constructor @{ML $}. + + Isabelle makes a distinction between \emph{free} variables (term-constructor @{ML Free}) + and variables (term-constructor @{ML Var}). The latter correspond to the schematic + variables that when printed show up with a question mark in front of them. Consider + the following two examples + + @{ML_response_fake [display, gray] +"let + val v1 = Var ((\"x\", 3), @{typ bool}) + val v2 = Var ((\"x1\", 3), @{typ bool}) +in + writeln (Syntax.string_of_term @{context} v1); + writeln (Syntax.string_of_term @{context} v2) +end" +"?x3 +?x1.3"} + + This is different from a free variable + + @{ML_response_fake [display, gray] + "writeln (Syntax.string_of_term @{context} (Free (\"x\", @{typ bool})))" + "x"} + + When constructing terms, you are usually concerned with free variables (for example + you cannot construct schematic variables using the antiquotation @{text "@{term \}"}). + If you deal with theorems, you have to, however, observe the distinction. A similar + distinction holds for types (see below). \begin{readmore} - Terms are described in detail in \isccite{sec:terms}. Their + Terms and types are described in detail in \isccite{sec:terms}. Their definition and many useful operations are implemented in @{ML_file "Pure/term.ML"}. + For constructing terms involving HOL constants, many helper functions are defined + in @{ML_file "HOL/Tools/hologic.ML"}. \end{readmore} Constructing terms via antiquotations has the advantage that only typable terms can be constructed. For example @{ML_response_fake_both [display,gray] - "@{term \"(x::nat) x\"}" - "Type unification failed \"} + "@{term \"x x\"}" + "Type unification failed: Occurs check!"} raises a typing error, while it perfectly ok to construct the term - - @{ML [display,gray] "Free (\"x\", @{typ nat}) $ Free (\"x\", @{typ nat})"} + + @{ML_response_fake [display,gray] +"let + val omega = Free (\"x\", @{typ nat}) $ Free (\"x\", @{typ nat}) +in + writeln (Syntax.string_of_term @{context} omega) +end" + "x x"} with the raw ML-constructors. 
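  Since @{ML_type term} is an ordinary ML-datatype, you can also take terms
  apart by structural recursion. As a small sketch (a helper of our own, not
  part of Isabelle), the following function counts the abstractions in a term:
*}

ML{*(* an Abs contributes one; an application descends into
   both subterms; everything else contributes nothing *)
fun count_abs (Abs (_, _, t)) = 1 + count_abs t
  | count_abs (t $ u) = count_abs t + count_abs u
  | count_abs _ = 0*}

text {*
  Applied to the two-fold abstraction shown above, it returns @{ML "2"}.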
+ Sometimes the internal representation of terms can be surprisingly different from what you see at the user-level, because the layers of parsing/type-checking/pretty printing can be quite elaborate. @@ -683,6 +785,22 @@ @{ML_response_fake [display,gray] "@{typ \"bool \ nat\"}" "bool \ nat"} + Their definition is +*} + +ML_val{*datatype typ = + Type of string * typ list +| TFree of string * sort +| TVar of indexname * sort *} + +text {* + Like with terms, there is the distinction between free type + variables (term-constructor @{ML "TFree"} and schematic + type variables (term-constructor @{ML "TVar"}). A type constant, + like @{typ "int"} or @{typ bool}, are types with an empty list + of argument types. + + \begin{readmore} Types are described in detail in \isccite{sec:types}. Their definition and many useful operations are implemented @@ -690,8 +808,7 @@ \end{readmore} *} - -section {* Constructing Terms and Types Manually\label{sec:terms_types_manually} *} +section {* Constructing Terms Manually\label{sec:terms_types_manually} *} text {* While antiquotations are very convenient for constructing terms, they can @@ -711,7 +828,8 @@ text {* The reason is that you cannot pass the arguments @{term P} and @{term Q} - into an antiquotation. For example the following does \emph{not} work. + into an antiquotation.\footnote{At least not at the moment.} For example + the following does \emph{not} work. *} ML{*fun make_wrong_imp P Q = @{prop "\(x::nat). P x \ Q x"} *} @@ -736,7 +854,7 @@ (Const \ $ (Free (\"Q\",\) $ \)))"} There are a number of handy functions that are frequently used for - constructing terms. One is the function @{ML list_comb}, which takes a term + constructing terms. One is the function @{ML [index] list_comb}, which takes a term and a list of terms as arguments, and produces as output the term list applied to the term. For example @@ -744,7 +862,7 @@ "list_comb (@{term \"P::nat\"}, [@{term \"True\"}, @{term \"False\"}])" "Free (\"P\", \"nat\") $ Const (\"True\", \"bool\") $ Const (\"False\", \"bool\")"} - Another handy function is @{ML lambda}, which abstracts a variable + Another handy function is @{ML [index] lambda}, which abstracts a variable in a term. For example @{ML_response_fake [display,gray] @@ -767,7 +885,7 @@ of Church-style typing, where variables with the same name still differ, if they have different type. - There is also the function @{ML subst_free} with which terms can + There is also the function @{ML [index] subst_free} with which terms can be replaced by other terms. For example below, we will replace in @{term "(f::nat\nat\nat) 0 x"} the subterm @{term "(f::nat\nat\nat) 0"} by @{term y}, and @{term x} by @{term True}. @@ -786,6 +904,41 @@ @{term \"(\x::nat. x)\"}" "Free (\"x\", \"nat\")"} + There are many convenient functions that construct specific HOL-terms. For + example @{ML [index] mk_eq in HOLogic} constructs an equality out of two terms. + The types needed in this equality are calculated from the type of the + arguments. 
For example + +@{ML_response_fake [gray,display] + "writeln (Syntax.string_of_term @{context} + (HOLogic.mk_eq (@{term \"True\"}, @{term \"False\"})))" + "True = False"} + + \begin{readmore} + There are many functions in @{ML_file "Pure/term.ML"}, @{ML_file "Pure/logic.ML"} and + @{ML_file "HOL/Tools/hologic.ML"} that make such manual constructions of terms + and types easier.\end{readmore} + + Have a look at these files and try to solve the following two exercises: + + \begin{exercise}\label{fun:revsum} + Write a function @{text "rev_sum : term -> term"} that takes a + term of the form @{text "t\<^isub>1 + t\<^isub>2 + \ + t\<^isub>n"} (whereby @{text "n"} might be zero) + and returns the reversed sum @{text "t\<^isub>n + \ + t\<^isub>2 + t\<^isub>1"}. Assume + the @{text "t\<^isub>i"} can be arbitrary expressions and also note that @{text "+"} + associates to the left. Try your function on some examples. + \end{exercise} + + \begin{exercise}\label{fun:makesum} + Write a function which takes two terms representing natural numbers + in unary notation (like @{term "Suc (Suc (Suc 0))"}), and produces the + number representing their sum. + \end{exercise} +*} + +section {* Constants *} + +text {* There are a few subtle issues with constants. They usually crop up when pattern matching terms or types, or when constructing them. While it is perfectly ok to write the function @{text is_true} as follows @@ -803,7 +956,7 @@ | is_all _ = false*} text {* - will not correctly match the formula @{prop[source] "\x::nat. P x"}: + will not correctly match the formula @{prop "\x::nat. P x"}: @{ML_response [display,gray] "is_all @{term \"\x::nat. P x\"}" "false"} @@ -844,7 +997,7 @@ @{ML_response [display,gray] "@{term \"Nil\"}" "Const (\"List.list.Nil\", \)"} the name of the constant @{text "Nil"} depends on the theory in which the - term constructor is defined (@{text "List"}) and also in which data type + term constructor is defined (@{text "List"}) and also in which datatype (@{text "list"}). Even worse, some constants have a name involving type-classes. Consider for example the constants for @{term "zero"} and \mbox{@{text "(op *)"}}: @@ -856,7 +1009,7 @@ While you could use the complete name, for example @{ML "Const (\"List.list.Nil\", some_type)" for some_type}, for referring to or matching against @{text "Nil"}, this would make the code rather brittle. - The reason is that the theory and the name of the data type can easily change. + The reason is that the theory and the name of the datatype can easily change. To make the code more robust, it is better to use the antiquotation @{text "@{const_name \}"}. With this antiquotation you can harness the variable parts of the constant's name. Therefore a function for @@ -869,30 +1022,17 @@ | is_nil_or_all _ = false *} text {* - \begin{readmore} - There are many functions in @{ML_file "Pure/term.ML"}, @{ML_file "Pure/logic.ML"} and - @{ML_file "HOL/Tools/hologic.ML"} that make such manual constructions of terms - and types easier.\end{readmore} - - Have a look at these files and try to solve the following two exercises: - - \begin{exercise}\label{fun:revsum} - Write a function @{text "rev_sum : term -> term"} that takes a - term of the form @{text "t\<^isub>1 + t\<^isub>2 + \ + t\<^isub>n"} (whereby @{text "n"} might be one) - and returns the reversed sum @{text "t\<^isub>n + \ + t\<^isub>2 + t\<^isub>1"}. Assume - the @{text "t\<^isub>i"} can be arbitrary expressions and also note that @{text "+"} - associates to the left. Try your function on some examples. 
- \end{exercise} - - \begin{exercise}\label{fun:makesum} - Write a function which takes two terms representing natural numbers - in unary notation (like @{term "Suc (Suc (Suc 0))"}), and produces the - number representing their sum. - \end{exercise} + The antiquotation for properly referencing type constants is is @{text "@{type_name \}"}. + For example + + @{ML_response [display,gray] + "@{type_name \"list\"}" "\"List.list\""} + + (FIXME: Explain the following better.) Occasionally you have to calculate what the ``base'' name of a given - constant is. For this you can use the function @{ML Sign.extern_const} or - @{ML Long_Name.base_name}. For example: + constant is. For this you can use the function @{ML [index] "Sign.extern_const"} or + @{ML [index] Long_Name.base_name}. For example: @{ML_response [display,gray] "Sign.extern_const @{theory} \"List.list.Nil\"" "\"Nil\""} @@ -904,7 +1044,11 @@ Functions about naming are implemented in @{ML_file "Pure/General/name_space.ML"}; functions about signatures in @{ML_file "Pure/sign.ML"}. \end{readmore} - +*} + +section {* Constructing Types Manually *} + +text {* Although types of terms can often be inferred, there are many situations where you need to construct types manually, especially when defining constants. For example the function returning a function @@ -912,23 +1056,30 @@ *} -ML{*fun make_fun_type tau1 tau2 = Type ("fun", [tau1, tau2]) *} - -text {* This can be equally written with the combinator @{ML "-->"} as: *} - -ML{*fun make_fun_type tau1 tau2 = tau1 --> tau2 *} +ML{*fun make_fun_type ty1 ty2 = Type ("fun", [ty1, ty2]) *} + +text {* This can be equally written with the combinator @{ML [index] "-->"} as: *} + +ML{*fun make_fun_type ty1 ty2 = ty1 --> ty2 *} text {* - A handy function for manipulating terms is @{ML map_types}: it takes a + If you want to construct a function type with more than one argument + type, then you can use @{ML [index] "--->"}. +*} + +ML{*fun make_fun_types tys ty = tys ---> ty *} + +text {* + A handy function for manipulating terms is @{ML [index] map_types}: it takes a function and applies it to every type in a term. You can, for example, change every @{typ nat} in a term into an @{typ int} using the function: *} -ML{*fun nat_to_int t = - (case t of +ML{*fun nat_to_int ty = + (case ty of @{typ nat} => @{typ int} - | Type (s, ts) => Type (s, map nat_to_int ts) - | _ => t)*} + | Type (s, tys) => Type (s, map nat_to_int tys) + | _ => ty)*} text {* Here is an example: @@ -938,7 +1089,26 @@ "Const (\"op =\", \"int \ int \ bool\") $ Free (\"a\", \"int\") $ Const (\"HOL.one_class.one\", \"int\")"} - (FIXME: a readmore about types) + If you want to obtain the list of free type-variables of a term, you + can use the function @{ML [index] add_tfrees in Term} + (similarly @{ML [index] add_tvars in Term} for the schematic type-variables). + One would expect that such functions + take a term as input and return a list of types. But their type is actually + + @{text[display] "Term.term -> (string * Term.sort) list -> (string * Term.sort) list"} + + that is they take, besides a term, also a list of type-variables as input. + So in order to obtain the list of type-variables of a term you have to + call them as follows + + @{ML_response [gray,display] + "Term.add_tfrees @{term \"(a,b)\"} []" + "[(\"'b\", [\"HOL.type\"]), (\"'a\", [\"HOL.type\"])]"} + + The reason for this definition is that @{ML add_tfrees in Term} can + be easily folded over a list of terms. 
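  For example, the following sketch collects the type-variables of two terms
  in one traversal (the value name is ours):
*}

ML{*(* fold Term.add_tfrees over a list of terms, starting
   from the empty accumulator *)
val my_tfrees = fold Term.add_tfrees [@{term "(a, b)"}, @{term "c"}] []*}

text {*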
Similarly for all functions + named @{text "add_"}some\_name in @{ML_file "Pure/term.ML"}. + *} @@ -949,8 +1119,8 @@ You can freely construct and manipulate @{ML_type "term"}s and @{ML_type typ}es, since they are just arbitrary unchecked trees. However, you eventually want to see if a term is well-formed, or type-checks, relative to - a theory. Type-checking is done via the function @{ML cterm_of}, which - converts a @{ML_type term} into a @{ML_type cterm}, a \emph{certified} + a theory. Type-checking is done via the function @{ML [index] cterm_of}, which + converts a @{ML_type [index] term} into a @{ML_type [index] cterm}, a \emph{certified} term. Unlike @{ML_type term}s, which are just trees, @{ML_type "cterm"}s are abstract objects that are guaranteed to be type-correct, and they can only be constructed via ``official interfaces''. @@ -1001,10 +1171,15 @@ the file @{ML_file "Pure/thm.ML"}. \end{readmore} + \begin{exercise} + Check that the function defined in Exercise~\ref{fun:revsum} returns a + result that type-checks. + \end{exercise} + Remember Isabelle follows the Church-style typing for terms, i.e., a term contains enough typing information (constants, free variables and abstractions all have typing information) so that it is always clear what the type of a term is. - Given a well-typed term, the function @{ML type_of} returns the + Given a well-typed term, the function @{ML [index] type_of} returns the type of a term. Consider for example: @{ML_response [display,gray] @@ -1019,7 +1194,7 @@ "*** Exception- TYPE (\"type_of: type mismatch in application\" \"} Since the complete traversal might sometimes be too costly and - not necessary, there is the function @{ML fastype_of}, which + not necessary, there is the function @{ML [index] fastype_of}, which also returns the type of a term. @{ML_response [display,gray] @@ -1033,15 +1208,10 @@ where no error is detected. - \begin{exercise}\label{fun:revsum typed} - Check that the function defined in Exercise~\ref{fun:revsum} returns a - result that type-checks. - \end{exercise} - Sometimes it is a bit inconvenient to construct a term with complete typing annotations, especially in cases where the typing information is redundant. A short-cut is to use the ``place-holder'' - type @{ML "dummyT"} and then let type-inference figure out the + type @{ML [index] dummyT} and then let type-inference figure out the complete type. An example is as follows: @{ML_response_fake [display,gray] @@ -1131,10 +1301,90 @@ @{ML_file "Pure/thm.ML"}. \end{readmore} - (FIXME: handy functions working on theorems, like @{ML ObjectLogic.rulify} and so on) + (FIXME: handy functions working on theorems, like @{ML [index] rulify in ObjectLogic} and so on) (FIXME: how to add case-names to goal states - maybe in the next section) + + (FIXME: example for how to add theorem styles) +*} + +ML {* +fun strip_assums_all (params, Const("all",_) $ Abs(a, T, t)) = + strip_assums_all ((a, T)::params, t) + | strip_assums_all (params, B) = (params, B) + +fun style_parm_premise i ctxt t = + let val prems = Logic.strip_imp_prems t in + if i <= length prems + then let val (params,t) = strip_assums_all([], nth prems (i - 1)) + in subst_bounds(map Free params, t) end + else error ("Not enough premises for prem" ^ string_of_int i ^ + " in propositon: " ^ Syntax.string_of_term ctxt t) + end; +*} + +ML {* +strip_assums_all ([], @{term "\x y. 
A x y"}) +*} + +setup {* + TermStyle.add_style "no_all_prem1" (style_parm_premise 1) #> + TermStyle.add_style "no_all_prem2" (style_parm_premise 2) +*} + + +section {* Setups (TBD) *} + +text {* + In the previous section we used \isacommand{setup} in order to make + a theorem attribute known to Isabelle. What happens behind the scenes + is that \isacommand{setup} expects a function of type + @{ML_type "theory -> theory"}: the input theory is the current theory and the + output the theory where the theory attribute has been stored. + + This is a fundamental principle in Isabelle. A similar situation occurs + for example with declaring constants. The function that declares a + constant on the ML-level is @{ML [index] add_consts_i in Sign}. + If you write\footnote{Recall that ML-code needs to be + enclosed in \isacommand{ML}~@{text "\ \ \"}.} +*} + +ML{*Sign.add_consts_i [(@{binding "BAR"}, @{typ "nat"}, NoSyn)] @{theory} *} + +text {* + for declaring the constant @{text "BAR"} with type @{typ nat} and + run the code, then you indeed obtain a theory as result. But if you + query the constant on the Isabelle level using the command \isacommand{term} + + \begin{isabelle} + \isacommand{term}~@{text [quotes] "BAR"}\\ + @{text "> \"BAR\" :: \"'a\""} + \end{isabelle} + + you do not obtain a constant of type @{typ nat}, but a free variable (printed in + blue) of polymorphic type. The problem is that the ML-expression above did + not register the declaration with the current theory. This is what the command + \isacommand{setup} is for. The constant is properly declared with +*} + +setup %gray {* Sign.add_consts_i [(@{binding "BAR"}, @{typ "nat"}, NoSyn)] *} + +text {* + Now + + \begin{isabelle} + \isacommand{term}~@{text [quotes] "BAR"}\\ + @{text "> \"BAR\" :: \"nat\""} + \end{isabelle} + + returns a (black) constant with the type @{typ nat}. + + A similar command is \isacommand{local\_setup}, which expects a function + of type @{ML_type "local_theory -> local_theory"}. Later on we will also + use the commands \isacommand{method\_setup} for installing methods in the + current theory and \isacommand{simproc\_setup} for adding new simprocs to + the current simpset. *} section {* Theorem Attributes *} @@ -1173,13 +1423,12 @@ ML{*val my_symmetric = Thm.rule_attribute (fn _ => fn thm => thm RS @{thm sym})*} text {* - where the function @{ML "Thm.rule_attribute"} expects a function taking a + where the function @{ML [index] rule_attribute in Thm} expects a function taking a context (which we ignore in the code above) and a theorem (@{text thm}), and returns another theorem (namely @{text thm} resolved with the theorem - @{thm [source] sym}: @{thm sym[no_vars]}).\footnote{The function @{ML RS} is explained - later on in Section~\ref{sec:simpletacs}.} The function - @{ML "Thm.rule_attribute"} then returns - an attribute. + @{thm [source] sym}: @{thm sym[no_vars]}).\footnote{The function @{ML [index] RS} + is explained in Section~\ref{sec:simpletacs}.} The function + @{ML rule_attribute in Thm} then returns an attribute. Before we can use the attribute, we need to set it up. This can be done using the Isabelle command \isacommand{attribute\_setup} as follows: @@ -1214,6 +1463,18 @@ @{text "> "}~@{thm test[my_sym]} \end{isabelle} + An alternative for setting up an attribute is the function @{ML [index] setup in Attrib}. 
+ So instead of using \isacommand{attribute\_setup}, you can also set up the + attribute as follows: +*} + +ML{*Attrib.setup @{binding "my_sym"} (Scan.succeed my_symmetric) + "applying the sym rule" *} + +text {* + This gives a function from @{ML_type "Context.theory -> Context.theory"}, which + can be used for example with \isacommand{setup}. + As an example of a slightly more complicated theorem attribute, we implement our own version of @{text "[THEN \]"}. This attribute will take a list of theorems as argument and resolve the proved theorem with this list (one theorem @@ -1225,7 +1486,7 @@ text {* where @{ML swap} swaps the components of a pair. The setup of this theorem - attribute uses the parser @{ML Attrib.thms}, which parses a list of + attribute uses the parser @{ML thms in Attrib}, which parses a list of theorems. *} @@ -1267,10 +1528,10 @@ you get an exception indicating that the theorem @{thm [source] sym} does not resolve with meta-equations. - The purpose of @{ML Thm.rule_attribute} is to directly manipulate theorems. + The purpose of @{ML [index] rule_attribute in Thm} is to directly manipulate theorems. Another usage of theorem attributes is to add and delete theorems from stored data. For example the theorem attribute @{text "[simp]"} adds or deletes a theorem from the - current simpset. For these applications, you can use @{ML Thm.declaration_attribute}. + current simpset. For these applications, you can use @{ML [index] declaration_attribute in Thm}. To illustrate this function, let us introduce a reference containing a list of theorems. *} @@ -1297,12 +1558,11 @@ text {* These functions take a theorem and a context and, for what we are explaining here it is sufficient that they just return the context unchanged. They change - however the reference @{ML my_thms}, whereby the function @{ML Thm.add_thm} - adds a theorem if it is not already included in the list, and @{ML - Thm.del_thm} deletes one (both functions use the predicate @{ML - Thm.eq_thm_prop}, which compares theorems according to their proved - propositions modulo alpha-equivalence). - + however the reference @{ML my_thms}, whereby the function + @{ML [index] add_thm in Thm} adds a theorem if it is not already included in + the list, and @{ML [index] del_thm in Thm} deletes one (both functions use the + predicate @{ML [index] eq_thm_prop in Thm}, which compares theorems according to + their proved propositions modulo alpha-equivalence). You can turn functions @{ML my_thm_add} and @{ML my_thm_del} into attributes with the code @@ -1319,7 +1579,7 @@ "maintaining a list of my_thms - rough test only!" text {* - The parser @{ML Attrib.add_del} is a pre-defined parser for + The parser @{ML [index] add_del in Attrib} is a pre-defined parser for adding and deleting lemmas. Now if you prove the next lemma and attach to it the attribute @{text "[my_thms]"} *} @@ -1347,7 +1607,7 @@ "[\"True\", \"Suc (Suc 0) = 2\"]"} The theorem @{thm [source] trueI_2} only appears once, since the - function @{ML Thm.add_thm} tests for duplicates, before extending + function @{ML [index] add_thm in Thm} tests for duplicates, before extending the list. Deletion from the list works as follows: *} @@ -1359,7 +1619,7 @@ "!my_thms" "[\"True\"]"} - We used in this example two functions declared as @{ML Thm.declaration_attribute}, + We used in this example two functions declared as @{ML [index] declaration_attribute in Thm}, but there can be any number of them. We just have to change the parser for reading the arguments accordingly. 
@@ -1369,7 +1629,7 @@ @{text GenericDataFun}: *} -ML {*structure MyThmsData = GenericDataFun +ML{*structure MyThmsData = GenericDataFun (type T = thm list val empty = [] val extend = I @@ -1423,7 +1683,7 @@ is important to store data in a ``data slot'' and \emph{not} in a reference. Backtrack to the point just before the lemma @{thm [source] three} was proved and - check the the content of @{ML_struct "MyThmsData"}: it should be empty. + check the the content of @{ML_struct MyThmsData}: it should be empty. The addition has been properly retracted. Now consider the proof: *} @@ -1442,11 +1702,11 @@ properly treats ``data slots''! Since storing theorems in a list is such a common task, there is the special - functor @{text NamedThmsFun}, which does most of the work for you. To obtain + functor @{ML_functor Named_Thms}, which does most of the work for you. To obtain a named theorem lists, you just declare *} -ML{*structure FooRules = NamedThmsFun +ML{*structure FooRules = Named_Thms (val name = "foo" val description = "Rules for foo") *} @@ -1489,9 +1749,7 @@ @{ML_response_fake [display,gray] "FooRules.get @{context}" "[\"?C\",\"?B\"]"} \begin{readmore} - For more information see @{ML_file "Pure/Tools/named_thms.ML"} and also - the recipe in Section~\ref{recipe:storingdata} about storing arbitrary - data. + For more information see @{ML_file "Pure/Tools/named_thms.ML"}. \end{readmore} (FIXME What are: @{text "theory_attributes"}, @{text "proof_attributes"}?) @@ -1505,58 +1763,6 @@ *} -section {* Setups (TBD) *} - -text {* - In the previous section we used \isacommand{setup} in order to make - a theorem attribute known to Isabelle. What happens behind the scenes - is that \isacommand{setup} expects a function of type - @{ML_type "theory -> theory"}: the input theory is the current theory and the - output the theory where the theory attribute has been stored. - - This is a fundamental principle in Isabelle. A similar situation occurs - for example with declaring constants. The function that declares a - constant on the ML-level is @{ML Sign.add_consts_i}. - If you write\footnote{Recall that ML-code needs to be - enclosed in \isacommand{ML}~@{text "\ \ \"}.} -*} - -ML{*Sign.add_consts_i [(@{binding "BAR"}, @{typ "nat"}, NoSyn)] @{theory} *} - -text {* - for declaring the constant @{text "BAR"} with type @{typ nat} and - run the code, then you indeed obtain a theory as result. But if you - query the constant on the Isabelle level using the command \isacommand{term} - - \begin{isabelle} - \isacommand{term}~@{text [quotes] "BAR"}\\ - @{text "> \"BAR\" :: \"'a\""} - \end{isabelle} - - you do not obtain a constant of type @{typ nat}, but a free variable (printed in - blue) of polymorphic type. The problem is that the ML-expression above did - not register the declaration with the current theory. This is what the command - \isacommand{setup} is for. The constant is properly declared with -*} - -setup %gray {* Sign.add_consts_i [(@{binding "BAR"}, @{typ "nat"}, NoSyn)] *} - -text {* - Now - - \begin{isabelle} - \isacommand{term}~@{text [quotes] "BAR"}\\ - @{text "> \"BAR\" :: \"nat\""} - \end{isabelle} - - returns a (black) constant with the type @{typ nat}. - - A similar command is \isacommand{local\_setup}, which expects a function - of type @{ML_type "local_theory -> local_theory"}. Later on we will also - use the commands \isacommand{method\_setup} for installing methods in the - current theory and \isacommand{simproc\_setup} for adding new simprocs to - the current simpset. 
-*} section {* Theories, Contexts and Local Theories (TBD) *} @@ -1629,7 +1835,7 @@ case realOut (!r) of NONE => "NONE" | SOME x => Real.toString x - val () = warning (concat [s1, " ", s2, " ", s3, "\n"]) + val () = writeln (concat [s1, " ", s2, " ", s3, "\n"]) end*} ML_val{*structure t = Test(U) *} @@ -1642,8 +1848,11 @@ section {* Storing Theorems\label{sec:storing} (TBD) *} -text {* @{ML PureThy.add_thms_dynamic} *} - +text {* @{ML [index] add_thms_dynamic in PureThy} *} + +local_setup {* + LocalTheory.note Thm.theoremK + ((@{binding "allI_alt"}, []), [@{thm allI}]) #> snd *} (* FIXME: some code below *) @@ -1674,21 +1883,306 @@ *) (*>*) -section {* Pretty-Printing (TBD) *} +section {* Pretty-Printing\label{sec:pretty} *} + +text {* + So far we printed out only plain strings without any formatting except for + occasional explicit linebreaks using @{text [quotes] "\\n"}. This is + sufficient for ``quick-and-dirty'' printouts. For something more + sophisticated, Isabelle includes an infrastructure for properly formatting text. + This infrastructure is loosely based on a paper by Oppen~\cite{Oppen80}. Most of + its functions do not operate on @{ML_type string}s, but on instances of the + type: + + @{ML_type [display, gray, index] "Pretty.T"} + + The function @{ML str in Pretty} transforms a (plain) string into such a pretty + type. For example + + @{ML_response_fake [display,gray] + "Pretty.str \"test\"" "String (\"test\", 4)"} + + where the result indicates that we transformed a string with length 4. Once + you have a pretty type, you can, for example, control where linebreaks may + occur in case the text wraps over a line, or with how much indentation a + text should be printed. However, if you want to actually output the + formatted text, you have to transform the pretty type back into a @{ML_type + string}. This can be done with the function @{ML [index] string_of in Pretty}. In what + follows we will use the following wrapper function for printing a pretty + type: +*} + +ML{*fun pprint prt = writeln (Pretty.string_of prt)*} + +text {* + The point of the pretty-printing infrastructure is to give hints about how to + layout text and let Isabelle do the actual layout. Let us first explain + how you can insert places where a linebreak can occur. For this assume the + following function that replicates a string n times: +*} + +ML{*fun rep n str = implode (replicate n str) *} + +text {* + and suppose we want to print out the string: +*} + +ML{*val test_str = rep 8 "fooooooooooooooobaaaaaaaaaaaar "*} text {* - Isabelle has a pretty sphisticated pretty printing module. + We deliberately chosed a large string so that is spans over more than one line. 
+ If we print out the string using the usual ``quick-and-dirty'' method, then + we obtain the ugly output: + +@{ML_response_fake [display,gray] +"writeln test_str" +"fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar foooooooooo +ooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaa +aaaaaaar fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar fo +oooooooooooooobaaaaaaaaaaaar"} + + We obtain the same if we just use + +@{ML_response_fake [display,gray] +"pprint (Pretty.str test_str)" +"fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar foooooooooo +ooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaa +aaaaaaar fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar fo +oooooooooooooobaaaaaaaaaaaar"} + + However by using pretty types you have the ability to indicate a possible + linebreak for example at each space. You can achieve this with the function + @{ML [index] breaks in Pretty}, which expects a list of pretty types and inserts a + possible linebreak in between every two elements in this list. To print + this list of pretty types as a single string, we concatenate them + with the function @{ML [index] blk in Pretty} as follows: + + +@{ML_response_fake [display,gray] +"let + val ptrs = map Pretty.str (space_explode \" \" test_str) +in + pprint (Pretty.blk (0, Pretty.breaks ptrs)) +end" +"fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar +fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar +fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar +fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar"} + + Here the layout of @{ML test_str} is much more pleasing to the + eye. The @{ML "0"} in @{ML [index] blk in Pretty} stands for no indentation + of the printed string. You can increase the indentation and obtain + +@{ML_response_fake [display,gray] +"let + val ptrs = map Pretty.str (space_explode \" \" test_str) +in + pprint (Pretty.blk (3, Pretty.breaks ptrs)) +end" +"fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar + fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar + fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar + fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar"} + + where starting from the second line the indent is 3. If you want + that every line starts with the same indent, you can use the + function @{ML [index] indent in Pretty} as follows: + +@{ML_response_fake [display,gray] +"let + val ptrs = map Pretty.str (space_explode \" \" test_str) +in + pprint (Pretty.indent 10 (Pretty.blk (0, Pretty.breaks ptrs))) +end" +" fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar + fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar + fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar + fooooooooooooooobaaaaaaaaaaaar fooooooooooooooobaaaaaaaaaaaar"} + + If you want to print out a list of items separated by commas and + have the linebreaks handled properly, you can use the function + @{ML [index] commas in Pretty}. For example + +@{ML_response_fake [display,gray] +"let + val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020) +in + pprint (Pretty.blk (0, Pretty.commas ptrs)) +end" +"99998, 99999, 100000, 100001, 100002, 100003, 100004, 100005, 100006, +100007, 100008, 100009, 100010, 100011, 100012, 100013, 100014, 100015, +100016, 100017, 100018, 100019, 100020"} + + where @{ML upto} generates a list of integers. 
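  Commas are not hard-wired into this layout: in the sources, @{ML commas in
  Pretty} is essentially an instance of the more general function
  @{ML separate in Pretty}, so other separators are a one-token change.
  A sketch with semicolons:
*}

ML{*(* the same list as above, but separated by semicolons *)
let
  val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020)
in
  pprint (Pretty.blk (0, Pretty.separate ";" ptrs))
end*}

text {*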
You can print out this
+  list as a ``set'', that is, enclosed inside @{text [quotes] "{"} and
+  @{text [quotes] "}"}, and separated by commas using the function
+  @{ML [index] enum in Pretty}. For example
*}

text {*
+
+@{ML_response_fake [display,gray]
+"let
+  val ptrs = map (Pretty.str o string_of_int) (99998 upto 100020)
+in
+  pprint (Pretty.enum \",\" \"{\" \"}\" ptrs)
+end"
+"{99998, 99999, 100000, 100001, 100002, 100003, 100004, 100005, 100006,
+  100007, 100008, 100009, 100010, 100011, 100012, 100013, 100014, 100015,
+  100016, 100017, 100018, 100019, 100020}"}
+
+  As can be seen, this function prints out the ``set'' so that, starting
+  from the second line, each new line has an indentation of 2.
+
+  If you print out something that goes beyond the capabilities of the
+  standard functions, you can relatively easily do the formatting
+  yourself. Assume you want to print out a list of items where, as in ``English'',
+  the last two items are separated by @{text [quotes] "and"}. For this you can
+  write the function
+
+*}
+
+ML %linenosgray{*fun and_list [] = []
+  | and_list [x] = [x]
+  | and_list xs =
+      let
+        val (front, last) = split_last xs
+      in
+        (Pretty.commas front) @
+        [Pretty.brk 1, Pretty.str "and", Pretty.brk 1, last]
+      end *}
+
+text {*
+  where Line 7 prints the beginning of the list and Line
+  8 the last item. We have to use @{ML "Pretty.brk 1"} in order
+  to insert a space (of length 1) before the
+  @{text [quotes] "and"}. This space is also a place where a linebreak
+  can occur. We do the same after the @{text [quotes] "and"}. This gives you
+  for example
+
+@{ML_response_fake [display,gray]
+"let
+  val ptrs = map (Pretty.str o string_of_int) (1 upto 22)
+in
+  pprint (Pretty.blk (0, and_list ptrs))
+end"
+"1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21
+and 22"}
+
+  Next we would like to pretty-print a term and its type. For this we use the
+  function @{text "tell_type"}.
*}

ML %linenosgray{*fun tell_type ctxt t =
+let
+  fun pstr s = Pretty.breaks (map Pretty.str (space_explode " " s))
+  val ptrm = Pretty.quote (Syntax.pretty_term ctxt t)
+  val pty = Pretty.quote (Syntax.pretty_typ ctxt (fastype_of t))
+in
+  pprint (Pretty.blk (0,
+    (pstr "The term " @ [ptrm] @ pstr " has type "
+      @ [pty, Pretty.str "."])))
+end*}
+
+text {*
+  In Line 3 we define a function that inserts possible linebreaks in places
+  where a space occurs. In Lines 4 and 5 we pretty-print the term and its type
+  using the functions @{ML [index] pretty_term in Syntax} and @{ML [index]
+  pretty_typ in Syntax}. We also use the function @{ML [index] quote in
+  Pretty} in order to enclose the term and type inside quotation marks. In
+  Line 9 we add a period right after the type without the possibility of a
+  linebreak, because we do not want a linebreak to occur there.
+
+
+  Now you can write
+
+  @{ML_response_fake [display,gray]
+  "tell_type @{context} @{term \"min (Suc 0)\"}"
+  "The term \"min (Suc 0)\" has type \"nat \ nat\"."}
+
+  To see the proper linebreaking, you can try out the function on a bigger term
+  and type.
For example: + + @{ML_response_fake [display,gray] + "tell_type @{context} @{term \"op = (op = (op = (op = (op = op =))))\"}" + "The term \"op = (op = (op = (op = (op = op =))))\" has type +\"((((('a \ 'a \ bool) \ bool) \ bool) \ bool) \ bool) \ bool\"."} + + + FIXME: TBD below +*} + +ML {* pprint (Pretty.big_list "header" (map (Pretty.str o string_of_int) (4 upto 10))) *} + +text {* +chunks inserts forced breaks (unlike blk where you have to do this yourself) +*} + +ML {* (Pretty.chunks [Pretty.str "a", Pretty.str "b"], + Pretty.blk (0, [Pretty.str "a", Pretty.str "b"])) *} + +ML {* +fun setmp_show_all_types f = + setmp show_all_types + (! show_types orelse ! show_sorts orelse ! show_all_types) f; + +val ctxt = @{context}; +val t1 = @{term "undefined::nat"}; +val t2 = @{term "Suc y"}; +val pty = Pretty.block (Pretty.breaks + [(setmp show_question_marks false o setmp_show_all_types) + (Syntax.pretty_term ctxt) t1, + Pretty.str "=", Syntax.pretty_term ctxt t2]); +pty |> Pretty.string_of |> priority +*} + +text {* the infrastructure of Syntax-pretty-term makes sure it is printed nicely *} + + +ML {* Pretty.mark Markup.hilite (Pretty.str "foo") |> Pretty.string_of |> writeln *} +ML {* (Pretty.str "bar") |> Pretty.string_of |> writeln *} + + +ML {* Pretty.mark Markup.subgoal (Pretty.str "foo") |> Pretty.string_of |> writeln *} +ML {* (Pretty.str "bar") |> Pretty.string_of |> writeln *} + +text {* + Still to be done: + + What happens with big formulae? + + \begin{readmore} + The general infrastructure for pretty-printing is implemented in the file + @{ML_file "Pure/General/pretty.ML"}. The file @{ML_file "Pure/Syntax/syntax.ML"} + contains pretty-printing functions for terms, types, theorems and so on. + + @{ML_file "Pure/General/markup.ML"} + \end{readmore} +*} + +text {* + printing into the goal buffer as part of the proof state +*} + + +ML {* Pretty.mark Markup.hilite (Pretty.str "foo") |> Pretty.string_of |> writeln *} +ML {* (Pretty.str "bar") |> Pretty.string_of |> writeln *} + +text {* writing into the goal buffer *} + +ML {* fun my_hook interactive state = + (interactive ? Proof.goal_message (fn () => Pretty.str +"foo")) state +*} + +setup {* Context.theory_map (Specification.add_theorem_hook my_hook) *} + +lemma "False" +oops + + section {* Misc (TBD) *} -ML {*DatatypePackage.get_datatype @{theory} "List.list"*} +ML {*Datatype.get_info @{theory} "List.list"*} end diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Intro.thy --- a/ProgTutorial/Intro.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Intro.thy Tue Jul 21 12:49:26 2009 +0200 @@ -10,7 +10,7 @@ Isabelle programming, and also explain tricks of the trade. The best way to get to know the ML-level of Isabelle is by experimenting with the many code examples included in the tutorial. The code is as far as possible checked - against \input{version}. If something does not work, + against the Isabelle distribution.\footnote{\input{version}} If something does not work, then please let us know. It is impossible for us to know every environment, operating system or editor in which Isabelle is used. If you have comments, criticism or like to add to the tutorial, please feel free---you are most @@ -60,14 +60,19 @@ things really work. Therefore you should not hesitate to look at the way things are actually implemented. More importantly, it is often good to look at code that does similar things as you want to do and - learn from it. The GNU/UNIX command \mbox{@{text "grep -R"}} is + learn from it. 
The UNIX command \mbox{@{text "grep -R"}} is
often your best friend while programming with Isabelle, or
-  hypersearch if you program using jEdit under MacOSX.
+  hypersearch if you program using jEdit under MacOSX. To understand the sources,
+  it is often also necessary to track the change history of a file or
+  files. The Mercurial repository\footnote{\url{http://isabelle.in.tum.de/repos/isabelle/}}
+  for Isabelle provides convenient interfaces to query the history of
+  files and ``change sets''.
\end{description}
+
*}

-section {* Typographic Conventions in the Tutorial *}
+section {* Typographic Conventions *}

text {*
@@ -119,11 +124,50 @@
  to solve the exercises on your own, and then look at the solutions.
*}

-section {* Naming Conventions in the Isabelle Sources *}
+section {* Aaaaargh! My Code Does not Work Anymore *}

text {*
-  There are a few naming conventions in Isabelle that might aid reading
-  and writing code. (Remember that code is written once, but read numerous
+  One unpleasant aspect of any code development inside a larger system is that
+  one has to aim at a ``moving target''. The Isabelle system is no exception. Every
+  update potentially lets all hell break loose, because other developers have
+  changed code you are relying on. Cursing is somewhat helpful in such situations,
+  but taking the view that incompatible code changes are a fact of life
+  might be more gratifying. Isabelle is a research project. In most circumstances
+  it is just impossible to make research backward compatible (imagine Darwin
+  attempting to make the Theory of Evolution backward compatible).
+
+  However, there are a few steps you can take to mitigate unwanted interferences
+  with code changes from other developers. First, you can base your code on the latest
+  stable release of Isabelle (the aim is to have one such release at least
+  once every year). This
+  might cut you off from the latest features
+  implemented in Isabelle, but at least you do not have to track side-steps
+  or dead-ends in the Isabelle development. Of course this also means you have
+  to synchronise your code with the next stable release. Another possibility
+  is to get your code into the Isabelle distribution. For this you have
+  to convince other developers that your code or project is of general interest.
+  If you manage to do this, then the problem of the moving target goes
+  away, because when checking in code, developers are strongly urged to
+  test against Isabelle's code base. If your project is part of that code base,
+  then maintenance is done by others. Unfortunately, this might not
+  be helpful advice for all types of projects. A lower threshold for inclusion is offered by the
+  Archive of Formalised Proofs, AFP for short.\footnote{\url{http://afp.sourceforge.net/}}
+  This archive has been created mainly for formalisations that are
+  interesting but not necessarily of general interest. If you have ML-code as
+  part of a formalisation, then this might be the right place for you. There
+  is no problem with updating your code after submission. At the moment
+  developers are not as diligent about checking their code against the AFP as
+  against the distribution, but generally problems will be caught
+  and the developer who caused them is expected to fix them. So in
+  this case, too, code maintenance is done for you.
+
+*}
+
+section {* Some Naming Conventions in the Isabelle Sources *}
+
+text {*
+  There are a few naming conventions in the Isabelle code that might aid reading
+  and writing code.
(Remember that code is written once, but read many
times.) The most important conventions are:

\begin{itemize}
@@ -137,6 +181,7 @@
  \item @{text lthy} for local theories; ML-type: @{ML_type local_theory}
  \item @{text context} for generic contexts; ML-type @{ML_type Context.generic}
  \item @{text mx} for mixfix syntax annotations; ML-type @{ML_type mixfix}
+  \item @{text prt} for pretty printing; ML-type @{ML_type Pretty.T}
  \end{itemize}
*}

@@ -150,10 +195,12 @@
  \begin{itemize}
  \item {\bf Stefan Berghofer} wrote nearly all of the ML-code of the
  \simpleinductive-package and the code for the @{text
-  "chunk"}-antiquotation. He also wrote the first version of the chapter
+  "chunk"}-antiquotation. He also wrote the first version of this chapter
  describing the package and has been helpful \emph{beyond measure} with
  answering questions about Isabelle.

+  \item {\bf Jasmin Blanchette} helped greatly with section \ref{sec:pretty}.
+
  \item {\bf Sascha Böhme} contributed the recipes in \ref{rec:timeout},
  \ref{rec:config}, \ref{rec:storingdata}, \ref{rec:external} and \ref{rec:oracle}.
  He also wrote section \ref{sec:conversion} and helped with recipe \ref{rec:timing}.
@@ -167,7 +214,7 @@
  chapter and also contributed the material on @{text NamedThmsFun}.

  \item {\bf Christian Sternagel} proofread the tutorial and made
-  comments on the text.
+  many comments on the text.
  \end{itemize}

  Please let me know of any omissions. Responsibility for any remaining
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Ind_Code.thy
--- a/ProgTutorial/Package/Ind_Code.thy Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/Package/Ind_Code.thy Tue Jul 21 12:49:26 2009 +0200
@@ -21,7 +21,7 @@
  and then ``register'' the definition inside a local theory.
  To do the latter, we use the following wrapper for the function
-  @{ML LocalTheory.define}. The wrapper takes a predicate name, a syntax
+  @{ML [index] define in LocalTheory}. The wrapper takes a predicate name, a syntax
  annotation and a term representing the right-hand side of the definition.
*}

@@ -35,12 +35,12 @@
text {*
  It returns the definition (as a theorem) and the local theory in which the
-  definition has been made. In Line 4, @{ML internalK in Thm} is a flag
-  attached to the theorem (others possibile flags are @{ML definitionK in Thm}
-  and @{ML axiomK in Thm}). These flags just classify theorems and have no
+  definition has been made. In Line 4, @{ML [index] internalK in Thm} is a flag
+  attached to the theorem (other possible flags are @{ML [index] definitionK in Thm}
+  and @{ML [index] axiomK in Thm}). These flags just classify theorems and have no
  significant meaning, except for tools that, for example, find theorems in
  the theorem database.\footnote{FIXME: put in the section about theorems.} We
-  also use @{ML empty_binding in Attrib} in Line 3, since the definitions for
+  also use @{ML [index] empty_binding in Attrib} in Line 3, since the definitions for
  our inductive predicates are not meant to be seen by the user and therefore
  do not need to have any theorem attributes.
A testcase for this function is *} @@ -50,7 +50,7 @@ val arg = ((@{binding "My_True"}, NoSyn), @{term True}) val (def, lthy') = make_defn arg lthy in - warning (str_of_thm_no_vars lthy' def); lthy' + writeln (string_of_thm_no_vars lthy' def); lthy' end *} text {* @@ -117,7 +117,7 @@ let val def = defn_aux lthy eo_orules eo_preds (e_pred, e_arg_tys) in - warning (Syntax.string_of_term lthy def); lthy + writeln (Syntax.string_of_term lthy def); lthy end *} text {* @@ -133,7 +133,7 @@ *} local_setup %gray{* fn lthy => - (warning (Syntax.string_of_term lthy + (writeln (Syntax.string_of_term lthy (defn_aux lthy fresh_orules [fresh_pred] (fresh_pred, fresh_arg_tys))); lthy) *} @@ -170,7 +170,7 @@ into the object logic (since definitions cannot be stated with meta-connectives). To do this transformation we have to obtain the theory behind the local theory (Line 3); with this theory we can use the function - @{ML ObjectLogic.atomize_term} to make the transformation (Line 4). The call + @{ML [index] atomize_term in ObjectLogic} to make the transformation (Line 4). The call to @{ML defn_aux} in Line 5 produces all right-hand sides of the definitions. The actual definitions are then made in Line 7. The result of the function is a list of theorems and a local theory (the theorems are @@ -182,7 +182,7 @@ val (defs, lthy') = defns eo_rules eo_preds eo_prednames eo_mxs eo_arg_tyss lthy in - warning (str_of_thms_no_vars lthy' defs); lthy + writeln (string_of_thms_no_vars lthy' defs); lthy end *} text {* @@ -391,7 +391,7 @@ val intro = prove_ind lthy eo_defs srules cnewpreds ((e_pred, newpred), e_arg_tys) in - warning (str_of_thm lthy intro); lthy + writeln (string_of_thm lthy intro); lthy end *} text {* @@ -437,7 +437,7 @@ the new predicates. In Line 8, we construct the types of these new predicates using the given argument types. Next we turn them into terms and subsequently certify them (Line 9 and 10). We can now produce the substituted introduction rules - (Line 11) using the function @{ML subst_free}. Line 14 and 15 just iterate + (Line 11) using the function @{ML [index] subst_free}. Line 14 and 15 just iterate the proofs for all predicates. From this we obtain a list of theorems. Finally we need to export the fixed variables @{text "Ps"} to obtain the schematic variables @{text "?Ps"} @@ -450,7 +450,7 @@ let val ind_thms = inds eo_rules eo_defs eo_preds eo_arg_tyss lthy in - warning (str_of_thms lthy ind_thms); lthy + writeln (string_of_thms lthy ind_thms); lthy end *} @@ -516,7 +516,7 @@ val ctrms = [@{cterm \"a::nat\"}, @{cterm \"b::nat\"}, @{cterm \"c::nat\"}] val new_thm = all_elims ctrms @{thm all_elims_test} in - warning (str_of_thm_no_vars @{context} new_thm) + writeln (string_of_thm_no_vars @{context} new_thm) end" "P a b c"} @@ -529,7 +529,7 @@ @{thm [source] imp_elims_test}: @{ML_response_fake [display, gray] -"warning (str_of_thm_no_vars @{context} +"writeln (string_of_thm_no_vars @{context} (imp_elims @{thms imp_elims_test'} @{thm imp_elims_test}))" "C"} @@ -579,7 +579,7 @@ differently. In the code below we will therefore separate them into @{text "params1"} and @{text params2}, respectively @{text "prems1"} and @{text "prems2"}. To do this separation, it is best to open a subproof with the - tactic @{ML SUBPROOF}, since this tactic provides us with the parameters (as + tactic @{ML [index] SUBPROOF}, since this tactic provides us with the parameters (as list of @{ML_type cterm}s) and the assumptions (as list of @{ML_type thm}s). 
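+  The separation itself can be done with the library function @{ML chop}; as a
+  small reminder of how it behaves, consider:
+
+  @{ML_response [display,gray] "chop 2 [1, 2, 3, 4, 5]" "([1, 2], [3, 4, 5])"}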
The problem we have to overcome with @{ML SUBPROOF} is, however, that this tactic always expects us to completely discharge the @@ -604,14 +604,14 @@ *} ML{*fun chop_print params1 params2 prems1 prems2 ctxt = let - val s = ["Params1 from the rule:", str_of_cterms ctxt params1] - @ ["Params2 from the predicate:", str_of_cterms ctxt params2] - @ ["Prems1 from the rule:"] @ (map (str_of_thm ctxt) prems1) - @ ["Prems2 from the predicate:"] @ (map (str_of_thm ctxt) prems2) + val s = ["Params1 from the rule:", string_of_cterms ctxt params1] + @ ["Params2 from the predicate:", string_of_cterms ctxt params2] + @ ["Prems1 from the rule:"] @ (map (string_of_thm ctxt) prems1) + @ ["Prems2 from the predicate:"] @ (map (string_of_thm ctxt) prems2) in s |> separate "\n" |> implode - |> warning + |> writeln end*} text_raw{* \end{isabelle} @@ -627,7 +627,7 @@ going in our example, we will print out these values using the printing function in Figure~\ref{fig:chopprint}. Since the tactic @{ML SUBPROOF} will supply us the @{text "params"} and @{text "prems"} as lists, we can - separate them using the function @{ML chop}. + separate them using the function @{ML [index] chop}. *} ML{*fun chop_test_tac preds rules = @@ -691,7 +691,7 @@ To use this premise with @{ML rtac}, we need to instantiate its quantifiers (with @{text params1}) and transform it into rule - format (using @{ML "ObjectLogic.rulify"}. So we can modify the + format (using @{ML [index] rulify in ObjectLogic}. So we can modify the subproof as follows: *} @@ -854,7 +854,7 @@ In Line 4 we use the @{text prems} from the @{ML SUBPROOF} and resolve them with @{text prem}. In the simple cases, that is where the @{text prem} comes from a non-recursive premise of the rule, @{text prems} will be - just the empty list and the function @{ML MRS} does nothing. Similarly, in the + just the empty list and the function @{ML [index] MRS} does nothing. Similarly, in the cases where the recursive premises of the rule do not have preconditions. In case there are preconditions, then Line 4 discharges them. After that we can proceed as before, i.e., check whether the outermost @@ -932,7 +932,7 @@ end*} text {* - The iteration is done with the function @{ML map_index} since we + The iteration is done with the function @{ML [index] map_index} since we need the introduction rule together with its number (counted from @{text 0}). This completes the code for the functions deriving the reasoning infrastructure. It remains to implement some administrative @@ -945,7 +945,7 @@ We have produced various theorems (definitions, induction principles and introduction rules), but apart from the definitions, we have not yet registered them with the theorem database. This is what the functions - @{ML LocalTheory.note} does. + @{ML [index] note in LocalTheory} does. For convenience, we use the following @@ -1027,7 +1027,7 @@ Line 20 add further every introduction rule under its own name (given by the user).\footnote{FIXME: what happens if the user did not give any name.} Line 21 registers the induction principles. For this we have - to use some specific attributes. The first @{ML "case_names" in RuleCases} + to use some specific attributes. The first @{ML [index] case_names in RuleCases} corresponds to the case names that are used by Isar to reference the proof obligations in the induction. 
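+  (For illustration, an attribute like @{ML "RuleCases.case_names [\"base\", \"step\"]"}
+  would let an Isar proof refer to the proof obligations as @{text base} and
+  @{text step}; the concrete names here are only an example.)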
The second @{ML "consumes 1" in RuleCases} indicates that the first premise of the induction principle (namely diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Ind_Extensions.thy --- a/ProgTutorial/Package/Ind_Extensions.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Package/Ind_Extensions.thy Tue Jul 21 12:49:26 2009 +0200 @@ -184,10 +184,16 @@ The standard inductive package is based on least fix-points. It allows more general introduction rules that can include any monotone operators and also provides a richer reasoning infrastructure. The code of this package can be found in - @{ML_file "HOL/Tools/inductive_package.ML"}. + @{ML_file "HOL/Tools/inductive.ML"}. \end{readmore} *} +section {* Definitional Packages *} + +text {* Type declarations *} + +ML{*Typedef.add_typedef false NONE (@{binding test},[],NoSyn) + @{term "{1}::nat set"} NONE (simp_tac @{simpset} 1) @{theory} *} (*<*)end(*>*) \ No newline at end of file diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Ind_General_Scheme.thy --- a/ProgTutorial/Package/Ind_General_Scheme.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Package/Ind_General_Scheme.thy Tue Jul 21 12:49:26 2009 +0200 @@ -1,11 +1,44 @@ theory Ind_General_Scheme -imports Ind_Interface +imports "../Base" Simple_Inductive_Package begin +(*<*) +simple_inductive + trcl :: "('a \ 'a \ bool) \ 'a \ 'a \ bool" +where + base: "trcl R x x" +| step: "trcl R x y \ R y z \ trcl R x z" + +simple_inductive + even and odd +where + even0: "even 0" +| evenS: "odd n \ even (Suc n)" +| oddS: "even n \ odd (Suc n)" + +simple_inductive + accpart :: "('a \ 'a \ bool) \ 'a \ bool" +where + accpartI: "(\y. R y x \ accpart R y) \ accpart R x" + +datatype trm = + Var "string" +| App "trm" "trm" +| Lam "string" "trm" + +simple_inductive + fresh :: "string \ trm \ bool" +where + fresh_var: "a\b \ fresh a (Var b)" +| fresh_app: "\fresh a t; fresh a s\ \ fresh a (App t s)" +| fresh_lam1: "fresh a (Lam a t)" +| fresh_lam2: "\a\b; fresh a t\ \ fresh a (Lam b t)" +(*>*) + + section {* The Code in a Nutshell\label{sec:nutshell} *} - text {* The inductive package will generate the reasoning infrastructure for mutually recursive predicates @{text "pred\<^isub>1\pred\<^isub>n"}. In what diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Ind_Interface.thy --- a/ProgTutorial/Package/Ind_Interface.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Package/Ind_Interface.thy Tue Jul 21 12:49:26 2009 +0200 @@ -1,5 +1,5 @@ theory Ind_Interface -imports "../Base" "../Parsing" Simple_Inductive_Package +imports "../Base" Simple_Inductive_Package begin section {* Parsing and Typing the Specification\label{sec:interface} *} @@ -46,7 +46,7 @@ \end{boxedminipage} \caption{Specification given by the user for the inductive predicates @{term "trcl"}, @{term "even"} and @{term "odd"}, @{term "accpart"} and -@{text "fresh"}.\label{fig:specs}} +@{term "fresh"}.\label{fig:specs}} \end{figure} *} @@ -120,7 +120,9 @@ accpart' where accpartI: "(\y. R y x \ accpart' y) \ accpart' x" - +(*<*)ML %no{*fun filtered_input str = + filter OuterLex.is_proper (OuterSyntax.scan Position.none str) +fun parse p input = Scan.finite OuterLex.stopper (Scan.error p) input*}(*>*) text {* Note that in these definitions the parameter @{text R}, standing for the relation, is left implicit. For the moment we will ignore this kind @@ -149,28 +151,22 @@ then we get back the specifications of the predicates (with type and syntax annotations), and specifications of the introduction rules. 
This is all the information we need for calling the package and setting up the keyword. The latter is - done in Lines 5 to 7 in the code below. + done in Lines 5 to 7 in the code below.\footnote{FIXME: Is there a way to state + here @{text "simple_inductive"}?} *} - -(*<*)ML %linenos{*fun add_inductive pred_specs rule_specs lthy = lthy -fun add_inductive_cmd pred_specs rule_specs lthy = -let - val ((pred_specs', rule_specs'), _) = - Specification.read_spec pred_specs rule_specs lthy -in - add_inductive pred_specs' rule_specs' lthy -end*}(*>*) +(*<*)ML %no{*fun add_inductive_cmd pred_specs rule_specs lthy = lthy + fun add_inductive pred_specs rule_specs lthy = lthy*}(*>*) ML_val %linenosgray{*val specification = spec_parser >> (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs) -val _ = OuterSyntax.local_theory "simple_inductive" +val _ = OuterSyntax.local_theory "simple_inductive2" "definition of simple inductive predicates" OuterKeyword.thy_decl specification*} text {* - We call @{ML local_theory in OuterSyntax} with the kind-indicator - @{ML thy_decl in OuterKeyword} since the package does not need to open + We call @{ML [index] local_theory in OuterSyntax} with the kind-indicator + @{ML [index] thy_decl in OuterKeyword} since the package does not need to open up any proof (see Section~\ref{sec:newcommand}). The auxiliary function @{text specification} in Lines 1 to 3 gathers the information from the parser to be processed further @@ -182,7 +178,7 @@ eventually will be). Also the introduction rules are just strings. What we have to do first is to transform the parser's output into some internal datastructures that can be processed further. For this we can use the - function @{ML read_spec in Specification}. This function takes some strings + function @{ML [index] read_spec in Specification}. This function takes some strings (with possible typing annotations) and some rule specifications, and attempts to find a typing according to the given type constraints given by the user and the type constraints by the ``ambient'' theory. It returns @@ -191,7 +187,7 @@ @{text "add_inductive_cmd"} is a call to @{ML read_spec in Specification}. *} -ML{*fun add_inductive_cmd pred_specs rule_specs lthy = +ML_val{*fun add_inductive_cmd pred_specs rule_specs lthy = let val ((pred_specs', rule_specs'), _) = Specification.read_spec pred_specs rule_specs lthy @@ -199,11 +195,9 @@ add_inductive pred_specs' rule_specs' lthy end*} -ML {* Specification.read_spec *} - text {* Once we have the input data as some internal datastructure, we call - the function @{ML add_inductive}. This function does the heavy duty + the function @{text add_inductive}. This function does the heavy duty lifting in the package: it generates definitions for the predicates and derives from them corresponding induction principles and introduction rules. The description of this function will span over diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Ind_Intro.thy --- a/ProgTutorial/Package/Ind_Intro.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Package/Ind_Intro.thy Tue Jul 21 12:49:26 2009 +0200 @@ -17,13 +17,13 @@ \medskip HOL is based on just a few primitive constants, like equality and implication, whose properties are described by axioms. 
All other concepts,
-  such as inductive predicates, datatypes or recursive functions, have to be defined
-  in terms of those constants, and the desired properties, for example
-  induction theorems or recursion equations, have to be derived from the definitions
+  such as inductive predicates, datatypes or recursive functions, are defined
+  in terms of those primitives, and the desired properties, for example
+  induction theorems or recursion equations, are derived from the definitions
  by a formal proof. Since it would be very tedious for a user to define
  complex inductive predicates or datatypes ``by hand'' just using the
  primitive operators of higher order logic, \emph{definitional packages} have
-  been implemented automating such work. Thanks to those packages, the user
+  been implemented to automate such work. Thanks to those packages, the user
  can give a high-level specification, for example a list of introduction
  rules or constructors, and the package then does all the low-level
  definitions and proofs behind the scenes. In this chapter we explain how
@@ -32,11 +32,11 @@
  As the running example we have chosen a rather simple package for defining
  inductive predicates. To keep things really simple, we will not use the
  general Knaster-Tarski fixpoint theorem on complete lattices, which forms
-  the basis of Isabelle's standard inductive definition package. Instead, we
+  the basis of Isabelle/HOL's standard inductive definition package. Instead, we
  will describe a simpler \emph{impredicative} (i.e.\ involving quantification on
  predicate variables) encoding of inductive predicates. Due to its
  simplicity, this package will necessarily have a reduced functionality. It
-  does neither support introduction rules involving arbitrary monotone
+  neither supports introduction rules involving arbitrary monotonic
  operators, nor does it prove case analysis rules (also called inversion
  rules). Moreover, it only proves a weaker form of the induction principle
  for inductive predicates.
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Ind_Prelims.thy
--- a/ProgTutorial/Package/Ind_Prelims.thy Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/Package/Ind_Prelims.thy Tue Jul 21 12:49:26 2009 +0200
@@ -1,5 +1,5 @@
theory Ind_Prelims
-imports Main LaTeXsugar "../Base"
+imports Main "../Base"
begin

section{* Preliminaries *}
@@ -15,9 +15,7 @@
  we will figure out a general method for
  defining inductive predicates. The aim in this section is \emph{not} to
  write proofs that are as beautiful as possible, but as close as possible to
-  the ML-code we will develop in later sections.
-
-
+  the ML-implementation we will develop in later sections.

  We first consider the transitive closure of a relation @{text R}. The
  ``pencil-and-paper'' specification for the transitive closure is:
@@ -161,7 +159,7 @@
text {*
  Now we are done. It might be surprising that we are not using the automatic
-  tactics available in Isabelle for proving this lemmas. After all @{text
+  tactics available in Isabelle/HOL for proving this lemma. After all @{text
  "blast"} would easily dispense of it.
*}
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Package/Simple_Inductive_Package.thy
--- a/ProgTutorial/Package/Simple_Inductive_Package.thy Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/Package/Simple_Inductive_Package.thy Tue Jul 21 12:49:26 2009 +0200
@@ -44,7 +44,6 @@
where
  accpartI': "\x. (\y.
r y x \ accpart' y) \ accpart' x" - context rel begin thm accpartI' diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Parsing.thy --- a/ProgTutorial/Parsing.thy Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/Parsing.thy Tue Jul 21 12:49:26 2009 +0200 @@ -35,24 +35,28 @@ text {* - Let us first have a look at parsing strings using generic parsing combinators. - The function @{ML "$$"} takes a string as argument and will ``consume'' this string from - a given input list of strings. ``Consume'' in this context means that it will - return a pair consisting of this string and the rest of the input list. - For example: + Let us first have a look at parsing strings using generic parsing + combinators. The function @{ML [index] "$$"} takes a string as argument and will + ``consume'' this string from a given input list of strings. ``Consume'' in + this context means that it will return a pair consisting of this string and + the rest of the input list. For example: - @{ML_response [display,gray] "($$ \"h\") (explode \"hello\")" "(\"h\", [\"e\", \"l\", \"l\", \"o\"])"} + @{ML_response [display,gray] + "($$ \"h\") (Symbol.explode \"hello\")" "(\"h\", [\"e\", \"l\", \"l\", \"o\"])"} - @{ML_response [display,gray] "($$ \"w\") (explode \"world\")" "(\"w\", [\"o\", \"r\", \"l\", \"d\"])"} + @{ML_response [display,gray] + "($$ \"w\") (Symbol.explode \"world\")" "(\"w\", [\"o\", \"r\", \"l\", \"d\"])"} - The function @{ML "$$"} will either succeed (as in the two examples above) or raise the exception - @{text "FAIL"} if no string can be consumed. For example trying to parse + The function @{ML "$$"} will either succeed (as in the two examples above) + or raise the exception @{text "FAIL"} if no string can be consumed. For + example trying to parse - @{ML_response_fake [display,gray] "($$ \"x\") (explode \"world\")" - "Exception FAIL raised"} + @{ML_response_fake [display,gray] + "($$ \"x\") (Symbol.explode \"world\")" + "Exception FAIL raised"} - will raise the exception @{text "FAIL"}. - There are three exceptions used in the parsing combinators: + will raise the exception @{text "FAIL"}. There are three exceptions used in + the parsing combinators: \begin{itemize} \item @{text "FAIL"} is used to indicate that alternative routes of parsing @@ -65,43 +69,60 @@ However, note that these exceptions are private to the parser and cannot be accessed by the programmer (for example to handle them). - - Slightly more general than the parser @{ML "$$"} is the function @{ML - Scan.one}, in that it takes a predicate as argument and then parses exactly + + \indexdef{explode@ {\tt\slshape{}explode}}{in {\tt\slshape Symbol}} + \index{explode@ {\tt\slshape{}explode}} + In the examples above we use the function @{ML Symbol.explode}, instead of the + more standard library function @{ML explode}, for obtaining an input list for + the parser. The reason is that @{ML Symbol.explode} is aware of character sequences, + for example @{text "\"}, that have a special meaning in Isabelle. To see + the difference consider + +@{ML_response_fake [display,gray] +"let + val input = \"\ bar\" +in + (explode input, Symbol.explode input) +end" +"([\"\\\", \"<\", \"f\", \"o\", \"o\", \">\", \" \", \"b\", \"a\", \"r\"], + [\"\\", \" \", \"b\", \"a\", \"r\"])"} + + Slightly more general than the parser @{ML "$$"} is the function + @{ML [index] one in Scan}, in that it takes a predicate as argument and + then parses exactly one item from the input list satisfying this predicate. 
For example the following parser either consumes an @{text [quotes] "h"} or a @{text [quotes] "w"}: - @{ML_response [display,gray] "let val hw = Scan.one (fn x => x = \"h\" orelse x = \"w\") - val input1 = explode \"hello\" - val input2 = explode \"world\" + val input1 = Symbol.explode \"hello\" + val input2 = Symbol.explode \"world\" in (hw input1, hw input2) end" "((\"h\", [\"e\", \"l\", \"l\", \"o\"]),(\"w\", [\"o\", \"r\", \"l\", \"d\"]))"} - Two parsers can be connected in sequence by using the function @{ML "--"}. + Two parsers can be connected in sequence by using the function @{ML [index] "--"}. For example parsing @{text "h"}, @{text "e"} and @{text "l"} (in this order) you can achieve by: @{ML_response [display,gray] - "($$ \"h\" -- $$ \"e\" -- $$ \"l\") (explode \"hello\")" + "($$ \"h\" -- $$ \"e\" -- $$ \"l\") (Symbol.explode \"hello\")" "(((\"h\", \"e\"), \"l\"), [\"l\", \"o\"])"} Note how the result of consumed strings builds up on the left as nested pairs. If, as in the previous example, you want to parse a particular string, - then you should use the function @{ML Scan.this_string}: + then you should use the function @{ML [index] this_string in Scan}: @{ML_response [display,gray] - "Scan.this_string \"hell\" (explode \"hello\")" + "Scan.this_string \"hell\" (Symbol.explode \"hello\")" "(\"hell\", [\"o\"])"} - Parsers that explore alternatives can be constructed using the function @{ML - "||"}. The parser @{ML "(p || q)" for p q} returns the + Parsers that explore alternatives can be constructed using the function + @{ML [index] "||"}. The parser @{ML "(p || q)" for p q} returns the result of @{text "p"}, in case it succeeds, otherwise it returns the result of @{text "q"}. For example: @@ -109,14 +130,14 @@ @{ML_response [display,gray] "let val hw = $$ \"h\" || $$ \"w\" - val input1 = explode \"hello\" - val input2 = explode \"world\" + val input1 = Symbol.explode \"hello\" + val input2 = Symbol.explode \"world\" in (hw input1, hw input2) end" "((\"h\", [\"e\", \"l\", \"l\", \"o\"]), (\"w\", [\"o\", \"r\", \"l\", \"d\"]))"} - The functions @{ML "|--"} and @{ML "--|"} work like the sequencing function + The functions @{ML [index] "|--"} and @{ML [index] "--|"} work like the sequencing function for parsers, except that they discard the item being parsed by the first (respectively second) parser. For example: @@ -124,12 +145,13 @@ "let val just_e = $$ \"h\" |-- $$ \"e\" val just_h = $$ \"h\" --| $$ \"e\" - val input = explode \"hello\" + val input = Symbol.explode \"hello\" in (just_e input, just_h input) end" - "((\"e\", [\"l\", \"l\", \"o\"]),(\"h\", [\"l\", \"l\", \"o\"]))"} + "((\"e\", [\"l\", \"l\", \"o\"]), (\"h\", [\"l\", \"l\", \"o\"]))"} + \indexdef{optional@ {\tt\slshape{optional}}}{in {\tt\slshape Scan}} The parser @{ML "Scan.optional p x" for p x} returns the result of the parser @{text "p"}, if it succeeds; otherwise it returns the default value @{text "x"}. For example: @@ -137,26 +159,26 @@ @{ML_response [display,gray] "let val p = Scan.optional ($$ \"h\") \"x\" - val input1 = explode \"hello\" - val input2 = explode \"world\" + val input1 = Symbol.explode \"hello\" + val input2 = Symbol.explode \"world\" in (p input1, p input2) end" "((\"h\", [\"e\", \"l\", \"l\", \"o\"]), (\"x\", [\"w\", \"o\", \"r\", \"l\", \"d\"]))"} - The function @{ML Scan.option} works similarly, except no default value can + The function @{ML [index] option in Scan} works similarly, except no default value can be given. Instead, the result is wrapped as an @{text "option"}-type. 
For example: @{ML_response [display,gray] "let val p = Scan.option ($$ \"h\") - val input1 = explode \"hello\" - val input2 = explode \"world\" + val input1 = Symbol.explode \"hello\" + val input2 = Symbol.explode \"world\" in (p input1, p input2) end" "((SOME \"h\", [\"e\", \"l\", \"l\", \"o\"]), (NONE, [\"w\", \"o\", \"r\", \"l\", \"d\"]))"} - The function @{ML "!!"} helps to produce appropriate error messages + The function @{ML [index] "!!"} helps to produce appropriate error messages for parsing. For example if you want to parse @{text p} immediately followed by @{text q}, or start a completely different parser @{text r}, you might write: @@ -182,23 +204,23 @@ on @{text [quotes] "hello"}, the parsing succeeds @{ML_response [display,gray] - "(!! (fn _ => \"foo\") ($$ \"h\")) (explode \"hello\")" + "(!! (fn _ => \"foo\") ($$ \"h\")) (Symbol.explode \"hello\")" "(\"h\", [\"e\", \"l\", \"l\", \"o\"])"} but if you invoke it on @{text [quotes] "world"} - @{ML_response_fake [display,gray] "(!! (fn _ => \"foo\") ($$ \"h\")) (explode \"world\")" + @{ML_response_fake [display,gray] "(!! (fn _ => \"foo\") ($$ \"h\")) (Symbol.explode \"world\")" "Exception ABORT raised"} then the parsing aborts and the error message @{text "foo"} is printed. In order to see the error message properly, you need to prefix the parser with the function - @{ML "Scan.error"}. For example: + @{ML [index] error in Scan}. For example: @{ML_response_fake [display,gray] "Scan.error (!! (fn _ => \"foo\") ($$ \"h\"))" "Exception Error \"foo\" raised"} - This ``prefixing'' is usually done by wrappers such as @{ML "OuterSyntax.local_theory"} + This ``prefixing'' is usually done by wrappers such as @{ML [index] local_theory in OuterSyntax} (see Section~\ref{sec:newcommand} which explains this function in more detail). Let us now return to our example of parsing @{ML "(p -- q) || r" for p q @@ -219,12 +241,12 @@ @{text [quotes] "h"}, @{text [quotes] "e"} and @{text [quotes] "w"}, and the input @{text [quotes] "holle"} - @{ML_response_fake [display,gray] "Scan.error (p_followed_by_q \"h\" \"e\" \"w\") (explode \"holle\")" + @{ML_response_fake [display,gray] "Scan.error (p_followed_by_q \"h\" \"e\" \"w\") (Symbol.explode \"holle\")" "Exception ERROR \"h is not followed by e\" raised"} produces the correct error message. Running it with - @{ML_response [display,gray] "Scan.error (p_followed_by_q \"h\" \"e\" \"w\") (explode \"wworld\")" + @{ML_response [display,gray] "Scan.error (p_followed_by_q \"h\" \"e\" \"w\") (Symbol.explode \"wworld\")" "((\"w\", \"w\"), [\"o\", \"r\", \"l\", \"d\"])"} yields the expected parsing. @@ -232,62 +254,62 @@ The function @{ML "Scan.repeat p" for p} will apply a parser @{text p} as often as it succeeds. For example: - @{ML_response [display,gray] "Scan.repeat ($$ \"h\") (explode \"hhhhello\")" + @{ML_response [display,gray] "Scan.repeat ($$ \"h\") (Symbol.explode \"hhhhello\")" "([\"h\", \"h\", \"h\", \"h\"], [\"e\", \"l\", \"l\", \"o\"])"} - Note that @{ML "Scan.repeat"} stores the parsed items in a list. The function - @{ML "Scan.repeat1"} is similar, but requires that the parser @{text "p"} + Note that @{ML [index] repeat in Scan} stores the parsed items in a list. The function + @{ML [index] repeat1 in Scan} is similar, but requires that the parser @{text "p"} succeeds at least once. Also note that the parser would have aborted with the exception @{text MORE}, if you had run it only on just @{text [quotes] "hhhh"}. 
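+  That is, hypothetically (the exceptions are private to the combinators, so
+  the response shown below is only indicative):
+
+  @{ML_response_fake_both [display,gray]
+  "Scan.repeat ($$ \"h\") (Symbol.explode \"hhhh\")"
+  "Exception MORE raised"}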
This can be avoided by using - the wrapper @{ML Scan.finite} and the ``stopper-token'' @{ML Symbol.stopper}. With - them you can write: + the wrapper @{ML [index] finite in Scan} and the ``stopper-token'' + @{ML [index] stopper in Symbol}. With them you can write: - @{ML_response [display,gray] "Scan.finite Symbol.stopper (Scan.repeat ($$ \"h\")) (explode \"hhhh\")" + @{ML_response [display,gray] "Scan.finite Symbol.stopper (Scan.repeat ($$ \"h\")) (Symbol.explode \"hhhh\")" "([\"h\", \"h\", \"h\", \"h\"], [])"} @{ML Symbol.stopper} is the ``end-of-input'' indicator for parsing strings; other stoppers need to be used when parsing, for example, tokens. However, this kind of manually wrapping is often already done by the surrounding infrastructure. - The function @{ML Scan.repeat} can be used with @{ML Scan.one} to read any + The function @{ML [index] repeat in Scan} can be used with @{ML [index] one in Scan} to read any string as in @{ML_response [display,gray] "let val p = Scan.repeat (Scan.one Symbol.not_eof) - val input = explode \"foo bar foo\" + val input = Symbol.explode \"foo bar foo\" in Scan.finite Symbol.stopper p input end" "([\"f\", \"o\", \"o\", \" \", \"b\", \"a\", \"r\", \" \", \"f\", \"o\", \"o\"], [])"} - where the function @{ML Symbol.not_eof} ensures that we do not read beyond the + where the function @{ML [index] not_eof in Symbol} ensures that we do not read beyond the end of the input string (i.e.~stopper symbol). The function @{ML "Scan.unless p q" for p q} takes two parsers: if the first one can parse the input, then the whole parser fails; if not, then the second is tried. Therefore - @{ML_response_fake_both [display,gray] "Scan.unless ($$ \"h\") ($$ \"w\") (explode \"hello\")" + @{ML_response_fake_both [display,gray] "Scan.unless ($$ \"h\") ($$ \"w\") (Symbol.explode \"hello\")" "Exception FAIL raised"} fails, while - @{ML_response [display,gray] "Scan.unless ($$ \"h\") ($$ \"w\") (explode \"world\")" + @{ML_response [display,gray] "Scan.unless ($$ \"h\") ($$ \"w\") (Symbol.explode \"world\")" "(\"w\",[\"o\", \"r\", \"l\", \"d\"])"} succeeds. - The functions @{ML Scan.repeat} and @{ML Scan.unless} can be combined to read any - input until a certain marker symbol is reached. In the example below the marker - symbol is a @{text [quotes] "*"}. + The functions @{ML [index] repeat in Scan} and @{ML [index] unless in Scan} can + be combined to read any input until a certain marker symbol is reached. In the + example below the marker symbol is a @{text [quotes] "*"}. @{ML_response [display,gray] "let val p = Scan.repeat (Scan.unless ($$ \"*\") (Scan.one Symbol.not_eof)) - val input1 = explode \"fooooo\" - val input2 = explode \"foo*ooo\" + val input1 = Symbol.explode \"fooooo\" + val input2 = Symbol.explode \"foo*ooo\" in (Scan.finite Symbol.stopper p input1, Scan.finite Symbol.stopper p input2) @@ -295,8 +317,10 @@ "(([\"f\", \"o\", \"o\", \"o\", \"o\", \"o\"], []), ([\"f\", \"o\", \"o\"], [\"*\", \"o\", \"o\", \"o\"]))"} + After parsing is done, you almost always want to apply a function to the parsed - items. One way to do this is the function @{ML "(p >> f)" for p f}, which runs + items. One way to do this is the function @{ML [index]">>"} where + @{ML "(p >> f)" for p f} runs first the parser @{text p} and upon successful completion applies the function @{text f} to the result. 
For example @@ -304,7 +328,7 @@ "let fun double (x, y) = (x ^ x, y ^ y) in - (($$ \"h\") -- ($$ \"e\") >> double) (explode \"hello\") + (($$ \"h\") -- ($$ \"e\") >> double) (Symbol.explode \"hello\") end" "((\"hh\", \"ee\"), [\"l\", \"l\", \"o\"])"} @@ -313,7 +337,7 @@ @{ML_response [display,gray] "let val p = Scan.repeat (Scan.one Symbol.not_eof) - val input = explode \"foo bar foo\" + val input = Symbol.explode \"foo bar foo\" in Scan.finite Symbol.stopper (p >> implode) input end" @@ -324,19 +348,19 @@ (FIXME: move to an earlier place) - The function @{ML Scan.ahead} parses some input, but leaves the original + The function @{ML [index] ahead in Scan} parses some input, but leaves the original input unchanged. For example: @{ML_response [display,gray] - "Scan.ahead (Scan.this_string \"foo\") (explode \"foo\")" + "Scan.ahead (Scan.this_string \"foo\") (Symbol.explode \"foo\")" "(\"foo\", [\"f\", \"o\", \"o\"])"} - The function @{ML Scan.lift} takes a parser and a pair as arguments. This function applies + The function @{ML [index] lift in Scan} takes a parser and a pair as arguments. This function applies the given parser to the second component of the pair and leaves the first component untouched. For example @{ML_response [display,gray] -"Scan.lift ($$ \"h\" -- $$ \"e\") (1, explode \"hello\")" +"Scan.lift ($$ \"h\" -- $$ \"e\") (1, Symbol.explode \"hello\")" "((\"h\", \"e\"), (1, [\"l\", \"l\", \"o\"]))"} (FIXME: In which situations is this useful? Give examples.) @@ -371,12 +395,13 @@ The definition for tokens is in the file @{ML_file "Pure/Isar/outer_lex.ML"}. \end{readmore} - The structure @{ML_struct OuterLex} defines several kinds of tokens (for - example @{ML "Ident" in OuterLex} for identifiers, @{ML "Keyword" in - OuterLex} for keywords and @{ML "Command" in OuterLex} for commands). Some + The structure @{ML_struct [index] OuterLex} defines several kinds of tokens (for + example @{ML [index] Ident in OuterLex} for identifiers, @{ML Keyword in + OuterLex} for keywords and @{ML [index] Command in OuterLex} for commands). Some token parsers take into account the kind of tokens. The first example shows - how to generate a token list out of a string using the function @{ML - "OuterSyntax.scan"}. It is given the argument @{ML "Position.none"} since, + how to generate a token list out of a string using the function + @{ML [index] scan in OuterSyntax}. It is given the argument + @{ML "Position.none"} since, at the moment, we are not interested in generating precise error messages. The following code\footnote{Note that because of a possible bug in the PolyML runtime system, the result is printed as @{text [quotes] "?"}, @@ -393,7 +418,7 @@ other syntactic category. The second indicates a space. We can easily change what is recognised as a keyword with - @{ML OuterKeyword.keyword}. For example calling this function + @{ML [index] keyword in OuterKeyword}. For example calling this function *} ML{*val _ = OuterKeyword.keyword "hello"*} @@ -406,9 +431,10 @@ Token (\,(Space, \" \"),\), Token (\,(Ident, \"world\"),\)]"} - Many parsing functions later on will require spaces, comments and the like + Many parsing functions later on will require white space, comments and the like to have already been filtered out. So from now on we are going to use the - functions @{ML filter} and @{ML OuterLex.is_proper} to do this. For example: + functions @{ML filter} and @{ML [index] is_proper in OuterLex} to do this. 
+ For example: @{ML_response_fake [display,gray] "let @@ -434,9 +460,8 @@ Token (\,(Keyword, \"for\"),\)]"} you obtain a list consisting of only one command and two keyword tokens. - If you want to see which keywords and commands are currently known to Isabelle, type in - the following code (you might have to adjust the @{ML print_depth} in order to - see the complete list): + If you want to see which keywords and commands are currently known to Isabelle, + type: @{ML_response_fake [display,gray] "let @@ -446,7 +471,10 @@ end" "([\"}\", \"{\", \], [\"\\", \"\\", \])"} - The parser @{ML "OuterParse.$$$"} parses a single keyword. For example: + You might have to adjust the @{ML [index] print_depth} in order to + see the complete list. + + The parser @{ML [index] "$$$" in OuterParse} parses a single keyword. For example: @{ML_response [display,gray] "let @@ -457,7 +485,7 @@ end" "((\"where\",\), (\"|\",\))"} - Any non-keyword string can be parsed with the function @{ML OuterParse.reserved}. + Any non-keyword string can be parsed with the function @{ML [index] reserved in OuterParse}. For example: @{ML_response [display,gray] @@ -469,7 +497,7 @@ end" "(\"bar\",[])"} - Like before, you can sequentially connect parsers with @{ML "--"}. For example: + Like before, you can sequentially connect parsers with @{ML [index] "--"}. For example: @{ML_response [display,gray] "let @@ -479,6 +507,7 @@ end" "((\"|\", \"in\"), [])"} + \indexdef{enum@ {\tt\slshape{enum}}}{in {\tt\slshape OuterParse}} The parser @{ML "OuterParse.enum s p" for s p} parses a possibly empty list of items recognised by the parser @{text p}, where the items being parsed are separated by the string @{text s}. For example: @@ -491,7 +520,7 @@ end" "([\"in\", \"in\", \"in\"], [\])"} - @{ML "OuterParse.enum1"} works similarly, except that the parsed list must + @{ML [index] enum1 in OuterParse} works similarly, except that the parsed list must be non-empty. Note that we had to add a string @{text [quotes] "foo"} at the end of the parsed string, otherwise the parser would have consumed all tokens and then failed with the exception @{text "MORE"}. Like in the @@ -515,7 +544,7 @@ text {* - The function @{ML "OuterParse.!!!"} can be used to force termination of the + The function @{ML [index] "!!!" in OuterParse} can be used to force termination of the parser in case of a dead end, just like @{ML "Scan.!!"} (see previous section). Except that the error message of @{ML "OuterParse.!!!"} is fixed to be @{text [quotes] "Outer syntax error"} @@ -534,8 +563,8 @@ \begin{exercise} (FIXME) A type-identifier, for example @{typ "'a"}, is a token of - kind @{ML "Keyword" in OuterLex}. It can be parsed using - the function @{ML OuterParse.type_ident}. + kind @{ML [index] Keyword in OuterLex}. It can be parsed using + the function @{ML type_ident in OuterParse}. \end{exercise} (FIXME: or give parser for numbers) @@ -561,12 +590,12 @@ in which the @{text [quotes] "\\n"} causes the second token to be in line 8. - By using the parser @{ML OuterParse.position} you can decode the positional - information and return it as part of the parsed input. For example + By using the parser @{ML OuterParse.position} you can access the token position + and return it as part of the parser result. 
For example @{ML_response_fake [display,gray] "let - val input = (filtered_input' \"where\") + val input = filtered_input' \"where\" in parse (OuterParse.position (OuterParse.$$$ \"where\")) input end" @@ -665,7 +694,7 @@ (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*} text {* - Note that the parser does not parse the keyword \simpleinductive, even if it is + Note that the parser must not parse the keyword \simpleinductive, even if it is meant to process definitions as shown above. The parser of the keyword will be given by the infrastructure that will eventually call @{ML spec_parser}. @@ -693,7 +722,7 @@ variables with optional type-annotation and syntax-annotation, and a list of rules where every rule has optionally a name and an attribute. - The function @{ML OuterParse.fixes} in Line 2 of the parser reads an + The function @{ML [index] "fixes" in OuterParse} in Line 2 of the parser reads an \isacommand{and}-separated list of variables that can include optional type annotations and syntax translations. For example:\footnote{Note that in the code we need to write @@ -716,18 +745,18 @@ Whenever types are given, they are stored in the @{ML SOME}s. The types are not yet used to type the variables: this must be done by type-inference later on. Since types are part of the inner syntax they are strings with some - encoded information (see previous section). If a syntax translation is - present for a variable, then it is stored in the @{ML Mixfix} data structure; - no syntax translation is indicated by @{ML NoSyn}. + encoded information (see previous section). If a mixfix-syntax is + present for a variable, then it is stored in the @{ML [index] Mixfix} data structure; + no syntax translation is indicated by @{ML [index] NoSyn}. \begin{readmore} - The data structure for syntax annotations is defined in @{ML_file "Pure/Syntax/mixfix.ML"}. + The data structure for mixfix annotations is defined in @{ML_file "Pure/Syntax/mixfix.ML"}. \end{readmore} Lines 3 to 7 in the function @{ML spec_parser} implement the parser for a list of introduction rules, that is propositions with theorem annotations such as rule names and attributes. The introduction rules are propositions - parsed by @{ML OuterParse.prop}. However, they can include an optional + parsed by @{ML [index] prop in OuterParse}. However, they can include an optional theorem name plus some attributes. For example @{ML_response [display,gray] "let @@ -737,8 +766,8 @@ (name, map Args.dest_src attrib) end" "(foo_lemma, [((\"intro\", []), \), ((\"dest\", [\]), \)])"} - The function @{ML opt_thm_name in SpecParse} is the ``optional'' variant of - @{ML thm_name in SpecParse}. Theorem names can contain attributes. The name + The function @{ML [index] opt_thm_name in SpecParse} is the ``optional'' variant of + @{ML [index] thm_name in SpecParse}. Theorem names can contain attributes. The name has to end with @{text [quotes] ":"}---see the argument of the function @{ML SpecParse.opt_thm_name} in Line 7. @@ -753,7 +782,7 @@ Have a look at how the parser @{ML SpecParse.where_alt_specs} is implemented in file @{ML_file "Pure/Isar/spec_parse.ML"}. This parser corresponds to the ``where-part'' of the introduction rules given above. Below - we paraphrase the code of @{ML SpecParse.where_alt_specs} adapted to our + we paraphrase the code of @{ML [index] where_alt_specs in SpecParse} adapted to our purposes. 
\begin{isabelle} *} @@ -802,7 +831,7 @@ end *} text {* - The crucial function @{ML OuterSyntax.local_theory} expects a name for the command, a + The crucial function @{ML [index] local_theory in OuterSyntax} expects a name for the command, a short description, a kind indicator (which we will explain later more thoroughly) and a parser producing a local theory transition (its purpose will also explained later). @@ -840,9 +869,9 @@ @{text "\"}\\ \isacommand{end} \end{graybox} - \caption{The file @{text "Command.thy"} is necessary for generating a log - file. This log file enables Isabelle to generate a keyword file containing - the command \isacommand{foobar}.\label{fig:commandtheory}} + \caption{This file can be used to generate a log file. This log file in turn can + be used to generate a keyword file containing the command \isacommand{foobar}. + \label{fig:commandtheory}} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -891,7 +920,7 @@ If the compilation succeeds, you have finally created all the necessary log files. They are stored in the directory - @{text [display] "~/.isabelle/heaps/Isabelle2008/polyml-5.2.1_x86-linux/log"} + @{text [display] "~/.isabelle/heaps/Isabelle2009/polyml-5.2.1_x86-linux/log"} or something similar depending on your Isabelle distribution and architecture. One quick way to assign a shell variable to this directory is by typing @@ -919,7 +948,7 @@ the string @{text "foobar"} twice.\footnote{To see whether things are fine, check that @{text "grep foobar"} on this file returns something non-empty.} This keyword file needs to - be copied into the directory @{text "~/.isabelle/etc"}. To make Isabelle + be copied into the directory @{text "~/.isabelle/etc"}. To make ProofGeneral aware of this keyword file, you have to start Isabelle with the option @{text "-k foobar"}, that is: @@ -942,7 +971,7 @@ The crucial part of a command is the function that determines the behaviour of the command. In the code above we used a ``do-nothing''-function, which - because of @{ML Scan.succeed} does not parse any argument, but immediately + because of @{ML [index] succeed in Scan} does not parse any argument, but immediately returns the simple function @{ML "LocalTheory.theory I"}. We can replace this code by a function that first parses a proposition (using the parser @{ML OuterParse.prop}), then prints out the tracing @@ -972,7 +1001,7 @@ and see the proposition in the tracing buffer. - Note that so far we used @{ML thy_decl in OuterKeyword} as the kind + Note that so far we used @{ML [index] thy_decl in OuterKeyword} as the kind indicator for the command. This means that the command finishes as soon as the arguments are processed. Examples of this kind of commands are \isacommand{definition} and \isacommand{declare}. In other cases, commands @@ -980,7 +1009,7 @@ ``open up'' a proof in order to prove the proposition (for example \isacommand{lemma}) or prove some other properties (for example \isacommand{function}). To achieve this kind of behaviour, you have to use - the kind indicator @{ML thy_goal in OuterKeyword} and the function @{ML + the kind indicator @{ML [index] thy_goal in OuterKeyword} and the function @{ML "local_theory_to_proof" in OuterSyntax} to set up the command. 
Note, however, that once you change the ``kind'' of a command from @{ML thy_decl in OuterKeyword} to @{ML thy_goal in OuterKeyword} then the keyword file needs
@@ -992,11 +1021,11 @@ *} ML%linenosgray{*let - fun prove_prop str ctxt = + fun prove_prop str lthy = let - val prop = Syntax.read_prop ctxt str + val prop = Syntax.read_prop lthy str in - Proof.theorem_i NONE (K I) [[(prop,[])]] ctxt + Proof.theorem_i NONE (K I) [[(prop,[])]] lthy end; val prove_prop_parser = OuterParse.prop >> prove_prop
@@ -1009,8 +1038,8 @@ text {* The function @{text prove_prop} in Lines 2 to 7 takes a string (the proposition to be proved) and a context as argument. The context is necessary in order to be able to use - @{ML Syntax.read_prop}, which converts a string into a proper proposition. - In Line 6 the function @{ML Proof.theorem_i} starts the proof for the + @{ML [index] read_prop in Syntax}, which converts a string into a proper proposition. + In Line 6 the function @{ML [index] theorem_i in Proof} starts the proof for the proposition. Its argument @{ML NONE} stands for a locale (which we chose to omit); the argument @{ML "(K I)"} stands for a function that determines what should be done with the theorem once it is proved (we chose to just forget
@@ -1034,7 +1063,8 @@ \isacommand{done} \end{isabelle} - (FIXME: read a name and show how to store theorems) + (FIXME: read a name and show how to store theorems; see @{ML [index] note in LocalTheory}) + *} section {* Methods (TBD) *}
@@ -1057,20 +1087,21 @@ An example of a very simple method is: *} -method_setup %gray foobar_meth = +method_setup %gray foo = {* Scan.succeed (K (SIMPLE_METHOD ((etac @{thm conjE} THEN' rtac @{thm conjI}) 1))) *} - "foobar method for conjE and conjI" + "foo method for conjE and conjI" text {* - It defines the method @{text foobar_meth}, which takes no arguments (therefore the + It defines the method @{text foo}, which takes no arguments (therefore the parser @{ML Scan.succeed}) and only applies a single tactic, namely the tactic which - applies @{thm [source] conjE} and then @{thm [source] conjI}. The function @{ML SIMPLE_METHOD} - turns such a tactic into a method. @{text "Foobar_meth"} can be used as follows + applies @{thm [source] conjE} and then @{thm [source] conjI}. The function + @{ML [index] SIMPLE_METHOD} + turns such a tactic into a method. The method @{text "foo"} can be used as follows *} lemma shows "A \ B \ C \ D" -apply(foobar_meth) +apply(foo) txt {* where it results in the goal state
@@ -1312,7 +1343,7 @@ Input text is split up into tokens, and the input source type for many parsing functions is \texttt{token list}. - The data type definition (which is not published in the signature) is + The datatype definition (which is not published in the signature) is \begin{verbatim} datatype token = Token of Position.T * (token_kind * string); \end{verbatim}
@@ -1497,7 +1528,7 @@ text {* The source file is \texttt{src/Pure/Isar/args.ML}. - The primary type of this structure is the \texttt{src} data type; + The primary type of this structure is the \texttt{src} datatype; its single constructor is not published in the signature, but \texttt{Args.src} and \texttt{Args.dest\_src} are in fact the constructor and destructor functions.
@@ -1757,8 +1788,7 @@ structure \texttt{Method}, but not published in its signature. The source file \texttt{src/Pure/Isar/method.ML} shows the use of \texttt{Method.add\_method} to add a number of methods.
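text {*
  Roughly the same effect can be achieved at the user level with
  \isacommand{method\_setup}; for instance, a variant of the method
  @{text foo} from above that only applies @{thm [source] conjI} might be
  sketched as follows (the method name @{text bar} is made up):
*}

method_setup %gray bar =
{* Scan.succeed (K (SIMPLE_METHOD (rtac @{thm conjI} 1))) *}
  "a sketch: a method applying conjI once"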
- +*} -*} (*>*) end
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Recipes/Timing.thy
--- a/ProgTutorial/Recipes/Timing.thy Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/Recipes/Timing.thy Tue Jul 21 12:49:26 2009 +0200
@@ -33,7 +33,7 @@ val res = tac st; val t_end = end_timing t_start; in - (warning (#message t_end); res) + (writeln (#message t_end); res) end*} text {*
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Solutions.thy
--- a/ProgTutorial/Solutions.thy Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/Solutions.thy Tue Jul 21 12:49:26 2009 +0200
@@ -177,7 +177,7 @@ This function generates for example: @{ML_response_fake [display,gray] - "warning (Syntax.string_of_term @{context} (term_tree 2))" + "writeln (Syntax.string_of_term @{context} (term_tree 2))" "(1 + 2) + (3 + 4)"} The next function constructs a goal of the form @{text "P \"} with a term
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/Tactical.thy
--- a/ProgTutorial/Tactical.thy Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/Tactical.thy Tue Jul 21 12:49:26 2009 +0200
@@ -56,12 +56,12 @@ @{text xs} that will be generalised once the goal is proved (in our case @{text P} and @{text Q}). The @{text "tac"} is the tactic that proves the goal; it can make use of the local assumptions (there are none in this example). - The functions @{ML etac}, @{ML rtac} and @{ML atac} in the code above correspond to - @{text erule}, @{text rule} and @{text assumption}, respectively. - The operator @{ML THEN} strings the tactics together. + The tactics @{ML [index] etac}, @{ML [index] rtac} and @{ML [index] atac} in the code above correspond + roughly to @{text erule}, @{text rule} and @{text assumption}, respectively. + The operator @{ML [index] THEN} strings the tactics together. \begin{readmore} - To learn more about the function @{ML Goal.prove} see \isccite{sec:results} + To learn more about the function @{ML [index] prove in Goal} see \isccite{sec:results} and the file @{ML_file "Pure/goal.ML"}. See @{ML_file "Pure/tactic.ML"} and @{ML_file "Pure/tctical.ML"} for the code of basic tactics and tactic combinators; see also Chapters 3 and 4 in the old
@@ -119,7 +119,7 @@ THEN' atac)*}text_raw{*\label{tac:footacprime}*} text {* - where @{ML THEN'} is used instead of @{ML THEN}. With @{ML foo_tac'} you can give + where @{ML [index] THEN'} is used instead of @{ML THEN}. With @{ML foo_tac'} you can give the number for the subgoal explicitly when the tactic is called. So in the next proof you can first discharge the second subgoal, and subsequently the first.
@@ -145,7 +145,9 @@ @{text "*** At command \"apply\"."} \end{isabelle} - This means they failed. The reason for this error message is that tactics + This means they failed.\footnote{To be precise, tactics do not produce this error + message; it originates from the \isacommand{apply} wrapper.} The reason for this + error message is that tactics are functions mapping a goal state to a (lazy) sequence of successor states. Hence the type of a tactic is: *}
@@ -158,7 +160,7 @@ willy-nilly; only in very grave failure situations should a tactic raise the exception @{text THM}. - The simplest tactics are @{ML no_tac} and @{ML all_tac}. The first returns + The simplest tactics are @{ML [index] no_tac} and @{ML [index] all_tac}.
The first returns the empty sequence and is defined as *}
@@ -210,7 +212,7 @@ ML{*fun my_print_tac ctxt thm = let - val _ = warning (str_of_thm_no_vars ctxt thm) + val _ = writeln (string_of_thm_no_vars ctxt thm) in Seq.single thm end*}
@@ -310,10 +312,12 @@ where @{term C} is the goal to be proved and the @{term "A\<^isub>i"} are the subgoals. So after setting up the lemma, the goal state is always of the form @{text "C \ (C)"}; when the proof is finished we are left with @{text - "(C)"}. Since the goal @{term C} can potentially be an implication, there is - a ``protector'' wrapped around it (the wrapper is the outermost constant @{text - "Const (\"prop\", bool \ bool)"}; however this constant - is invisible in the figure). This wrapper prevents that premises of @{text C} are + "(C)"}.\footnote{This only applies to single statements. If the lemma + contains more than one statement, then there is one such implication for + each statement.} + Since the goal @{term C} can potentially be an implication, there is a + ``protector'' wrapped around it (the wrapper is the outermost constant + @{text "Const (\"prop\", bool \ bool)"}; however this constant is invisible + in the figure). This wrapper prevents premises of @{text C} from being misinterpreted as open subgoals. While tactics can operate on the subgoals (the @{text "A\<^isub>i"} above), they are expected to leave the conclusion @{term C} intact, with the exception of possibly instantiating schematic
@@ -329,7 +333,7 @@ section {* Simple Tactics\label{sec:simpletacs} *} text {* - Let us start with explaining the simple tactic @{ML print_tac}, which is quite useful + Let us start by explaining the simple tactic @{ML [index] print_tac}, which is quite useful for low-level debugging of tactics. It just prints out a message and the current goal state. Unlike @{ML my_print_tac} shown earlier, it prints the goal state as the user would see it. For example, processing the proof
@@ -349,7 +353,7 @@ text {* A simple tactic for easy discharge of any proof obligations is - @{ML SkipProof.cheat_tac}. This tactic corresponds to + @{ML [index] cheat_tac in SkipProof}. This tactic corresponds to the Isabelle command \isacommand{sorry} and is sometimes useful during the development of tactics. *}
@@ -362,7 +366,10 @@ (*<*)oops(*>*) text {* - Another simple tactic is the function @{ML atac}, which, as shown in the previous + This tactic works, however, only if the quick-and-dirty mode of Isabelle + is switched on. + + Another simple tactic is the function @{ML [index] atac}, which, as shown in the previous section, corresponds to the assumption command. *}
@@ -374,7 +381,8 @@ (*<*)oops(*>*) text {* - Similarly, @{ML rtac}, @{ML dtac}, @{ML etac} and @{ML ftac} correspond + Similarly, @{ML [index] rtac}, @{ML [index] dtac}, @{ML [index] etac} and + @{ML [index] ftac} correspond (roughly) to @{text rule}, @{text drule}, @{text erule} and @{text frule}, respectively. Each of them takes a theorem as argument and attempts to apply it to a goal. Below are three self-explanatory examples.
@@ -402,7 +410,7 @@ (*<*)oops(*>*) text {* - The function @{ML resolve_tac} is similar to @{ML rtac}, except that it + The function @{ML [index] resolve_tac} is similar to @{ML [index] rtac}, except that it expects a list of theorems as arguments. From this list it will apply the first applicable theorem (later theorems that are also applicable can be explored via the lazy sequences mechanism).
Given the code @@ -425,10 +433,11 @@ text {* Similar versions taking a list of theorems exist for the tactics - @{ML dtac} (@{ML dresolve_tac}), @{ML etac} (@{ML eresolve_tac}) and so on. + @{ML dtac} (@{ML [index] dresolve_tac}), @{ML etac} + (@{ML [index] eresolve_tac}) and so on. - Another simple tactic is @{ML cut_facts_tac}. It inserts a list of theorems + Another simple tactic is @{ML [index] cut_facts_tac}. It inserts a list of theorems into the assumptions of the current goal state. For example *} @@ -461,9 +470,9 @@ applied to the first subgoal might instantiate this meta-variable in such a way that the second subgoal becomes unprovable. If it is clear what the @{text "?x"} should be, then this situation can be avoided by introducing a more - constraint version of the @{text bspec}-rule. Such constraints can be given by + constrained version of the @{text bspec}-rule. Such constraints can be given by pre-instantiating theorems with other theorems. One function to do this is - @{ML RS} + @{ML [index] RS} @{ML_response_fake [display,gray] "@{thm disjI1} RS @{thm conjI}" "\?P1; ?Q\ \ (?P1 \ ?Q1) \ ?Q"} @@ -477,7 +486,7 @@ "*** Exception- THM (\"RSN: no unifiers\", 1, [\"\?P; ?Q\ \ ?P \ ?Q\", \"\?P \ ?Q; ?P\ \ ?Q\"]) raised"} - then the function raises an exception. The function @{ML RSN} is similar to @{ML RS}, but + then the function raises an exception. The function @{ML [index] RSN} is similar to @{ML RS}, but takes an additional number as argument that makes explicit which premise should be instantiated. @@ -490,14 +499,14 @@ "no_vars @{context} (@{thm disjI1} RS @{thm conjI})" "\P; Q\ \ (P \ Qa) \ Q"} If you want to instantiate more than one premise of a theorem, you can use - the function @{ML MRS}: + the function @{ML [index] MRS}: @{ML_response_fake [display,gray] "no_vars @{context} ([@{thm disjI1}, @{thm disjI2}] MRS @{thm conjI})" "\P; Q\ \ (P \ Qa) \ (Pa \ Q)"} If you need to instantiate lists of theorems, you can use the - functions @{ML RL} and @{ML MRL}. For example in the code below, + functions @{ML RL} and @{ML [index] MRL}. For example in the code below, every theorem in the second list is instantiated with every theorem in the first. @@ -516,7 +525,7 @@ Often proofs on the ML-level involve elaborate operations on assumptions and @{text "\"}-quantified variables. To do such operations using the basic tactics shown so far is very unwieldy and brittle. Some convenience and - safety is provided by the tactic @{ML SUBPROOF}. This tactic fixes the parameters + safety is provided by @{ML [index] SUBPROOF}. This tactic fixes the parameters and binds the various components of a goal state to a record. 
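text {*
  Before looking at @{ML SUBPROOF} in action, one more remark on
  pre-instantiation: since @{ML RS} returns a theorem, it can be chained; a
  small sketch (the name @{text nested_conjI} is made up):
*}

ML{*(* a sketch: the conclusion of conjI is unified with the first premise
   of a second copy of conjI, yielding a rule whose conclusion contains a
   nested conjunction *)
val nested_conjI = @{thm conjI} RS @{thm conjI}*}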
To see what happens, assume the function defined in Figure~\ref{fig:sptac}, which takes a record and just prints out the content of this record (using the @@ -531,17 +540,17 @@ *} ML{*fun sp_tac {prems, params, asms, concl, context, schematics} = let - val str_of_params = str_of_cterms context params - val str_of_asms = str_of_cterms context asms - val str_of_concl = str_of_cterm context concl - val str_of_prems = str_of_thms_no_vars context prems - val str_of_schms = str_of_cterms context (snd schematics) + val string_of_params = string_of_cterms context params + val string_of_asms = string_of_cterms context asms + val string_of_concl = string_of_cterm context concl + val string_of_prems = string_of_thms_no_vars context prems + val string_of_schms = string_of_cterms context (snd schematics) - val _ = (warning ("params: " ^ str_of_params); - warning ("schematics: " ^ str_of_schms); - warning ("assumptions: " ^ str_of_asms); - warning ("conclusion: " ^ str_of_concl); - warning ("premises: " ^ str_of_prems)) + val _ = (writeln ("params: " ^ string_of_params); + writeln ("schematics: " ^ string_of_schms); + writeln ("assumptions: " ^ string_of_asms); + writeln ("conclusion: " ^ string_of_concl); + writeln ("premises: " ^ string_of_prems)) in no_tac end*} @@ -579,11 +588,11 @@ @{text asms}, but also as @{ML_type thm} to @{text prems}. Notice also that we had to append @{text [quotes] "?"} to the - \isacommand{apply}-command. The reason is that @{ML SUBPROOF} normally + \isacommand{apply}-command. The reason is that @{ML [index] SUBPROOF} normally expects that the subgoal is solved completely. Since in the function @{ML sp_tac} we returned the tactic @{ML no_tac}, the subproof obviously fails. The question-mark allows us to recover from this failure in a - graceful manner so that the warning messages are not overwritten by an + graceful manner so that the output messages are not overwritten by an ``empty sequence'' error message. If we continue the proof script by applying the @{text impI}-rule @@ -629,20 +638,19 @@ (*<*)oops(*>*) text {* - The restriction in this tactic which is not present in @{ML atac} is - that it cannot instantiate any - schematic variable. This might be seen as a defect, but it is actually - an advantage in the situations for which @{ML SUBPROOF} was designed: - the reason is that, as mentioned before, instantiation of schematic variables can affect - several goals and can render them unprovable. @{ML SUBPROOF} is meant - to avoid this. + The restriction in this tactic which is not present in @{ML atac} is that it + cannot instantiate any schematic variables. This might be seen as a defect, + but it is actually an advantage in the situations for which @{ML SUBPROOF} + was designed: the reason is that, as mentioned before, instantiation of + schematic variables can affect several goals and can render them + unprovable. @{ML SUBPROOF} is meant to avoid this. - Notice that @{ML atac'} inside @{ML SUBPROOF} calls @{ML resolve_tac} with - the subgoal number @{text "1"} and also the outer call to @{ML SUBPROOF} in - the \isacommand{apply}-step uses @{text "1"}. This is another advantage - of @{ML SUBPROOF}: the addressing inside it is completely - local to the tactic inside the subproof. It is therefore possible to also apply - @{ML atac'} to the second goal by just writing: + Notice that @{ML atac'} inside @{ML SUBPROOF} calls @{ML resolve_tac} with + the subgoal number @{text "1"} and also the outer call to @{ML SUBPROOF} in + the \isacommand{apply}-step uses @{text "1"}. 
This is another advantage of + @{ML SUBPROOF}: the addressing inside it is completely local to the tactic + inside the subproof. It is therefore possible to also apply @{ML atac'} to + the second goal by just writing: *} lemma shows "True" and "\B x y; A x y; C x y\ \ A x y" @@ -657,8 +665,8 @@ also described in \isccite{sec:results}. \end{readmore} - Similar but less powerful functions than @{ML SUBPROOF} are @{ML SUBGOAL} - and @{ML CSUBGOAL}. They allow you to inspect a given subgoal (the former + Similar but less powerful functions than @{ML SUBPROOF} are @{ML [index] SUBGOAL} + and @{ML [index] CSUBGOAL}. They allow you to inspect a given subgoal (the former presents the subgoal as a @{ML_type term}, while the latter as a @{ML_type cterm}). With this you can implement a tactic that applies a rule according to the topmost logic connective in the subgoal (to illustrate this we only @@ -688,7 +696,7 @@ quantified variable. So you really have to construct the pattern using the basic term-constructors. This is not necessary in other cases, because their type is always fixed to function types involving only the type @{typ - bool}. (See Section \ref{sec:terms_types_manually} about constructing terms + bool}. (See Section \ref{sec:terms_manually} about constructing terms manually.) For the catch-all pattern, we chose to just return @{ML all_tac}. Consequently, @{ML select_tac} never fails. @@ -728,7 +736,7 @@ Of course, this example is contrived: there are much simpler methods available in Isabelle for - implementing a proof procedure analysing a goal according to its topmost + implementing a tactic analysing a goal according to its topmost connective. These simpler methods use tactic combinators, which we will explain in the next section. @@ -769,7 +777,7 @@ in what follows we will usually prefer it over the ``unprimed'' one. If there is a list of tactics that should all be tried out in - sequence, you can use the combinator @{ML EVERY'}. For example + sequence, you can use the combinator @{ML [index] EVERY'}. For example the function @{ML foo_tac'} from page~\pageref{tac:footacprime} can also be written as: *} @@ -791,19 +799,19 @@ text {* and call @{ML foo_tac1}. - With the combinators @{ML THEN'}, @{ML EVERY'} and @{ML EVERY1} it must be + With the combinators @{ML THEN'}, @{ML EVERY'} and @{ML [index] EVERY1} it must be guaranteed that all component tactics successfully apply; otherwise the whole tactic will fail. If you rather want to try out a number of tactics, - then you can use the combinator @{ML ORELSE'} for two tactics, and @{ML - FIRST'} (or @{ML FIRST1}) for a list of tactics. For example, the tactic + then you can use the combinator @{ML [index] ORELSE'} for two tactics, and @{ML + [index] FIRST'} (or @{ML [index] FIRST1}) for a list of tactics. For example, the tactic *} ML{*val orelse_xmp_tac = rtac @{thm disjI1} ORELSE' rtac @{thm conjI}*} text {* - will first try out whether rule @{text disjI} applies and after that - @{text conjI}. To see this consider the proof + will first try out whether rule @{text disjI} applies and in case of failure + will try @{text conjI}. To see this consider the proof *} lemma shows "True \ False" and "Foo \ Bar" @@ -847,7 +855,7 @@ text {* Since such repeated applications of a tactic to the reverse order of \emph{all} subgoals is quite common, there is the tactic combinator - @{ML ALLGOALS} that simplifies this. Using this combinator you can simply + @{ML [index] ALLGOALS} that simplifies this. 
Using this combinator you can simply write: *} lemma shows "A \ B" and "A \ B \C" and "\x. D x" and "E \ F" @@ -859,8 +867,21 @@ text {* Remember that we chose to implement @{ML select_tac'} so that it - always succeeds. This can be potentially very confusing for the user, - for example, in cases where the goal is the form + always succeeds by adding @{ML all_tac} at the end of the tactic + list. The same can be achieved with the tactic combinator @{ML [index] TRY}. + For example: +*} + +ML{*val select_tac'' = TRY o FIRST' [rtac @{thm conjI}, rtac @{thm impI}, + rtac @{thm notI}, rtac @{thm allI}]*} +text_raw{*\label{tac:selectprimeprime}*} + +text {* + This tactic behaves in the same way as @{ML select_tac'}: it tries out + one of the given tactics and if none applies leaves the goal state + unchanged. This, however, can be potentially very confusing when visible to + the user, for example, in cases where the goal is the form + *} lemma shows "E \ F" @@ -871,7 +892,8 @@ (*<*)oops(*>*) text {* - In this case no rule applies. The problem for the user is that there is little + In this case no rule applies, but because of @{ML TRY} or the inclusion of @{ML all_tac} + the tactics do not fail. The problem with this is that for the user there is little chance to see whether or not progress in the proof has been made. By convention therefore, tactics visible to the user should either change something or fail. @@ -879,7 +901,7 @@ from the end of the theorem list. As a result @{ML select_tac'} would only succeed on goals where it can make progress. But for the sake of argument, let us suppose that this deletion is \emph{not} an option. In such cases, you can - use the combinator @{ML CHANGED} to make sure the subgoal has been changed + use the combinator @{ML [index] CHANGED} to make sure the subgoal has been changed by the tactic. Because now *} @@ -897,7 +919,7 @@ text {* We can further extend @{ML select_tac'} so that it not just applies to the topmost connective, but also to the ones immediately ``underneath'', i.e.~analyse the goal - completely. For this you can use the tactic combinator @{ML REPEAT}. As an example + completely. For this you can use the tactic combinator @{ML [index] REPEAT}. As an example suppose the following tactic *} @@ -918,11 +940,11 @@ Here it is crucial that @{ML select_tac'} is prefixed with @{ML CHANGED}, because otherwise @{ML REPEAT} runs into an infinite loop (it applies the tactic as long as it succeeds). The function - @{ML REPEAT1} is similar, but runs the tactic at least once (failing if + @{ML [index] REPEAT1} is similar, but runs the tactic at least once (failing if this is not possible). If you are after the ``primed'' version of @{ML repeat_xmp_tac}, then you - need to implement it as + can implement it as *} ML{*val repeat_xmp_tac' = REPEAT o CHANGED o select_tac'*} @@ -930,11 +952,11 @@ text {* since there are no ``primed'' versions of @{ML REPEAT} and @{ML CHANGED}. - If you look closely at the goal state above, the tactics @{ML repeat_xmp_tac} + If you look closely at the goal state above, then you see the tactics @{ML repeat_xmp_tac} and @{ML repeat_xmp_tac'} are not yet quite what we are after: the problem is that goals 2 and 3 are not analysed. This is because the tactic is applied repeatedly only to the first subgoal. To analyse also all - resulting subgoals, you can use the tactic combinator @{ML REPEAT_ALL_NEW}. + resulting subgoals, you can use the tactic combinator @{ML [index] REPEAT_ALL_NEW}. 
Suppose the tactic *} @@ -987,7 +1009,7 @@ Sometimes this leads to confusing behaviour of tactics and also has the potential to explode the search space for tactics. These problems can be avoided by prefixing the tactic with the tactic - combinator @{ML DETERM}. + combinator @{ML [index] DETERM}. *} lemma "\P1 \ Q1; P2 \ Q2\ \ R" @@ -1024,7 +1046,7 @@ text {* then the tactic @{ML select_tac''} will be tried out and any failure is harnessed. We again have to use the construction with \mbox{@{text "TRY o ..."}} since there is - no primed version of @{ML TRY}. The tactic combinator @{ML TRYALL} will try out + no primed version of @{ML [index] TRY}. The tactic combinator @{ML [index] TRYALL} will try out a tactic on all subgoals. For example the tactic *} @@ -1033,7 +1055,7 @@ text {* will solve all trivial subgoals involving @{term True} or @{term "False"}. - (FIXME: say something about @{ML COND} and COND') + (FIXME: say something about @{ML [index] COND} and COND') \begin{readmore} Most tactic combinators described in this section are defined in @{ML_file "Pure/tctical.ML"}. @@ -1059,11 +1081,11 @@ \begin{isabelle} \begin{center} \begin{tabular}{l@ {\hspace{2cm}}l} - @{ML simp_tac} & @{text "(simp (no_asm))"} \\ - @{ML asm_simp_tac} & @{text "(simp (no_asm_simp))"} \\ - @{ML full_simp_tac} & @{text "(simp (no_asm_use))"} \\ - @{ML asm_lr_simp_tac} & @{text "(simp (asm_lr))"} \\ - @{ML asm_full_simp_tac} & @{text "(simp)"} + @{ML [index] simp_tac} & @{text "(simp (no_asm))"} \\ + @{ML [index] asm_simp_tac} & @{text "(simp (no_asm_simp))"} \\ + @{ML [index] full_simp_tac} & @{text "(simp (no_asm_use))"} \\ + @{ML [index] asm_lr_simp_tac} & @{text "(simp (asm_lr))"} \\ + @{ML [index] asm_full_simp_tac} & @{text "(simp)"} \end{tabular} \end{center} \end{isabelle} @@ -1110,7 +1132,7 @@ \end{center} \end{isabelle} - with @{text "constr"} being a term-constructor, like @{const "If"} or @{const "Let"}. + with @{text "constr"} being a constant, like @{const "If"} or @{const "Let"}. Every simpset contains only one congruence rule for each term-constructor, which however can be overwritten. The user can declare lemmas to be congruence rules using the @@ -1131,7 +1153,7 @@ \begin{readmore} For more information about the simplifier see @{ML_file "Pure/meta_simplifier.ML"} and @{ML_file "Pure/simplifier.ML"}. The simplifier for HOL is set up in - @{ML_file "HOL/Tools/simpdata.ML"}. Generic splitters are implemented in + @{ML_file "HOL/Tools/simpdata.ML"}. The generic splitter is implemented in @{ML_file "Provers/splitter.ML"}. 
\end{readmore} @@ -1144,9 +1166,9 @@ \begin{isabelle} \begin{tabular}{ll} - @{ML addsimps} & @{ML delsimps}\\ - @{ML addcongs} & @{ML delcongs}\\ - @{ML addsimprocs} & @{ML delsimprocs}\\ + @{ML [index] addsimps} & @{ML [index] delsimps}\\ + @{ML [index] addcongs} & @{ML [index] delcongs}\\ + @{ML [index] addsimprocs} & @{ML [index] delsimprocs}\\ \end{tabular} \end{isabelle} @@ -1159,25 +1181,24 @@ \begin{isabelle}*} ML{*fun print_ss ctxt ss = let - val {simps, congs, procs, ...} = MetaSimplifier.dest_ss ss + val {simps, congs, procs, ...} = Simplifier.dest_ss ss fun name_thm (nm, thm) = - " " ^ nm ^ ": " ^ (str_of_thm_no_vars ctxt thm) + " " ^ nm ^ ": " ^ (string_of_thm_no_vars ctxt thm) fun name_ctrm (nm, ctrm) = - " " ^ nm ^ ": " ^ (str_of_cterms ctxt ctrm) + " " ^ nm ^ ": " ^ (string_of_cterms ctxt ctrm) - val s = ["Simplification rules:"] @ (map name_thm simps) @ - ["Congruences rules:"] @ (map name_thm congs) @ - ["Simproc patterns:"] @ (map name_ctrm procs) + val s = ["Simplification rules:"] @ map name_thm simps @ + ["Congruences rules:"] @ map name_thm congs @ + ["Simproc patterns:"] @ map name_ctrm procs in - s |> separate "\n" - |> implode - |> warning + s |> cat_lines + |> writeln end*} text_raw {* \end{isabelle} \end{minipage} -\caption{The function @{ML MetaSimplifier.dest_ss} returns a record containing +\caption{The function @{ML [index] dest_ss in Simplifier} returns a record containing all printable information stored in a simpset. We are here only interested in the simplification rules, congruence rules and simprocs.\label{fig:printss}} \end{figure} *} @@ -1185,7 +1206,7 @@ text {* To see how they work, consider the function in Figure~\ref{fig:printss}, which prints out some parts of a simpset. If you use it to print out the components of the - empty simpset, i.e., @{ML empty_ss} + empty simpset, i.e., @{ML [index] empty_ss} @{ML_response_fake [display,gray] "print_ss @{context} empty_ss" @@ -1232,7 +1253,7 @@ expects this form of the simplification and congruence rules. However, even when adding these lemmas to @{ML empty_ss} we do not end up with anything useful yet. - In the context of HOL, the first really useful simpset is @{ML HOL_basic_ss}. While + In the context of HOL, the first really useful simpset is @{ML [index] HOL_basic_ss}. While printing out the components of this simpset @{ML_response_fake [display,gray] @@ -1259,9 +1280,9 @@ text {* This behaviour is not because of simplification rules, but how the subgoaler, solver - and looper are set up in @{ML HOL_basic_ss}. + and looper are set up in @{ML [index] HOL_basic_ss}. - The simpset @{ML HOL_ss} is an extension of @{ML HOL_basic_ss} containing + The simpset @{ML [index] HOL_ss} is an extension of @{ML HOL_basic_ss} containing already many useful simplification and congruence rules for the logical connectives in HOL. @@ -1281,7 +1302,8 @@ The simplifier is often used to unfold definitions in a proof. For this the - simplifier contains the @{ML rewrite_goals_tac}. Suppose for example the + simplifier implements the tactic @{ML [index] rewrite_goals_tac}.\footnote{FIXME: + see LocalDefs infrastructure.} Suppose for example the definition *} @@ -1475,6 +1497,10 @@ (FIXME: @{ML ObjectLogic.full_atomize_tac}, @{ML ObjectLogic.rulify_tac}) + (FIXME: what are @{ML mksimps_pairs}; used in Nominal.thy) + + (FIXME: explain @{ML simplify} and @{ML "Simplifier.rewrite_rule"} etc.) 
+ *} section {* Simprocs *} @@ -1491,10 +1517,10 @@ you can use the function: *} -ML %linenosgray{*fun fail_sp_aux simpset redex = +ML %linenosgray{*fun fail_simproc simpset redex = let val ctxt = Simplifier.the_context simpset - val _ = warning ("The redex: " ^ (str_of_cterm ctxt redex)) + val _ = writeln ("The redex: " ^ (string_of_cterm ctxt redex)) in NONE end*} @@ -1509,7 +1535,7 @@ done by adding the simproc to the current simpset as follows *} -simproc_setup %gray fail_sp ("Suc n") = {* K fail_sp_aux *} +simproc_setup %gray fail ("Suc n") = {* K fail_simproc *} text {* where the second argument specifies the pattern and the right-hand side @@ -1539,16 +1565,16 @@ with the declaration *} -declare [[simproc del: fail_sp]] +declare [[simproc del: fail]] text {* If you want to see what happens with just \emph{this} simproc, without any - interference from other rewrite rules, you can call @{text fail_sp} + interference from other rewrite rules, you can call @{text fail} as follows: *} lemma shows "Suc 0 = 1" -apply(tactic {* simp_tac (HOL_basic_ss addsimprocs [@{simproc fail_sp}]) 1*}) +apply(tactic {* simp_tac (HOL_basic_ss addsimprocs [@{simproc fail}]) 1*}) (*<*)oops(*>*) text {* @@ -1558,14 +1584,14 @@ Setting up a simproc using the command \isacommand{simproc\_setup} will always add automatically the simproc to the current simpset. If you do not want this, then you have to use a slightly different method for setting - up the simproc. First the function @{ML fail_sp_aux} needs to be modified + up the simproc. First the function @{ML fail_simproc} needs to be modified to *} -ML{*fun fail_sp_aux' simpset redex = +ML{*fun fail_simproc' simpset redex = let val ctxt = Simplifier.the_context simpset - val _ = warning ("The redex: " ^ (Syntax.string_of_term ctxt redex)) + val _ = writeln ("The redex: " ^ (Syntax.string_of_term ctxt redex)) in NONE end*} @@ -1578,19 +1604,19 @@ *} -ML{*val fail_sp' = +ML{*val fail' = let val thy = @{theory} val pat = [@{term "Suc n"}] in - Simplifier.simproc_i thy "fail_sp'" pat (K fail_sp_aux') + Simplifier.simproc_i thy "fail_simproc'" pat (K fail_simproc') end*} text {* Here the pattern is given as @{ML_type term} (instead of @{ML_type cterm}). The function also takes a list of patterns that can trigger the simproc. Now the simproc is set up and can be explicitly added using - @{ML addsimprocs} to a simpset whenever + @{ML [index] addsimprocs} to a simpset whenever needed. Simprocs are applied from inside to outside and from left to right. You can @@ -1598,11 +1624,11 @@ *} lemma shows "Suc (Suc 0) = (Suc 1)" -apply(tactic {* simp_tac (HOL_basic_ss addsimprocs [fail_sp']) 1*}) +apply(tactic {* simp_tac (HOL_basic_ss addsimprocs [fail']) 1*}) (*<*)oops(*>*) text {* - The simproc @{ML fail_sp'} prints out the sequence + The simproc @{ML fail'} prints out the sequence @{text [display] "> Suc 0 @@ -1625,7 +1651,7 @@ *} -ML{*fun plus_one_sp_aux ss redex = +ML{*fun plus_one_simproc ss redex = case redex of @{term "Suc 0"} => NONE | _ => SOME @{thm plus_one}*} @@ -1634,12 +1660,12 @@ and set up the simproc as follows. 
*} -ML{*val plus_one_sp = +ML{*val plus_one = let val thy = @{theory} val pat = [@{term "Suc n"}] in - Simplifier.simproc_i thy "sproc +1" pat (K plus_one_sp_aux) + Simplifier.simproc_i thy "sproc +1" pat (K plus_one_simproc) end*} text {* @@ -1650,7 +1676,7 @@ *} lemma shows "P (Suc (Suc (Suc 0))) (Suc 0)" -apply(tactic {* simp_tac (HOL_basic_ss addsimprocs [plus_one_sp]) 1*}) +apply(tactic {* simp_tac (HOL_basic_ss addsimprocs [plus_one]) 1*}) txt{* where the simproc produces the goal state @@ -1662,8 +1688,8 @@ text {* As usual with rewriting you have to worry about looping: you already have - a loop with @{ML plus_one_sp}, if you apply it with the default simpset (because - the default simpset contains a rule which just does the opposite of @{ML plus_one_sp}, + a loop with @{ML plus_one}, if you apply it with the default simpset (because + the default simpset contains a rule which just does the opposite of @{ML plus_one}, namely rewriting @{text [quotes] "+ 1"} to a successor). So you have to be careful in choosing the right simpset to which you add a simproc. @@ -1676,7 +1702,7 @@ | dest_suc_trm t = snd (HOLogic.dest_number t)*} text {* - It uses the library function @{ML dest_number in HOLogic} that transforms + It uses the library function @{ML [index] dest_number in HOLogic} that transforms (Isabelle) terms, like @{term "0::nat"}, @{term "1::nat"}, @{term "2::nat"} and so on, into integer values. This function raises the exception @{ML TERM}, if the term is not a number. The next function expects a pair consisting of a term @@ -1727,7 +1753,7 @@ theorem for the simproc. *} -ML{*fun nat_number_sp_aux ss t = +ML{*fun nat_number_simproc ss t = let val ctxt = Simplifier.the_context ss in @@ -1736,18 +1762,18 @@ end*} text {* - This function uses the fact that @{ML dest_suc_trm} might throw an exception + This function uses the fact that @{ML dest_suc_trm} might raise an exception @{ML TERM}. In this case there is nothing that can be rewritten and therefore no theorem is produced (i.e.~the function returns @{ML NONE}). To try out the simproc on an example, you can set it up as follows: *} -ML{*val nat_number_sp = +ML{*val nat_number = let val thy = @{theory} val pat = [@{term "Suc n"}] in - Simplifier.simproc_i thy "nat_number" pat (K nat_number_sp_aux) + Simplifier.simproc_i thy "nat_number" pat (K nat_number_simproc) end*} text {* @@ -1755,7 +1781,7 @@ *} lemma "P (Suc (Suc 2)) (Suc 99) (0::nat) (Suc 4 + Suc 0) (Suc (0 + 0))" -apply(tactic {* simp_tac (HOL_ss addsimprocs [nat_number_sp]) 1*}) +apply(tactic {* simp_tac (HOL_ss addsimprocs [nat_number]) 1*}) txt {* you obtain the more legible goal state @@ -1796,21 +1822,21 @@ text {* whereby the produced theorem is always a meta-equality. A simple conversion - is the function @{ML "Conv.all_conv"}, which maps a @{ML_type cterm} to an + is the function @{ML [index] all_conv in Conv}, which maps a @{ML_type cterm} to an instance of the (meta)reflexivity theorem. For example: @{ML_response_fake [display,gray] "Conv.all_conv @{cterm \"Foo \ Bar\"}" "Foo \ Bar \ Foo \ Bar"} - Another simple conversion is @{ML Conv.no_conv} which always raises the + Another simple conversion is @{ML [index] no_conv in Conv} which always raises the exception @{ML CTERM}. 
@{ML_response_fake [display,gray] "Conv.no_conv @{cterm True}" "*** Exception- CTERM (\"no conversion\", []) raised"} - A more interesting conversion is the function @{ML "Thm.beta_conversion"}: it + A more interesting conversion is the function @{ML [index] beta_conversion in Thm}: it produces a meta-equation between a term and its beta-normal form. For example @{ML_response_fake [display,gray] @@ -1826,7 +1852,7 @@ Note that the actual response in this example is @{term "2 + 10 \ 2 + (10::nat)"}, since the pretty-printer for @{ML_type cterm}s eta-normalises terms. But how we constructed the term (using the function - @{ML Thm.capply}, which is the application @{ML $} for @{ML_type cterm}s) + @{ML [index] capply in Thm}, which is the application @{ML $} for @{ML_type cterm}s) ensures that the left-hand side must contain beta-redexes. Indeed if we obtain the ``raw'' representation of the produced theorem, we can see the difference: @@ -1838,14 +1864,14 @@ val ten = @{cterm \"10::nat\"} val thm = Thm.beta_conversion true (Thm.capply (Thm.capply add two) ten) in - #prop (rep_thm thm) + Thm.prop_of thm end" "Const (\"==\",\) $ (Abs (\"x\",\,Abs (\"y\",\,\)) $\$\) $ (Const (\"HOL.plus_class.plus\",\) $ \ $ \)"} The argument @{ML true} in @{ML Thm.beta_conversion} indicates that - the right-hand side will be fully beta-normalised. If instead + the right-hand side should be fully beta-normalised. If instead @{ML false} is given, then only a single beta-reduction is performed on the outer-most level. For example @@ -1880,15 +1906,15 @@ end" "True \ (Foo \ Bar) \ Foo \ Bar"} - Note, however, that the function @{ML Conv.rewr_conv} only rewrites the + Note, however, that the function @{ML [index] rewr_conv in Conv} only rewrites the outer-most level of the @{ML_type cterm}. If the given @{ML_type cterm} does not match exactly the - left-hand side of the theorem, then @{ML Conv.rewr_conv} raises + left-hand side of the theorem, then @{ML [index] rewr_conv in Conv} fails by raising the exception @{ML CTERM}. This very primitive way of rewriting can be made more powerful by combining several conversions into one. For this you can use conversion - combinators. The simplest conversion combinator is @{ML then_conv}, + combinators. The simplest conversion combinator is @{ML [index] then_conv}, which applies one conversion after another. For example @{ML_response_fake [display,gray] @@ -1905,7 +1931,7 @@ @{thm [source] true_conj1}. (Recall the problem with the pretty-printer normalising all terms.) - The conversion combinator @{ML else_conv} tries out the + The conversion combinator @{ML [index] else_conv} tries out the first one, and if it does not apply, tries the second. For example @{ML_response_fake [display,gray] @@ -1920,10 +1946,10 @@ Here the conversion of @{thm [source] true_conj1} only applies in the first case, but fails in the second. The whole conversion - does not fail, however, because the combinator @{ML Conv.else_conv} will then - try out @{ML Conv.all_conv}, which always succeeds. + does not fail, however, because the combinator @{ML else_conv in Conv} will then + try out @{ML all_conv in Conv}, which always succeeds. - The conversion combinator @{ML Conv.try_conv} constructs a conversion + The conversion combinator @{ML [index] try_conv in Conv} constructs a conversion which is tried out on a term, but in case of failure just does nothing. 
For example @@ -1934,7 +1960,7 @@ Apart from the function @{ML beta_conversion in Thm}, which is able to fully beta-normalise a term, the conversions so far are restricted in that they only apply to the outer-most level of a @{ML_type cterm}. In what follows we - will lift this restriction. The combinator @{ML Conv.arg_conv} will apply + will lift this restriction. The combinator @{ML [index] arg_conv in Conv} will apply the conversion to the first argument of an application, that is the term must be of the form @{ML "t1 $ t2" for t1 t2} and the conversion is applied to @{text t2}. For example @@ -1951,23 +1977,23 @@ The reason for this behaviour is that @{text "(op \)"} expects two arguments. Therefore the term must be of the form @{text "(Const \ $ t1) $ t2"}. The conversion is then applied to @{text "t2"} which in the example above - stands for @{term "True \ Q"}. The function @{ML Conv.fun_conv} applies + stands for @{term "True \ Q"}. The function @{ML [index] fun_conv in Conv} applies the conversion to the first argument of an application. - The function @{ML Conv.abs_conv} applies a conversion under an abstractions. + The function @{ML [index] abs_conv in Conv} applies a conversion under an abstraction. For example: @{ML_response_fake [display,gray] "let - val conv = K (Conv.rewr_conv @{thm true_conj1}) + val conv = Conv.rewr_conv @{thm true_conj1} val ctrm = @{cterm \"\P. True \ P \ Foo\"} in - Conv.abs_conv conv @{context} ctrm + Conv.abs_conv (K conv) @{context} ctrm end" "\P. True \ P \ Foo \ \P. P \ Foo"} Note that this conversion needs a context as an argument. The conversion that - goes under an application is @{ML Conv.combination_conv}. It expects two conversions + goes under an application is @{ML [index] combination_conv in Conv}. It expects two conversions as arguments, each of which is applied to the corresponding ``branch'' of the application. @@ -2035,7 +2061,7 @@ text {* So far we only applied conversions to @{ML_type cterm}s. Conversions can, however, - also work on theorems using the function @{ML "Conv.fconv_rule"}. As an example, + also work on theorems using the function @{ML [index] fconv_rule in Conv}. As an example, consider the conversion @{ML all_true1_conv} and the lemma: *} @@ -2050,12 +2076,12 @@ "?P \ \ ?P"} Finally, conversions can also be turned into tactics and then applied to - goal states. This can be done with the help of the function @{ML CONVERSION}, + goal states. This can be done with the help of the function @{ML [index] CONVERSION}, and also some predefined conversion combinators that traverse a goal - state. The combinators for the goal state are: @{ML Conv.params_conv} for + state. The combinators for the goal state are: @{ML [index] params_conv in Conv} for converting under parameters (i.e.~where goals are of the form @{text "\x. P \ - Q"}); the function @{ML Conv.prems_conv} for applying a conversion to all - premises of a goal, and @{ML Conv.concl_conv} for applying a conversion to + Q"}); the function @{ML [index] prems_conv in Conv} for applying a conversion to all + premises of a goal, and @{ML [index] concl_conv in Conv} for applying a conversion to the conclusion of a goal. 
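text {*
  The combinator @{ML combination_conv in Conv} mentioned above can be
  illustrated with a small sketch, reusing the rewrite rule
  @{thm [source] true_conj1}:
*}

ML{*(* a sketch: the first conversion applies to the function part (here
   the constant Not, left unchanged), the second to the argument *)
let
  val conv = Conv.combination_conv Conv.all_conv
               (Conv.rewr_conv @{thm true_conj1})
in
  conv @{cterm "\<not> (True \<and> Q)"}
end*}

text {*
  This should produce the meta-equation between @{text "\<not> (True \<and> Q)"}
  and @{text "\<not> Q"}.
*}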
Assume we want to apply @{ML all_true1_conv} only in the conclusion of the goal, and @{ML if_true1_conv} should only apply to the premises. Here is a tactic doing exactly that: *} -ML{*fun true1_tac ctxt = CSUBGOAL (fn (goal, i) => +ML{*fun true1_tac ctxt = CONVERSION (Conv.params_conv ~1 (fn ctxt => (Conv.prems_conv ~1 (if_true1_conv ctxt) then_conv - Conv.concl_conv ~1 (all_true1_conv ctxt))) ctxt) i)*} + Conv.concl_conv ~1 (all_true1_conv ctxt))) ctxt)*} text {* We call the conversions with the argument @{ML "~1"}. This is to
@@ -2092,9 +2118,10 @@ As you can see, the premises are rewritten according to @{ML if_true1_conv}, while the conclusion according to @{ML all_true1_conv}. - To sum up this section, conversions are not as powerful as the simplifier - and simprocs; the advantage of conversions, however, is that you never have - to worry about non-termination. + To sum up this section, conversions are more general than the simplifier + or simprocs, but you have to do more work yourself. Also conversions are + often much less efficient than the simplifier. The advantage of conversions, + however, is that they provide much less room for non-termination. \begin{exercise}\label{ex:addconversion} Write a tactic that does the same as the simproc in exercise
@@ -2112,7 +2139,7 @@ \begin{readmore} See @{ML_file "Pure/conv.ML"} for more information about conversion combinators. - Further conversions are defined in @{ML_file "Pure/thm.ML"}, + Some basic conversions are defined in @{ML_file "Pure/thm.ML"}, @{ML_file "Pure/drule.ML"} and @{ML_file "Pure/meta_simplifier.ML"}. \end{readmore}
diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/antiquote_setup.ML
--- a/ProgTutorial/antiquote_setup.ML Tue Jul 21 12:27:12 2009 +0200
+++ b/ProgTutorial/antiquote_setup.ML Tue Jul 21 12:49:26 2009 +0200
@@ -1,23 +1,26 @@ (* Auxiliary antiquotations for the tutorial.
*) -structure AntiquoteSetup: sig end = +structure AntiquoteSetup = struct +open OutputTutorial + (* functions for generating appropriate expressions *) -fun ml_val_open (ys, xs) txt = - let fun ml_val_open_aux ys txt = +fun ml_val_open ys istruc txt = +let + fun ml_val_open_aux ys txt = "fn " ^ (case ys of [] => "_" | _ => enclose "(" ")" (commas ys)) ^ " => (" ^ txt ^ ")"; - in - (case xs of - [] => ml_val_open_aux ys txt - | _ => ml_val_open_aux ys ("let open " ^ (space_implode " " xs) ^ " in " ^ txt ^ " end")) - end; +in + (case istruc of + NONE => ml_val_open_aux ys txt + | SOME st => ml_val_open_aux ys ("let open " ^ st ^ " in " ^ txt ^ " end")) +end -fun ml_val txt = ml_val_open ([],[]) txt; +fun ml_val txt = ml_val_open [] NONE txt; fun ml_pat (lhs, pat) = let - val pat' = implode (map (fn "\\" => "_" | s => s) (Symbol.explode pat)) + val pat' = implode (map (fn "\" => "_" | s => s) (Symbol.explode pat)) in "val " ^ pat' ^ " = " ^ lhs end; fun ml_struct txt = "functor DUMMY_FUNCTOR() = struct structure DUMMY = " ^ txt ^ " end"; @@ -28,8 +31,8 @@ ML_Context.eval_in (SOME ctxt) false Position.none exp (* string functions *) -fun string_explode str txt = - map (fn s => str ^ s) (space_explode "\n" txt) +fun string_explode prefix_str txt = + map (fn s => prefix_str ^ s) (split_lines txt) val transform_cmts_str = Source.of_string @@ -37,45 +40,49 @@ #> Source.exhaust #> Chunks.transform_cmts #> implode - #> string_explode ""; - -(* output function *) -fun output_fn txt = Chunks.output (map Pretty.str txt) + #> string_explode "" -(* checks and prints open expressions *) -fun output_ml {context = ctxt, ...} (txt, ovars) = - (eval_fn ctxt (ml_val_open ovars txt); - output_fn (transform_cmts_str txt)) +(* checks and prints open expressions, calculates index entry *) +fun output_ml {context = ctxt, ...} (txt, (ovars, istruc)) = + (eval_fn ctxt (ml_val_open ovars istruc txt); + case (istruc, Long_Name.base_name txt, Long_Name.qualifier txt) of + (NONE, bn, "") => output_indexed (transform_cmts_str txt) {main = Code txt, minor = NoString} + | (NONE, bn, qn) => output_indexed (transform_cmts_str txt) {main = Code bn, minor = IStruc qn} + | (SOME st, _, _) => output_indexed (transform_cmts_str txt) {main = Code txt, minor = IStruc st}) val parser_ml = Scan.lift (Args.name -- (Scan.optional (Args.$$$ "for" |-- OuterParse.!!! (Scan.repeat1 Args.name)) [] -- - Scan.optional (Args.$$$ "in" |-- OuterParse.!!! (Scan.repeat1 Args.name)) [])) + Scan.option (Args.$$$ "in" |-- OuterParse.!!! 
Args.name))) (* checks and prints types and structures *) -fun output_exp ml {context = ctxt, ...} txt = - (eval_fn ctxt (ml txt); - output_fn (string_explode "" txt)) +fun output_struct {context = ctxt, ...} txt = + (eval_fn ctxt (ml_struct txt); + output_indexed (string_explode "" txt) {main = Code txt, minor = Plain "structure"}) + +fun output_type {context = ctxt, ...} txt = + (eval_fn ctxt (ml_type txt); + output_indexed (string_explode "" txt) {main = Code txt, minor = Plain "type"}) (* checks and expression agains a result pattern *) fun output_response {context = ctxt, ...} (lhs, pat) = (eval_fn ctxt (ml_pat (lhs, pat)); - output_fn ((string_explode "" lhs) @ (string_explode "> " pat))) + output ((string_explode "" lhs) @ (string_explode "> " pat))) (* checks the expressions, but does not check it against a result pattern *) fun output_response_fake {context = ctxt, ...} (lhs, pat) = (eval_fn ctxt (ml_val lhs); - output_fn ((string_explode "" lhs) @ (string_explode "> " pat))) + output ((string_explode "" lhs) @ (string_explode "> " pat))) (* checks the expressions, but does not check it against a result pattern *) fun ouput_response_fake_both _ (lhs, pat) = - output_fn ((string_explode "" lhs) @ (string_explode "> " pat)) + output ((string_explode "" lhs) @ (string_explode "> " pat)) val single_arg = Scan.lift (Args.name) val two_args = Scan.lift (Args.name -- Args.name) val _ = ThyOutput.antiquotation "ML" parser_ml output_ml -val _ = ThyOutput.antiquotation "ML_type" single_arg (output_exp ml_type) -val _ = ThyOutput.antiquotation "ML_struct" single_arg (output_exp ml_struct) +val _ = ThyOutput.antiquotation "ML_type" single_arg output_type +val _ = ThyOutput.antiquotation "ML_struct" single_arg output_struct val _ = ThyOutput.antiquotation "ML_response" two_args output_response val _ = ThyOutput.antiquotation "ML_response_fake" two_args output_response_fake val _ = ThyOutput.antiquotation "ML_response_fake_both" two_args ouput_response_fake_both @@ -91,7 +98,7 @@ (* checks whether a file exists in the Isabelle distribution *) fun check_file_exists _ txt = (if File.exists (Path.append (Path.explode ("~~/src")) (Path.explode txt)) - then output_fn [href_link txt] + then output [href_link txt] else error ("Source file " ^ (quote txt) ^ " does not exist.")) val _ = ThyOutput.antiquotation "ML_file" single_arg check_file_exists diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/chunks.ML --- a/ProgTutorial/chunks.ML Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/chunks.ML Tue Jul 21 12:49:26 2009 +0200 @@ -2,39 +2,6 @@ structure Chunks = struct -(* rebuilding the output function from ThyOutput in order to *) -(* enable the options [gray, linenos] *) - -val gray = ref false; -val linenos = ref false - -fun output prts = - prts - |> (if ! ThyOutput.quotes then map Pretty.quote else I) - |> (if ! ThyOutput.display then - map (Output.output o Pretty.string_of o Pretty.indent (! ThyOutput.indent)) - #> space_implode "\\isasep\\isanewline%\n" - #> (if ! linenos then (enclose "\\begin{linenos}%\n" "%\n\\end{linenos}") else I) - #> (if ! gray then (enclose "\\begin{graybox}%\n" "%\n\\end{graybox}") else I) - #> enclose "\\begin{isabelle}%\n" "%\n\\end{isabelle}" - else - map (Output.output o (if ! 
ThyOutput.break then Pretty.string_of else Pretty.str_of)) - #> space_implode "\\isasep\\isanewline%\n" - #> enclose "\\isa{" "}"); - - -fun boolean "" = true - | boolean "true" = true - | boolean "false" = false - | boolean s = error ("Bad boolean value: " ^ quote s); - -val _ = ThyOutput.add_options - [("gray", Library.setmp gray o boolean), - ("linenos", Library.setmp linenos o boolean)] - - - - (** theory data **) structure ChunkData = TheoryDataFun @@ -158,8 +125,7 @@ end) val txt = spc ^ implode (transform_cmts toks') in - txt |> split_lines |> - (map Pretty.str) |> output + OutputTutorial.output (split_lines txt) end; val _ = ThyOutput.antiquotation "ML_chunk" (Scan.lift Args.name) output_chunk; diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/document/root.bib --- a/ProgTutorial/document/root.bib Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/document/root.bib Tue Jul 21 12:49:26 2009 +0200 @@ -135,4 +135,13 @@ series = {Lecture Notes in Computer Science}, volume = {78}, year = {1979} -} \ No newline at end of file +} + +@Article{Oppen80, + author = {D.~C.~Oppen}, + title = {Pretty Printing}, + journal = {ACM Transactions on Programming Languages and Systems}, + year = {1980}, + pages = {465--483} +} + diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/document/root.tex --- a/ProgTutorial/document/root.tex Tue Jul 21 12:27:12 2009 +0200 +++ b/ProgTutorial/document/root.tex Tue Jul 21 12:49:26 2009 +0200 @@ -1,4 +1,4 @@ -\documentclass[11pt,a4paper]{report} +\documentclass[11pt,a4paper]{book} \usepackage[latin1]{inputenc} \usepackage{amsmath,amsthm} \usepackage{isabelle} @@ -15,6 +15,8 @@ \usepackage{boxedminipage} \usepackage{mathpartir} \usepackage{flafter} +\usepackage{makeidx} +\usepackage{tocbibind} \usepackage{pdfsetup} \urlstyle{rm} @@ -24,6 +26,11 @@ \isadroptag{theory} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% indexing +\makeindex +\newcommand{\indexdef}[2]{\index{#1 (#2)}} + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % For cross references to the other manuals: \usepackage{xr} \externaldocument[I-]{implementation} @@ -107,6 +114,11 @@ \renewcommand{\isatagsmall}{\begingroup\small} \renewcommand{\endisatagsmall}{\endgroup} +% for code that should not be printed +\isakeeptag{no} +\renewcommand{\isatagno}{} +\renewcommand{\endisatagno}{} + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \renewenvironment{isabelle} {\begin{trivlist}\begin{isabellebody}\small\item\relax} @@ -116,6 +128,7 @@ % for {* *} in antiquotations \newcommand{\isasymverbopen}{\isacharverbatimopen} \newcommand{\isasymverbclose}{\isacharverbatimclose} +\newcommand{\isasymfoo}{\isa{{\isacharbackslash}{\isacharless}foo{\isachargreater}}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % since * cannot be used in text {*...*} @@ -136,22 +149,31 @@ \author{by Christian Urban with contributions from:\\[2ex] \begin{tabular}{r@{\hspace{1.8mm}}l} Stefan & Berghofer\\ + Jasmin & Blanchette\\ Sascha & Böhme\\ Jeremy & Dawson\\ Alexander & Krauss\\ \end{tabular}} \maketitle + +% table of contents +\frontmatter \setcounter{tocdepth}{1} \tableofcontents % generated text of all theories +\mainmatter \input{session} -\newpage +% bibliography +\backmatter \bibliographystyle{abbrv} \bibliography{root} +% index +\printindex + \end{document} %%% Local Variables: diff -r 949957531f63 -r 2ff4867c00cf ProgTutorial/output_tutorial.ML --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/ProgTutorial/output_tutorial.ML Tue Jul 21 12:49:26 
2009 +0200 @@ -0,0 +1,105 @@ + +structure OutputTutorial = +struct + +(* rebuilding the output function from ThyOutput in order to *) +(* enable the options [gray, linenos, index] *) + +val gray = ref false +val linenos = ref false + +fun output txt = +let + val prts = map Pretty.str txt +in + prts + |> (if ! ThyOutput.quotes then map Pretty.quote else I) + |> (if ! ThyOutput.display then + map (Output.output o Pretty.string_of o Pretty.indent (! ThyOutput.indent)) + #> space_implode "\\isasep\\isanewline%\n" + #> (if ! linenos then (enclose "\\begin{linenos}%\n" "%\n\\end{linenos}") else I) + #> (if ! gray then (enclose "\\begin{graybox}%\n" "%\n\\end{graybox}") else I) + #> enclose "\\begin{isabelle}%\n" "%\n\\end{isabelle}" + else + map (Output.output o (if ! ThyOutput.break then Pretty.string_of else Pretty.str_of)) + #> space_implode "\\isasep\\isanewline%\n" + #> enclose "\\isa{" "}") +end + +datatype qstring = + NoString +| Plain of string +| Code of string +| IStruc of string + +datatype entry = + No_Entry + | Default + | Explicit of string + +val index = ref No_Entry + +fun translate f = Symbol.explode #> map f #> implode; + +val clean_string = translate + (fn "_" => "\\_" + | "#" => "\\#" + | "<" => "\\isacharless" + | ">" => "\\isachargreater" + | "{" => "\\{" + | "|" => "\\isacharbar" + | "}" => "\\}" + | "$" => "\\isachardollar" + | "!" => "\\isacharbang" + | "\" => "-" + | c => c); + +fun get_word str = +let + fun only_letters [] = true + | only_letters (x::xs) = + if (Symbol.is_ascii_blank x) then false else only_letters xs +in + if (only_letters (Symbol.explode str)) then (clean_string str) + else error ("Underspecified index entry only for single words allowed! Error with " ^ quote str) +end + +fun get_qstring NoString = "" + | get_qstring (Plain s) = get_word s + | get_qstring (Code s) = let val w = get_word s in (w ^ "@{\\tt\\slshape{}" ^ w ^ "}") end + | get_qstring (IStruc s) = "in {\\tt\\slshape{}" ^ (get_word s) ^ "}" + +fun is_empty_qstr (Plain "") = true + | is_empty_qstr (Code "") = true + | is_empty_qstr _ = false + +fun get_index_info {main = m, minor = n} = + (if n = NoString + then "\\index{" ^ (get_qstring m) ^ "}" + else "\\indexdef{" ^ (get_qstring m) ^ "}{" ^ (get_qstring n) ^ "}") + +fun index_entry entry index_info = + case entry of + No_Entry => I + | Explicit s => prefix s + | Default => prefix (get_index_info index_info) + +fun output_indexed txt index_info = + txt |> output + |> index_entry (!index) index_info + +fun boolean "" = true + | boolean "true" = true + | boolean "false" = false + | boolean s = error ("Bad boolean value: " ^ quote s); + +fun explicit "" = Default + | explicit s = Explicit s + +val _ = ThyOutput.add_options + [("gray", Library.setmp gray o boolean), + ("linenos", Library.setmp linenos o boolean), + ("index", Library.setmp index o explicit)] + + +end \ No newline at end of file diff -r 949957531f63 -r 2ff4867c00cf progtutorial.pdf
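text {*
  A quick way to see what the indexing machinery above produces is to call it
  directly; a small sketch (the expected result is read off from the code of
  @{ML get_index_info in OutputTutorial}):
*}

ML{*(* a sketch: build the index entry for the code item "rtac" without a
   minor entry *)
OutputTutorial.get_index_info {main = OutputTutorial.Code "rtac",
                               minor = OutputTutorial.NoString}*}

text {*
  This should return the LaTeX snippet \verb|\index{rtac@{\tt\slshape{}rtac}}|.
*}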