CookBook/FirstSteps.thy
       
     1 theory FirstSteps
       
     2 imports Main
       
     3 uses "antiquote_setup.ML"
       
     4      ("comp_simproc")
       
     5 begin
       
     6 
       
     7 
       
     8 (*<*)
       
     9 
       
    10 ML {*
       
    11 local structure O = ThyOutput
       
    12 in
       
    13   fun check_exists f = 
       
    14     if File.exists (Path.explode ("~~/src/" ^ f)) then ()
       
    15     else error ("Source file " ^ quote f ^ " does not exist.")
       
    16   
       
    17   val _ = O.add_commands
       
    18    [("ML_file", O.args (Scan.lift Args.name) (O.output (fn _ => fn name =>
       
    19          (check_exists name; Pretty.str name))))];
       
    20 
       
    21 end
       
    22 *}
       
    23 (*>*)
       
    24 
       
    25 chapter {* First Steps *}
       
    26 
       
    27 text {* 
       
    28   Isabelle programming happens in an enhanced dialect of Standard ML,
       
    29   which adds antiquotations containing references to the logical
       
    30   context.
       
    31 
       
    32   Just like all lemmas or proofs, all ML code that you write lives in
       
    33   a theory, where it is embedded using the \isacommand{ML} command:
       
    34 *}
       
    35 
       
    36 ML {*
       
    37   3 + 4
       
    38 *}
       
    39 
       
    40 text {*
       
    41   The \isacommand{ML} command takes an arbitrary ML expression, which
       
    42   is evaluated. It can also contain value or function bindings.
       
    43 *}
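       text {*
         For example, the following is a small sketch of an \isacommand{ML}
         block containing both a value and a function binding (the names
         @{text "r"} and @{text "double"} are only illustrative):
       *}

       ML {*
         val r = 3 + 4          (* a value binding *)
         fun double x = x + x   (* a function binding *)
       *}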
       
    44 
       
    45 section {* Antiquotations *}
       
    46 
       
    47 text {*
       
    48   The main advantage of embedding all code in a theory is that the
       
    49   code can contain references to entities that are defined in the
       
    50   theory. Let us, for example, print out the name of the current
       
    51   theory:
       
    52 *}
       
    53 
       
    54 ML {* Context.theory_name @{theory} *}
       
    55 
       
    56 text {* The @{text "@{theory}"} antiquotation is substituted with the
       
    57   current theory, whose name can then be extracted using the
       
    58   function @{ML "Context.theory_name"}. Note that antiquotations are
       
    59   statically scoped. The function
       
    60 *}
       
    61 
       
    62 ML {* 
       
    63   fun current_thyname () = Context.theory_name @{theory}
       
    64 *}
       
    65 
       
    66 text {*
       
    67   does \emph{not} return the name of the current theory. Instead, we have
       
    68   defined a constant function that always returns the string @{ML "\"FirstSteps\""}, which is
       
    69   the name of @{text "@{theory}"} at the point where the code
       
    70   is embedded. Operationally speaking,  @{text "@{theory}"} is \emph{not}
       
    71   replaced with code that will look up the current theory in some
       
    72   (destructive) data structure and return it. Instead, it is really
       
    73   replaced with the theory value.
       
    74 
       
    75   In the course of this introduction, we will learn about more of
       
    76   these antiquotations, which greatly simplify programming, since you
       
    77   can directly access all kinds of logical elements from ML.
       
    78 *}
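       text {*
         As a quick check of this explanation, evaluating the function simply
         returns that fixed string again:
       *}

       ML {*
         current_thyname ()   (* returns the theory name fixed at compile time *)
       *}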
       
    79 
       
    80 section {* Terms *}
       
    81 
       
    82 text {*
       
    83   We can simply quote Isabelle terms from ML using the @{text "@{term \<dots>}"} 
       
    84   antiquotation:
       
    85 *}
       
    86 
       
    87 ML {* @{term "(a::nat) + b = c"} *}
       
    88 
       
    89 text {*
       
    90   This shows the term @{term "(a::nat) + b = c"} in the internal
       
    91   representation, with all gory details. Terms are just an ML
       
    92   datatype, and they are defined in @{ML_file "Pure/term.ML"}.
       
    93   
       
    94   The representation of terms uses de Bruijn indices: bound variables
       
    95   are represented by the constructor @{ML Bound}, and the index refers to
       
    96   the number of lambdas we have to skip until we hit the lambda that
       
    97   binds the variable. The names of bound variables are kept at the
       
    98   abstractions, but they are just comments. 
       
    99   See \ichcite{ch:logic} for more details.
       
   100 
       
   101   \begin{readmore}
       
   102   Terms are described in detail in \ichcite{ch:logic}. Their
       
   103   definition and many useful operations can be found in @{ML_file "Pure/term.ML"}.
       
   104   \end{readmore}
       
   105 
       
   106   In a similar way we can quote types and theorems:
       
   107 *}
       
   108 
       
   109 ML {* @{typ "(int * nat) list"} *}
       
   110 ML {* @{thm allI} *}
       
   111 
       
   112 text {* 
       
   113   In the default setup, types and theorems are printed as strings. 
       
   114 *}
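       text {*
         To illustrate the de Bruijn representation mentioned above, here is a
         small example (the variable names are arbitrary); the body of the inner
         abstraction is @{ML "Bound 1"}, referring to the outer lambda:
       *}

       ML {*
         @{term "\<lambda>x::nat. \<lambda>y::nat. x"}
       *}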
       
   115 
       
   116 
       
   117 text {*
       
   118   Sometimes the internal representation can be surprisingly different
       
   119   from what you see at the user level, because the layers of

   120   parsing, type checking and pretty printing can be quite thick. 
       
   121 
       
   122 \begin{exercise}
       
   123   Look at the internal term representation of the following terms, and
       
   124   find out why they are represented like this.
       
   125 
       
   126   \begin{itemize}
       
   127   \item @{term "case x of 0 \<Rightarrow> 0 | Suc y \<Rightarrow> y"}  
       
   128   \item @{term "\<lambda>(x,y). P y x"}  
       
   129   \item @{term "{ [x::int] | x. x \<le> -2 }"}  
       
   130   \end{itemize}
       
   131 
       
   132   Hint: The third term is already quite big, and the pretty printer
       
   133   may omit parts of it by default. If you want to see all of it, you
       
   134   can use @{ML "print_depth 50"} to set the limit to a sufficiently high value.
       
   135 \end{exercise}
       
   136 *}
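       text {*
         For instance, before inspecting the third term one might raise the
         print limit like this (the value @{text "50"} is arbitrary):
       *}

       ML {*
         print_depth 50   (* affects all subsequent ML output *)
       *}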
       
   137 
       
   138 section {* Type checking *}
       
   139 
       
   140 text {* 
       
   141   We can freely construct and manipulate terms, since they are just
       
   142   arbitrary unchecked trees. However, we eventually want to see if a
       
   143   term is well-formed in a certain context.
       
   144 
       
   145   Type checking is done via @{ML cterm_of}, which turns a @{ML_type
       
   146   term} into a  @{ML_type cterm}, a \emph{certified} term. Unlike
       
   147   @{ML_type term}s, which are just trees, @{ML_type
       
   148   "cterm"}s are abstract objects that are guaranteed to be
       
   149   type-correct, and can only be constructed via the official
       
   150   interfaces.
       
   151 
       
   152   Type
       
   153   checking is always relative to a theory context. For now we can use
       
   154   the @{ML "@{theory}"} antiquotation to get hold of the theory at the current
       
   155   point:
       
   156 *}
       
   157 
       
   158 ML {*
       
   159   let
       
   160     val natT = @{typ "nat"}
       
   161     val zero = @{term "0::nat"}  (* Const ("HOL.zero_class.zero", natT) *)
       
   162   in
       
   163     cterm_of @{theory} 
       
   164         (Const ("HOL.plus_class.plus", natT --> natT --> natT) 
       
   165          $ zero $ zero)
       
   166   end
       
   167 *}
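       text {*
         Conversely, certification rejects ill-typed terms. The following sketch
         applies a natural number to another one, which cannot be well-typed
         (assuming the usual @{ML_text "TYPE"} exception is raised here):
       *}

       ML {*
         (cterm_of @{theory} (Free ("x", @{typ nat}) $ Free ("y", @{typ nat})); "accepted")
           handle TYPE _ => "rejected"   (* evaluates to "rejected" *)
       *}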
       
   168 
       
   169 ML {*
       
   170   @{const_name plus}
       
   171 *}
       
   172 
       
   173 ML {*
       
   174   @{term "{ [x::int] | x. x \<le> -2 }"}
       
   175 *}
       
   176 
       
   177 text {*
       
   178   The internal names of constants like @{term "zero"} or @{text "+"} are
       
   179   often more complex than one first expects. Here, the extra prefixes
       
   180   @{text zero_class} and @{text plus_class} are present because the
       
   181   constants are defined within a type class. Guessing such internal
       
   182   names can be extremely hard, which is why the system provides
       
   183   another antiquotation: @{ML "@{const_name plus}"} gives just this
       
   184   name.
       
   185 
       
   186   \begin{exercise}
       
   187   Write a function @{ML_text "rev_sum : term -> term"} that takes a
       
   188   term of the form @{text "t\<^isub>1 + t\<^isub>2 + \<dots> + t\<^isub>n"}
       
   189   and returns the reversed sum @{text "t\<^isub>n + \<dots> + t\<^isub>2 + t\<^isub>1"}.
       
   190   Note that @{text "+"} associates to the left.
       
   191   Try your function on some examples, and see if the result typechecks.
       
   192   \end{exercise}
       
   193 
       
   194   \begin{exercise}
       
   195   Write a function which takes two terms representing natural numbers
       
   196   in unary (like @{term "Suc (Suc (Suc 0))"}), and produces the unary
       
   197   number representing their sum.
       
   198   \end{exercise}
       
   199 
       
   200   \begin{exercise}
       
   201   Look at the functions defined in @{ML_file "Pure/logic.ML"} and
       
   202   @{ML_file "HOL/hologic.ML"} and see if they can make your life
       
   203   easier.
       
   204   \end{exercise}
       
   205 *}
       
   206 
       
   207 section {* Theorems *}
       
   208 
       
   209 text {*
       
   210   Just like @{ML_type cterm}s, theorems (of type @{ML_type thm}) are
       
   211   abstract objects that can only be built by going through the kernel
       
   212   interfaces, which means that all your proofs will be checked. The
       
   213   basic rules of the Isabelle/Pure logical framework are defined in
       
   214   @{ML_file "Pure/thm.ML"}. 
       
   215 
       
   216   Using these rules, which are just ML functions, you can do simple
       
   217   natural deduction proofs on the ML level. For example, the statement
       
   218   @{prop "(\<And>(x::nat). P x \<Longrightarrow> Q x) \<Longrightarrow> P t \<Longrightarrow> Q t"} can be proved like
       
   219   this\footnote{Note that @{text "|>"} is just reverse
       
   220   application. This combinator, and several variants of it, are defined in
       
   221   @{ML_file "Pure/General/basics.ML"}}:
       
   222 *}
       
   223 
       
   224 ML {*
       
   225 let
       
   226   val thy = @{theory}
       
   227   val nat = HOLogic.natT
       
   228   val x = Free ("x", nat)
       
   229   val t = Free ("t", nat)
       
   230   val P = Free ("P", nat --> HOLogic.boolT)
       
   231   val Q = Free ("Q", nat --> HOLogic.boolT)
       
   232 
       
   233   val A1 = Logic.all x 
       
   234            (Logic.mk_implies (HOLogic.mk_Trueprop (P $ x),
       
   235                               HOLogic.mk_Trueprop (Q $ x)))
       
   236            |> cterm_of thy
       
   237 
       
   238   val A2 = HOLogic.mk_Trueprop (P $ t)
       
   239            |> cterm_of thy
       
   240 
       
   241   val Pt_implies_Qt = 
       
   242         assume A1
       
   243         |> forall_elim (cterm_of thy t)
       
   244 
       
   245   val Qt = implies_elim Pt_implies_Qt (assume A2)
       
   246 in
       
   247   Qt 
       
   248   |> implies_intr A2
       
   249   |> implies_intr A1
       
   250 end
       
   251 *}
       
   252 
       
   253 section {* Tactical reasoning *}
       
   254 
       
   255 text {*
       
   256   The goal-oriented tactical style is similar to the @{text apply}
       
   257   style at the user level. Reasoning is centered around a \emph{goal},
       
   258   which is modified in a sequence of proof steps until it is solved.
       
   259 
       
   260   A goal (or goal state) is a special @{ML_type thm}, which by
       
   261   convention is an implication:
       
   262   @{text[display] "A\<^isub>1 \<Longrightarrow> \<dots> \<Longrightarrow> A\<^isub>n \<Longrightarrow> #(C)"}
       
   263   Since the final result @{term C} could itself be an implication, it is wrapped in

   264   the marker @{text "#"}, which protects its premises from being
       
   265   misinterpreted as open subgoals. The protection @{text "# :: prop \<Rightarrow>
       
   266   prop"} is just the identity and is used only as a syntactic marker.
       
   267 
       
   268   Now tactics are just functions that map a goal state to a (lazy)
       
   269   sequence of successor states, hence the type of a tactic is
       
   270   @{ML_type[display] "thm -> thm Seq.seq"}
       
   271   See @{ML_file "Pure/General/seq.ML"} for the implementation of lazy
       
   272   sequences.
       
   273 
       
   274   Of course, tactics are expected to behave nicely and leave the final
       
   275   conclusion @{term C} intact. In order to start a tactical proof for
       
   276   @{term A}, we
       
   277   just set up the trivial goal @{text "A \<Longrightarrow> #(A)"} and run the tactic
       
   278   on it. When the subgoal is solved, we have just @{text "#(A)"} and
       
   279   can remove the protection.
       
   280 
       
   281   The operations in @{ML_file "Pure/goal.ML"} do just that and we can use
       
   282   them.
       
   283 
       
   284   Let us transcribe a simple apply style proof from the
       
   285   tutorial\cite{isa-tutorial} into ML:
       
   286 *}
       
   287 
       
   288 lemma disj_swap: "P \<or> Q \<Longrightarrow> Q \<or> P"
       
   289 apply (erule disjE)
       
   290  apply (rule disjI2)
       
   291  apply assumption
       
   292 apply (rule disjI1)
       
   293 apply assumption
       
   294 done
       
   295 
       
   296 ML {*
       
   297 let
       
   298   val ctxt = @{context}
       
   299   val goal = @{prop "P \<or> Q \<Longrightarrow> Q \<or> P"}
       
   300 in
       
   301   Goal.prove ctxt ["P", "Q"] [] goal (fn _ => 
       
   302     eresolve_tac [disjE] 1
       
   303     THEN resolve_tac [disjI2] 1
       
   304     THEN assume_tac 1
       
   305     THEN resolve_tac [disjI1] 1
       
   306     THEN assume_tac 1)
       
   307 end
       
   308 *}
       
   309 
       
   310 text {*
       
   311   Tactics that affect only a certain subgoal take a subgoal number as
       
   312   an integer parameter. Here we always work on the first subgoal,
       
   313   following exactly the @{text "apply"} script.
       
   314 *}
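       text {*
         Such single-subgoal tactics can be chained with @{ML_text "THEN'"},
         which passes the same subgoal number to both of its arguments. As a
         sketch, the proof above can be packaged as follows; applying
         @{ML_text "disj_swap_tac 1"} corresponds to the tactic expression used
         in the @{ML_text "Goal.prove"} call:
       *}

       ML {*
         val disj_swap_tac =
           eresolve_tac [disjE]
           THEN' resolve_tac [disjI2]
           THEN' assume_tac
           THEN' resolve_tac [disjI1]
           THEN' assume_tac
       *}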
       
   315 
       
   316 
       
   317 section {* Case Study: Relation Composition *}
       
   318 
       
   319 text {*
       
   320   \emph{Note: This is completely unfinished. I hoped to have a section
       
   321   with a nontrivial example, but I ran into several problems.}
       
   322 
       
   323 
       
   324   Recall that HOL has special syntax for set comprehensions:
       
   325   @{term "{ f x y |x y. P x y}"} abbreviates 
       
   326   @{term[source] "{u. \<exists>x y. u = f x y \<and> P x y}"}. 
       
   327 
       
   328   We will automatically prove statements of the following form:
       
   329   
       
   330   @{lemma[display] "{(l\<^isub>1 x, r\<^isub>1 x) |x. P\<^isub>1 x} O {(l\<^isub>2 x, r\<^isub>2 x) |x. P\<^isub>2 x}
       
   331   = {(l\<^isub>2 x, r\<^isub>1 y) |x y. r\<^isub>2 x = l\<^isub>1 y \<and>
       
   332   P\<^isub>2 x \<and> P\<^isub>1 y}" by auto}
       
   333 
       
   334   In Isabelle, relation composition is defined to be consistent with
       
   335   function composition, that is, the relation applied ``first'' is
       
   336   written on the right-hand side. This differs from what many
       
   337   textbooks do.
       
   338 
       
   339   The above statement about composition is not proved automatically by
       
   340   @{method simp}, and it cannot be solved by a fixed set of rewrite
       
   341   rules, since the number of (implicit) quantifiers may vary. Here, we
       
   342   only have one bound variable in each comprehension, but in general
       
   343   there can be more. On the other hand, @{method auto} proves the
       
   344   above statement quickly, by breaking the equality into two parts and
       
   345   proving them separately. However, if e.g.\ @{term "P\<^isub>1"} is a
       
   346   complicated expression, the automated tools may get confused.
       
   347 
       
   348   Our goal is now to develop a small procedure that can compute (with proof) the
       
   349   composition of two relation comprehensions, which can be used to
       
   350   extend the simplifier.
       
   351 *}
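       text {*
         Since we will eventually have to construct such comprehension terms
         ourselves, it is worth inspecting what the abbreviation expands to
         internally; a quick sketch:
       *}

       ML {*
         @{term "{ f x y |x y. P x y}"}
       *}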
       
   352 
       
   353 section {* A tactic *}
       
   354 
       
   355 text {* Let's start with a step-by-step proof of the above statement. *}
       
   356 
       
   357 lemma "{(l\<^isub>1 x, r\<^isub>1 x) |x. P\<^isub>1 x} O {(l\<^isub>2
       
   358   x, r\<^isub>2 x) |x. P\<^isub>2 x}
       
   359   = {(l\<^isub>2 x, r\<^isub>1 y) |x y. r\<^isub>2 x = l\<^isub>1 y \<and> P\<^isub>2 x \<and> P\<^isub>1 y}"
       
   360 apply (rule set_ext)
       
   361 apply (rule iffI)
       
   362  apply (erule rel_compE)  -- {* @{text "\<subseteq>"} *}
       
   363  apply (erule CollectE)     -- {* eliminate @{text "Collect"}, @{text "\<exists>"}, @{text "\<and>"}, and pairs *}
       
   364  apply (erule CollectE)
       
   365  apply (erule exE)
       
   366  apply (erule exE)
       
   367  apply (erule conjE)
       
   368  apply (erule conjE)
       
   369  apply (erule Pair_inject)
       
   370  apply (erule Pair_inject)
       
   371  apply (simp only:)
       
   372 
       
   373  apply (rule CollectI)    -- {* introduce them again *}
       
   374  apply (rule exI)
       
   375  apply (rule exI)
       
   376  apply (rule conjI)
       
   377   apply (rule refl)
       
   378  apply (rule conjI)
       
   379   apply (rule sym)
       
   380   apply (assumption)
       
   381  apply (rule conjI)
       
   382   apply assumption
       
   383  apply assumption
       
   384 
       
   385 apply (erule CollectE)   -- {* @{text "\<supseteq>"} *}
       
   386 apply (erule exE)+
       
   387 apply (erule conjE)+
       
   388 apply (simp only:)
       
   389 apply (rule rel_compI)
       
   390  apply (rule CollectI)
       
   391  apply (rule exI)
       
   392  apply (rule conjI)
       
   393   apply (rule refl)
       
   394  apply assumption
       
   395 
       
   396 apply (rule CollectI)
       
   397 apply (rule exI)
       
   398 apply (rule conjI)
       
   399 apply (subst Pair_eq)
       
   400 apply (rule conjI)
       
   401  apply assumption
       
   402 apply (rule refl)
       
   403 apply assumption
       
   404 done
       
   405 
       
   406 text {*
       
   407   The reader will probably need to step through the proof and verify
       
   408   that there is nothing spectacular going on here.
       
   409   The @{text apply} script just applies the usual elimination and introduction rules in the right order.
       
   410 
       
   411   This script is of course totally unreadable. But we are not trying
       
   412   to produce pretty Isar proofs here. We just want to find out which
       
   413   rules are needed and how they must be applied to complete the
       
   414   proof. And a detailed apply-style proof can often be turned into a
       
   415   tactic quite easily. Of course we must resist the temptation to use
       
   416   @{method auto}, @{method blast} and friends, since their behaviour
       
   417   is not predictable enough. But the simple @{method rule} and
       
   418   @{method erule} methods are fine.
       
   419 
       
   420   Notice that this proof depends in only one detail on the
       
   421   concrete equation that we want to prove: The number of bound
       
   422   variables in the comprehension corresponds to the number of
       
   423   existential quantifiers that we have to eliminate and introduce
       
   424   again. In fact this is the only reason why the equations that we
       
   425   want to prove are not just instances of a single rule.
       
   426 
       
   427   Here is the ML equivalent of the tactic script above:
       
   428 *}
       
   429 
       
   430 ML {*
       
   431 val compr_compose_tac =
       
   432   rtac @{thm set_ext}
       
   433   THEN' rtac @{thm iffI}
       
   434   THEN' etac @{thm rel_compE}
       
   435   THEN' etac @{thm CollectE}
       
   436   THEN' etac @{thm CollectE}
       
   437   THEN' (fn i => REPEAT (etac @{thm exE} i))
       
   438   THEN' etac @{thm conjE}
       
   439   THEN' etac @{thm conjE}
       
   440   THEN' etac @{thm Pair_inject}
       
   441   THEN' etac @{thm Pair_inject}
       
   442   THEN' asm_full_simp_tac HOL_basic_ss
       
   443   THEN' rtac @{thm CollectI}
       
   444   THEN' (fn i => REPEAT (rtac @{thm exI} i))
       
   445   THEN' rtac @{thm conjI}
       
   446   THEN' rtac @{thm refl}
       
   447   THEN' rtac @{thm conjI}
       
   448   THEN' rtac @{thm sym}
       
   449   THEN' assume_tac
       
   450   THEN' rtac @{thm conjI}
       
   451   THEN' assume_tac
       
   452   THEN' assume_tac
       
   453 
       
   454   THEN' etac @{thm CollectE}
       
   455   THEN' (fn i => REPEAT (etac @{thm exE} i))
       
   456   THEN' etac @{thm conjE}
       
   457   THEN' etac @{thm conjE}
       
   458   THEN' etac @{thm conjE}
       
   459   THEN' asm_full_simp_tac HOL_basic_ss
       
   460   THEN' rtac @{thm rel_compI}
       
   461   THEN' rtac @{thm CollectI}
       
   462   THEN' (fn i => REPEAT (rtac @{thm exI} i))
       
   463   THEN' rtac @{thm conjI}
       
   464   THEN' rtac @{thm refl}
       
   465   THEN' assume_tac
       
   466   THEN' rtac @{thm CollectI}
       
   467   THEN' (fn i => REPEAT (rtac @{thm exI} i))
       
   468   THEN' rtac @{thm conjI}
       
   469   THEN' simp_tac (HOL_basic_ss addsimps [@{thm Pair_eq}])
       
   470   THEN' rtac @{thm conjI}
       
   471   THEN' assume_tac
       
   472   THEN' rtac @{thm refl}
       
   473   THEN' assume_tac
       
   474 *}
       
   475 
       
   476 lemma test1: "{(l\<^isub>1 x, r\<^isub>1 x) |x. P\<^isub>1 x} O {(l\<^isub>2
       
   477   x, r\<^isub>2 x) |x. P\<^isub>2 x}
       
   478   = {(l\<^isub>2 x, r\<^isub>1 y) |x y. r\<^isub>2 x = l\<^isub>1 y \<and> P\<^isub>2 x \<and> P\<^isub>1 y}"
       
   479 by (tactic "compr_compose_tac 1")
       
   480 
       
   481 lemma test3: "{(l\<^isub>1 x, r\<^isub>1 x) |x. P\<^isub>1 x} O {(l\<^isub>2 x z, r\<^isub>2 x z) |x z. P\<^isub>2 x z}
       
   482   = {(l\<^isub>2 x z, r\<^isub>1 y) |x y z. r\<^isub>2 x z = l\<^isub>1 y \<and> P\<^isub>2 x z \<and> P\<^isub>1 y}"
       
   483 by (tactic "compr_compose_tac 1")
       
   484 
       
   485 text {*
       
   486   So we have a tactic that works on at least two examples.
       
   487   Getting it really right requires some more effort. Consider the goal
       
   488 *}
       
   489 lemma "{(n, Suc n) |n. n > 0} O {(n, Suc n) |n. P n}
       
   490   = {(n, Suc m)|n m. Suc n = m \<and> P n \<and> m > 0}"
       
   491 
       
   492 (*lemma "{(l\<^isub>1 x, r\<^isub>1 x) |x. P\<^isub>1 x} O {(l\<^isub>2
       
   493   x, r\<^isub>2 x) |x. P\<^isub>2 x}
       
   494   = {(l\<^isub>2 x, r\<^isub>1 y) |x y. r\<^isub>2 x = l\<^isub>1 y \<and> P\<^isub>2 x \<and> P\<^isub>1 y}"*)
       
   495 txt {*
       
   496   This is exactly an instance of @{fact test1}, but our tactic fails
       
   497   on it with the usual uninformative
       
   498   \emph{empty result sequence}.
       
   499 
       
   500   We are now in the frequent situation that we need to debug. One simple instrument for this is @{ML "print_tac"},
       
   501   which behaves like @{ML all_tac} (the identity for @{ML_text "THEN"}),
       
   502   i.e.\ it does nothing, but it prints the current goal state as a
       
   503   side effect.
       
   504   Another debugging option is of course to step through the interactive apply script.
       
   505 
       
   506   Finding the problem could be taken as an exercise for the patient
       
   507   reader, but we will go ahead with the solution.
       
   508 
       
   509   The problem is that in this instance the simplifier does more than it did in the general version
       
   510   of lemma @{fact test1}. Since @{text "l\<^isub>1"} and @{text "l\<^isub>2"} are just the identity function,
       
   511   the equation corresponding to @{text "l\<^isub>1 y = r\<^isub>2 x "}
       
   512   becomes @{text "m = Suc n"}. Then the simplifier eagerly replaces
       
   513   all occurrences of @{term "m"} by @{term "Suc n"}, which destroys the
       
   514   structure of the proof.
       
   515 
       
   516   This is perhaps the most important lesson to learn when writing tactics:

   517   \textbf{Avoid automation at all costs!}
       
   518 
       
   519   Let us look at the proof state at the point where the simplifier is
       
   520   invoked:
       
   521   
       
   522 *}
       
   523 (*<*)
       
   524 apply (rule set_ext)
       
   525 apply (rule iffI)
       
   526  apply (erule rel_compE)
       
   527  apply (erule CollectE)
       
   528  apply (erule CollectE)
       
   529  apply (erule exE)
       
   530  apply (erule exE)
       
   531  apply (erule conjE)
       
   532  apply (erule conjE)
       
   533  apply (erule Pair_inject)
       
   534  apply (erule Pair_inject)(*>*)
       
   535 txt {*
       
   536   
       
   537   @{subgoals[display]}
       
   538   
       
   539   As in the apply proof, we now want to eliminate the equations that
       
   540   ``define'' @{term x}, @{term xa} and @{term z}. The other equations
       
   541   are just there by coincidence, and we must not touch them.
       
   542   
       
   543   For such purposes, there is the internal tactic @{text "single_hyp_subst_tac"}.
       
   544   Its job is to take exactly one premise of the form @{term "v = t"},
       
   545   where @{term v} is a variable, and replace @{term "v"} in the whole
       
   546   subgoal. The hypothesis to eliminate is given by its position.
       
   547 
       
   548   We can use this tactic to eliminate @{term x}:
       
   549 *}
       
   550 apply (tactic "single_hyp_subst_tac 0 1")
       
   551 txt {*
       
   552   @{subgoals[display]}
       
   553 *}
       
   554 apply (tactic "single_hyp_subst_tac 2 1")
       
   555 apply (tactic "single_hyp_subst_tac 2 1")
       
   556 apply (tactic "single_hyp_subst_tac 3 1")
       
   557  apply (rule CollectI)    -- {* introduce them again *}
       
   558  apply (rule exI)
       
   559  apply (rule exI)
       
   560  apply (rule conjI)
       
   561   apply (rule refl)
       
   562  apply (rule conjI)
       
   563   apply (assumption)
       
   564  apply (rule conjI)
       
   565   apply assumption
       
   566  apply assumption
       
   567 
       
   568 apply (erule CollectE)   -- {* @{text "\<subseteq>"} *}
       
   569 apply (erule exE)+
       
   570 apply (erule conjE)+
       
   571 apply (tactic "single_hyp_subst_tac 0 1")
       
   572 apply (rule rel_compI)
       
   573  apply (rule CollectI)
       
   574  apply (rule exI)
       
   575  apply (rule conjI)
       
   576   apply (rule refl)
       
   577  apply assumption
       
   578 
       
   579 apply (rule CollectI)
       
   580 apply (rule exI)
       
   581 apply (rule conjI)
       
   582 apply (subst Pair_eq)
       
   583 apply (rule conjI)
       
   584  apply assumption
       
   585 apply (rule refl)
       
   586 apply assumption
       
   587 done
       
   588 
       
   589 ML {*
       
   590 val compr_compose_tac =
       
   591   rtac @{thm set_ext}
       
   592   THEN' rtac @{thm iffI}
       
   593   THEN' etac @{thm rel_compE}
       
   594   THEN' etac @{thm CollectE}
       
   595   THEN' etac @{thm CollectE}
       
   596   THEN' (fn i => REPEAT (etac @{thm exE} i))
       
   597   THEN' etac @{thm conjE}
       
   598   THEN' etac @{thm conjE}
       
   599   THEN' etac @{thm Pair_inject}
       
   600   THEN' etac @{thm Pair_inject}
       
   601   THEN' single_hyp_subst_tac 0
       
   602   THEN' single_hyp_subst_tac 2
       
   603   THEN' single_hyp_subst_tac 2
       
   604   THEN' single_hyp_subst_tac 3
       
   605   THEN' rtac @{thm CollectI}
       
   606   THEN' (fn i => REPEAT (rtac @{thm exI} i))
       
   607   THEN' rtac @{thm conjI}
       
   608   THEN' rtac @{thm refl}
       
   609   THEN' rtac @{thm conjI}
       
   610   THEN' assume_tac
       
   611   THEN' rtac @{thm conjI}
       
   612   THEN' assume_tac
       
   613   THEN' assume_tac
       
   614 
       
   615   THEN' etac @{thm CollectE}
       
   616   THEN' (fn i => REPEAT (etac @{thm exE} i))
       
   617   THEN' etac @{thm conjE}
       
   618   THEN' etac @{thm conjE}
       
   619   THEN' etac @{thm conjE}
       
   620   THEN' single_hyp_subst_tac 0
       
   621   THEN' rtac @{thm rel_compI}
       
   622   THEN' rtac @{thm CollectI}
       
   623   THEN' (fn i => REPEAT (rtac @{thm exI} i))
       
   624   THEN' rtac @{thm conjI}
       
   625   THEN' rtac @{thm refl}
       
   626   THEN' assume_tac
       
   627   THEN' rtac @{thm CollectI}
       
   628   THEN' (fn i => REPEAT (rtac @{thm exI} i))
       
   629   THEN' rtac @{thm conjI}
       
   630   THEN' stac @{thm Pair_eq}
       
   631   THEN' rtac @{thm conjI}
       
   632   THEN' assume_tac
       
   633   THEN' rtac @{thm refl}
       
   634   THEN' assume_tac
       
   635 *}
       
   636 
       
   637 lemma "{(n, Suc n) |n. n > 0 \<and> A} O {(n, Suc n) |n m. P m n}
       
   638   = {(n, Suc m)|n m' m. Suc n = m \<and> P m' n \<and> (m > 0 \<and> A)}"
       
   639 apply (tactic "compr_compose_tac 1")
       
   640 done
       
   641 
       
   642 text {*
       
   643   The next step is now to turn this tactic into a simplification
       
   644   procedure. This just means that we need some code that builds the
       
   645   term of the composed relation.
       
   646 *}
       
   647 
       
   648 use "comp_simproc"
       
   649 
       
   650 (*<*)
       
   651 (*simproc_setup mysp ("x O y") = {* compose_simproc *}*)
       
   652 
       
   653 lemma "{(n, Suc n) |n. n > 0 \<and> A} O {(n, Suc n) |n m. P m n} = x"
       
   654 (*apply (simp del:ex_simps)*)
       
   655 oops
       
   656 
       
   657 
       
   658 lemma "({(g m, k) | m k. Q m k} 
       
   659 O {(h j, f j) | j. R j}) = x"
       
   660 (*apply (simp del:ex_simps) *)
       
   661 oops
       
   662 
       
   663 lemma "{uu. \<exists>j m k. uu = (h j, k) \<and> f j = g m \<and> R j \<and> Q m k}
       
   664 O {(h j, f j) | j. R j} = x"
       
   665 (*apply (simp del:ex_simps)*)
       
   666 oops
       
   667 
       
   668 lemma "
       
   669   { (l x, r x) | x. P x \<and> Q x \<and> Q' x }
       
   670 O { (l1 x, r1 x) | x. P1 x \<and> Q1 x \<and> Q1' x }
       
   671 = A"
       
   672 (*apply (simp del:ex_simps)*)
       
   673 oops
       
   674 
       
   675 lemma "
       
   676   { (l x, r x) | x. P x }
       
   677 O { (l1 x, r1 x) | x. P1 x }
       
   678 O { (l2 x, r2 x) | x. P2 x }
       
   679 = A"
       
   680 (*
       
   681 apply (simp del:ex_simps)*)
       
   682 oops
       
   683 
       
   684 lemma "{(f n, m) |n m. P n m} O ({(g m, k) | m k. Q m k} 
       
   685 O {(h j, f j) | j. R j}) = x"
       
   686 (*apply (simp del:ex_simps)*)
       
   687 oops
       
   688 
       
   689 lemma "{u. \<exists>n. u=(f n, g n)} O {u. \<exists>n. u=(h n, j n)} = A"
       
   690 oops
       
   691 
       
   692 
       
   693 (*>*)
       
   694 end