# HG changeset patch # User Christian Urban # Date 1295477892 -3600 # Node ID 52e1e98edb34a40346e5fbd1a3fff39d9fa54a17 # Parent 1df873b63cb2bec64e99340a972d1cc2b266679b added a very rough version of the tutorial; all seems to work diff -r 1df873b63cb2 -r 52e1e98edb34 Tutorial/Lambda.thy --- a/Tutorial/Lambda.thy Wed Jan 19 19:41:50 2011 +0100 +++ b/Tutorial/Lambda.thy Wed Jan 19 23:58:12 2011 +0100 @@ -4,6 +4,7 @@ section {* Definitions for Lambda Terms *} + text {* type of variables *} atom_decl name @@ -50,7 +51,7 @@ by (relation "measure size") (simp_all add: lam.size) -subsection {* Capture-avoiding Substitution *} +subsection {* Capture-Avoiding Substitution *} nominal_primrec subst :: "lam \ name \ lam \ lam" ("_ [_ ::= _]" [90,90,90] 90) @@ -97,7 +98,7 @@ done termination - by (relation "measure (\(t,_,_). size t)") + by (relation "measure (\(t, _, _). size t)") (simp_all add: lam.size) lemma subst_eqvt[eqvt]: diff -r 1df873b63cb2 -r 52e1e98edb34 Tutorial/Minimal.thy --- a/Tutorial/Minimal.thy Wed Jan 19 19:41:50 2011 +0100 +++ b/Tutorial/Minimal.thy Wed Jan 19 23:58:12 2011 +0100 @@ -9,6 +9,8 @@ | App "lam" "lam" | Lam x::"name" l::"lam" bind x in l ("Lam [_]. _" [100, 100] 100) + + lemma alpha_test: shows "Lam [x]. (Var x) = Lam [y]. (Var y)" by (simp add: lam.eq_iff Abs1_eq_iff lam.fresh fresh_at_base) diff -r 1df873b63cb2 -r 52e1e98edb34 Tutorial/Tutorial1.thy --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/Tutorial/Tutorial1.thy Wed Jan 19 23:58:12 2011 +0100 @@ -0,0 +1,1553 @@ + +header {* + + Nominal Isabelle Tutorial + ========================= + + There will be hands-on exercises throughout the tutorial. Therefore + it would be good if you have your own laptop. + + Nominal Isabelle is a definitional extension of Isabelle/HOL, which + means it does not add any new axioms to higher-order logic. It merely + adds new definitions and an infrastructure for 'nominal resoning'. + + + The Interface + ------------- + + The Isabelle theorem prover comes with an interface for jEdit interface. + Unlike many other theorem prover interfaces (e.g. ProofGeneral) where you + try to advance a 'checked' region in a proof script, this interface immediately + checks the whole buffer. The text you type is also immediately checked + as you type. Malformed text or unfinished proofs are highlighted in red + with a little red 'stop' signal on the left-hand side. If you drag the + 'red-box' cursor over a line, the Output window gives further feedback. + + Note: If a section is not parsed correctly, the work-around is to cut it + out and paste it back into the text (cut-out: Ctrl + X; paste in: Ctrl + V; + Cmd is Ctrl on the Mac) + + Nominal Isabelle and the interface can be started on the command line with + + isabelle jedit -l HOL-Nominal2 + isabelle jedit -l HOL-Nominal2 A.thy B.thy ... + + + Symbols + ------- + + Symbols can considerably improve the readability of your statements and proofs. + They can be input by just typing 'name-of-symbol' where 'name-of-symbol' is the + usual latex name of that symbol. A little window will then appear in which + you can select the symbol. With `Escape' you can ignore any suggestion. + + There are some handy short-cuts for frequently used symbols. + For example + + short-cut symbol + + => \ + ==> \ + --> \ + ! \ + ? 
\ + /\ \ + \/ \ + ~ \ + ~= \ + : \ + ~: \ + + For nominal two important symbols are + + \ sharp (freshness) + \ bullet (permutations) +*} + +theory Tutorial1 +imports Lambda +begin + +section {* Theories *} + +text {* + All formal developments in Isabelle are part of a theory. A theory + needs to have a name and must import some pre-existing theory (as indicated + above). The imported theory will normally be the theory Nominal2 (which + contains many useful concepts like set-theory, lists, nominal theory etc). + + For the purpose of the tutorial we import the theory Lambda.thy which + contains already some useful definitions for (alpha-equated) lambda terms. +*} + + + +section {* Types *} + +text {* + Isabelle is based on simple types including some polymorphism. It also includes + some overloading, which means that sometimes explicit type annotations need to + be given. + + - Base types include: nat, bool, string + + - Type formers include: 'a list, ('a \ 'b), 'c set + + - Type variables are written like in ML with an apostrophe: 'a, 'b, \ + + Types known to Isabelle can be queried using the command "typ". +*} + +typ nat +typ bool +typ string +typ lam -- {* alpha-equated lambda terms defined in Lambda.thy *} +typ name -- {* type of (object) variables defined in Lambda.thy *} +typ "('a \ 'b)" -- {* pair type *} +typ "'c set" -- {* set type *} +typ "'a list" -- {* list type *} +typ "nat \ bool" -- {* type of functions from natural numbers to booleans *} + + +text {* Some malformed types: *} + +(* +typ boolean -- {* undeclared type *} +typ set -- {* type argument missing *} +*) + + +section {* Terms *} + +text {* + Every term in Isabelle needs to be well-typed. However they can have + polymorphic type. Whether a term is accepted can be queried using + the command "term". +*} + +term c -- {* a variable of polymorphic type *} +term "1::nat" -- {* the constant 1 in natural numbers *} +term 1 -- {* the constant 1 with polymorphic type *} +term "{1, 2, 3::nat}" -- {* the set containing natural numbers 1, 2 and 3 *} +term "[1, 2, 3]" -- {* the list containing the polymorphic numbers 1, 2 and 3 *} +term "(True, ''c'')" -- {* a pair consisting of the boolean true and the string "c" *} +term "Suc 0" -- {* successor of 0, in other words 1::nat *} +term "Lam [x].Var x" -- {* a lambda-term *} +term "App t1 t2" -- {* another lambda-term *} +term "x::name" -- {* an (object) variable of type name *} +term "atom (x::name)" -- {* atom is an overloded function *} + +text {* + Lam [x].Var is the syntax we made up for lambda abstractions. You can have + your own syntax. We also came up with "name". If you prefer, you can call + it identifiers or have more than one type for (object languag) variables. + + Isabelle provides some useful colour feedback about constants (black), + free variables (blue) and bound variables (green). +*} + +term "True" -- {* a constant that is defined in HOL...written in black *} +term "true" -- {* not recognised as a constant, therefore it is interpreted + as a free variable, written in blue *} +term "\x. P x" -- {* x is bound (green), P is free (blue) *} + + +text {* Formulae in Isabelle/HOL are terms of type bool *} + +term "True" +term "True \ False" +term "True \ B" +term "{1,2,3} = {3,2,1}" +term "\x. P x" +term "A \ B" +term "atom a \ t" + +text {* + When working with Isabelle, one deals with an object logic (that is HOL) and + Isabelle's rule framework (called Pure). Occasionally one has to pay attention + to this fact. But for the moment we ignore this completely. 
+*} + +term "A \ B" -- {* versus *} +term "A \ B" + +term "\x. P x" -- {* versus *} +term "\x. P x" + + + +section {* Inductive Definitions: Transitive Closures of Beta-Reduction *} + +text {* + The theory Lmabda alraedy contains a definition for beta-reduction, written + + t \b t' + + In this section we want to define inductively the transitive closure of + beta-reduction. + + Inductive definitions in Isabelle start with the keyword "inductive" and a predicate + name. One can optionally provide a type for the predicate. Clauses of the inductive + predicate are introduced by "where" and more than two clauses need to be + separated by "|". One can also give a name to each clause and indicate that it + should be added to the hints database ("[intro]"). A typical clause has some + premises and a conclusion. This is written in Isabelle as: + + "premise \ conclusion" + "premise1 \ premise2 \ \ premisen \ conclusion" + + There is an alternative way of writing the latter clause as + + "\premise1; premise2; \ premisen\ \ conclusion" + + If no premise is present, then one just writes + + "conclusion" + + Below we give two definitions for the transitive closure +*} + +inductive + beta_star1 :: "lam \ lam \ bool" ("_ \b* _" [60, 60] 60) +where + bs1_refl: "t \b* t" +| bs1_trans: "\t1 \b t2; t2 \b* t3\ \ t1 \b* t3" + + +inductive + beta_star2 :: "lam \ lam \ bool" ("_ \b** _" [60, 60] 60) +where + bs2_refl: "t \b** t" +| bs2_step: "t1 \b t2 \ t1 \b** t2" +| bs2_trans: "\t1 \b** t2; t2 \b** t3\ \ t1 \b** t3" + +section {* Theorems *} + +text {* + A central concept in Isabelle is that of theorems. Isabelle's theorem + database can be queried using +*} + +thm bs1_refl +thm bs2_trans +thm conjI +thm conjunct1 + +text {* + Notice that theorems usually contain schematic variables (e.g. ?P, ?Q, \). + These schematic variables can be substituted with any term (of the right type + of course). + + When defining the predicates beta_star, Isabelle provides us automatically + with the following theorems that state how they can be introduced and what + constitutes an induction over them. +*} + +thm beta_star1.intros +thm beta_star2.induct + + +section {* Lemmas / Theorems / Corollaries *} + +text {* + Whether to use lemma, theorem or corollary makes no semantic difference + in Isabelle. + + A lemma starts with "lemma" and consists of a statement ("shows \") and + optionally a lemma name, some type-information for variables ("fixes \") + and some assumptions ("assumes \"). + + Lemmas also need to have a proof, but ignore this 'detail' for the moment. + + Examples are +*} + +lemma alpha_equ: + shows "Lam [x].Var x = Lam [y].Var y" + by (simp add: lam.eq_iff Abs1_eq_iff lam.fresh fresh_at_base) + +lemma Lam_freshness: + assumes a: "x \ y" + and b: "atom y \ Lam [x].t" + shows "atom y \ t" + using a b by (simp add: lam.fresh Abs_fresh_iff) + +lemma neutral_element: + fixes x::"nat" + shows "x + 0 = x" + by simp + +text {* + Note that in the last statement, the explicit type annotation is important + in order for Isabelle to be able to figure out what 0 stands for (e.g. a + natural number, a vector, etc) and which lemmas to apply. +*} + + +section {* Isar Proofs *} + +text {* + Isar is a language for writing formal proofs that can be understood by humans + and by Isabelle. An Isar proof can be thought of as a sequence of 'stepping stones' + that start with some assumptions and lead to the goal to be established. Every such + stepping stone is introduced by "have" followed by the statement of the stepping + stone. 
An exception is the goal to be proved, which need to be introduced with "show". + + have "statement" \ + show "goal_to_be_proved" \ + + Since proofs usually do not proceed in a linear fashion, labels can be given + to every stepping stone and they can be used later to explicitly refer to this + corresponding stepping stone ("using"). + + have label: "statement1" \ + \ + have "later_statement" using label \ + + Each stepping stone (or have-statement) needs to have a justification. The + simplest justification is "sorry" which admits any stepping stone, even false + ones (this is good during the development of proofs). + + have "outrageous_false_statement" sorry + + Assumptions can be 'justified' using "by fact". + + have "assumption" by fact + + Derived facts can be justified using + + have "statement" by simp -- simplification + have "statement" by auto -- proof search and simplification + have "statement" by blast -- only proof search + + If facts or lemmas are needed in order to justify a have-statement, then + one can feed these facts into the proof by using "using label \" or + "using theorem-name \". More than one label at a time is allowed. For + example + + have "statement" using label1 label2 theorem_name by auto + + Induction proofs in Isar are set up by indicating over which predicate(s) + the induction proceeds ("using a b") followed by the command "proof (induct)". + In this way, Isabelle uses defaults for which induction should be performed. + These defaults can be overridden by giving more information, like the variable + over which a structural induction should proceed, or a specific induction principle, + such as well-founded inductions. + + After the induction is set up, the proof proceeds by cases. In Isar these + cases can be given in any order. Most commonly they are started with "case" and the + name of the case, and optionally some legible names for the variables used inside + the case. + + In each "case", we need to establish a statement introduced by "show". Once + this has been done, the next case can be started using "next". When all cases + are completed, the proof can be finished using "qed". + + This means a typical induction proof has the following pattern + + proof (induct) + case \ + \ + show \ + next + case \ + \ + show \ + \ + qed +*} + + +subsection {* Exercise I *} + +text {* + Remove the sorries in the proof below and fill in the proper + justifications. +*} + + +lemma + assumes a: "t1 \b* t2" + shows "t1 \b** t2" + using a +proof (induct) + case (bs1_refl t) + show "t \b** t" using bs2_refl by blast +next + case (bs1_trans t1 t2 t3) + have beta: "t1 \b t2" by fact + have ih: "t2 \b** t3" by fact + have a: "t1 \b** t2" using beta bs2_step by blast + show "t1 \b** t3" using a ih bs2_trans by blast +qed + + +subsection {* Exercise II *} + +text {* + Again remove the sorries in the proof below and fill in the proper + justifications. 
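+
+  As a reminder of the overall shape, here is a tiny complete Isar proof
+  in the style described above (it is not part of the exercises and the
+  statement is deliberately trivial):
+
+    lemma
+      assumes a: "A \<and> B"
+      shows "B \<and> A"
+    proof -
+      have b: "B" using a by simp
+      have c: "A" using a by simp
+      show "B \<and> A" using b c by simp
+    qed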
+*} + +lemma bs1_trans2: + assumes a: "t1 \b* t2" + and b: "t2 \b* t3" + shows "t1 \b* t3" +using a b +proof (induct) + case (bs1_refl t1) + have a: "t1 \b* t3" by fact + show "t1 \b* t3" using a by blast +next + case (bs1_trans t1 t2 t3') + have ih1: "t1 \b t2" by fact + have ih2: "t3' \b* t3 \ t2 \b* t3" by fact + have asm: "t3' \b* t3" by fact + have a: "t2 \b* t3" using ih2 asm by blast + show "t1 \b* t3" using ih1 a beta_star1.bs1_trans by blast +qed + +lemma + assumes a: "t1 \b** t2" + shows "t1 \b* t2" +using a +proof (induct) + case (bs2_refl t) + show "t \b* t" using bs1_refl by blast +next + case (bs2_step t1 t2) + have ih: "t1 \b t2" by fact + have a: "t2 \b* t2" using bs1_refl by blast + show "t1 \b* t2" using ih a bs1_trans by blast +next + case (bs2_trans t1 t2 t3) + have ih1: "t1 \b* t2" by fact + have ih2: "t2 \b* t3" by fact + show "t1 \b* t3" using ih1 ih2 bs1_trans2 by blast +qed + +text {* + Just like gotos in the Basic programming language, labels often reduce + the readability of proofs. Therefore one can use in Isar the notation + "then have" in order to feed a have-statement to the proof of + the next have-statement. This is used in teh second case below. +*} + +lemma + assumes a: "t1 \b* t2" + and b: "t2 \b* t3" + shows "t1 \b* t3" +using a b +proof (induct) + case (bs1_refl t1) + show "t1 \b* t3" by fact +next + case (bs1_trans t1 t2 t3') + have ih1: "t1 \b t2" by fact + have ih2: "t3' \b* t3 \ t2 \b* t3" by fact + have "t3' \b* t3" by fact + then have "t2 \b* t3" using ih2 by blast + then show "t1 \b* t3" using ih1 beta_star1.bs1_trans by blast +qed + +text {* + The label ih2 cannot be got rid of in this way, because it is used + two lines below and we cannot rearange them. We can still avoid the + label by feeding a sequence of facts into a proof using the + "moreover"-chaining mechanism: + + have "statement_1" \ + moreover + have "statement_2" \ + \ + moreover + have "statement_n" \ + ultimately have "statement" \ + + In this chain, all "statement_i" can be used in the proof of the final + "statement". With this we can simplify our proof further to: +*} + +lemma + assumes a: "t1 \b* t2" + and b: "t2 \b* t3" + shows "t1 \b* t3" +using a b +proof (induct) + case (bs1_refl t1) + show "t1 \b* t3" by fact +next + case (bs1_trans t1 t2 t3') + have "t3' \b* t3 \ t2 \b* t3" by fact + moreover + have "t3' \b* t3" by fact + ultimately + have "t2 \b* t3" by blast + moreover + have "t1 \b t2" by fact + ultimately show "t1 \b* t3" using beta_star1.bs1_trans by blast +qed + + +text {* + While automatic proof procedures in Isabelle are not able to prove statements + like "P = NP" assuming usual definitions for P and NP, they can automatically + discharge the lemmas we just proved. For this we only have to set up the induction + and auto will take care of the rest. This means we can write: +*} + +lemma + assumes a: "t1 \b* t2" + shows "t1 \b** t2" + using a +by (induct) (auto intro: beta_star2.intros) + +lemma + assumes a: "t1 \b* t2" + and b: "t2 \b* t3" + shows "t1 \b* t3" +using a b +by (induct) (auto intro: beta_star1.intros) + +lemma + assumes a: "t1 \b** t2" + shows "t1 \b* t2" +using a +by (induct) (auto intro: bs1_trans2 beta_star1.intros) + +inductive + eval :: "lam \ lam \ bool" ("_ \ _" [60, 60] 60) +where + e_Lam: "Lam [x].t \ Lam [x].t" +| e_App: "\t1 \ Lam [x].t; t2 \ v'; t[x::=v'] \ v\ \ App t1 t2 \ v" + +declare eval.intros[intro] + +text {* + Values are also defined using inductive. In our case values + are just lambda-abstractions. 
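+
+  For example, with the two rules above the identity function applied to
+  a value should evaluate to that value. A sketch of the derivation (not
+  a checked proof; \<Down> stands for the evaluation arrow of eval, and the
+  substitution step uses the equations from Lambda.thy):
+
+    Lam [x]. Var x \<Down> Lam [x]. Var x                  by e_Lam
+    Lam [y]. Var y \<Down> Lam [y]. Var y                  by e_Lam
+    (Var x)[x ::= Lam [y]. Var y] \<Down> Lam [y]. Var y   since the substitution equals Lam [y]. Var y
+
+  and therefore, by e_App,
+
+    App (Lam [x]. Var x) (Lam [y]. Var y) \<Down> Lam [y]. Var y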
*} + +inductive + val :: "lam \ bool" +where + v_Lam[intro]: "val (Lam [x].t)" + + +section {* Datatypes: Evaluation Contexts *} + +text {* + + Datatypes can be defined in Isabelle as follows: we have to provide the name + of the datatype and list its type-constructors. Each type-constructor needs + to have the information about the types of its arguments, and optionally + can also contain some information about pretty syntax. For example, we like + to write "\" for holes. +*} + +datatype ctx = + Hole ("\") + | CAppL "ctx" "lam" + | CAppR "lam" "ctx" + +text {* Now Isabelle knows about: *} + +typ ctx +term "\" +term "CAppL" +term "CAppL \ (Var x)" + +text {* + + 1.) MINI EXERCISE + ----------------- + + Try and see what happens if you apply a Hole to a Hole? + +*} + +type_synonym ctxs = "ctx list" + +inductive + machine :: "lam \ ctxs \lam \ ctxs \ bool" ("<_,_> \ <_,_>" [60, 60, 60, 60] 60) +where + m1[intro]: " \ t2) # Es>" +| m2[intro]: "val v \ t2) # Es> \ ) # Es>" +| m3[intro]: "val v \ ) # Es> \ " + + +text {* + Since the machine defined above only performs a single reduction, + we need to define the transitive closure of this machine. *} + +inductive + machines :: "lam \ ctxs \ lam \ ctxs \ bool" ("<_,_> \* <_,_>" [60, 60, 60, 60] 60) +where + ms1[intro]: " \* " +| ms2[intro]: "\ \ ; \* \ \ \* " + +lemma + assumes a: " \* " + and b: " \* " + shows " \* " +using a b +proof(induct) + case (ms1 e1 Es1) + have c: " \* " by fact + show " \* " sorry +next + case (ms2 e1 Es1 e2 Es2 e2' Es2') + have ih: " \* \ \* " by fact + have d1: " \* " by fact + have d2: " \ " by fact + + show " \* " sorry +qed + +text {* + Just like gotos in the Basic programming language, labels can reduce + the readability of proofs. Therefore one can use in Isar the notation + "then have" in order to feed a have-statement to the proof of + the next have-statement. In the proof below this has been used + in order to get rid of the labels c and d1. +*} + +lemma + assumes a: " \* " + and b: " \* " + shows " \* " +using a b +proof(induct) + case (ms1 e1 Es1) + show " \* " by fact +next + case (ms2 e1 Es1 e2 Es2 e2' Es2') + have ih: " \* \ \* " by fact + have " \* " by fact + then have d3: " \* " using ih by simp + have d2: " \ " by fact + show " \* " using d2 d3 by auto +qed + +lemma + assumes a: " \* " + and b: " \* " + shows " \* " +using a b +proof(induct) + case (ms1 e1 Es1) + show " \* " by fact +next + case (ms2 e1 Es1 e2 Es2 e2' Es2') + have ih: " \* \ \* " by fact + have " \* " by fact + then have " \* " using ih by simp + moreover + have " \ " by fact + ultimately show " \* " by auto +qed + + +lemma ms3[intro]: + assumes a: " \* " + and b: " \* " + shows " \* " +using a b by (induct) (auto) + +lemma eval_to_val: + assumes a: "t \ t'" + shows "val t'" +using a by (induct) (auto) + + +theorem + assumes a: "t \ t'" + shows " \* " +using a +proof (induct) + case (e_Lam x t) + (* no assumptions *) + show " \* " sorry +next + case (e_App t1 x t t2 v' v) + (* all assumptions in this case *) + have a1: "t1 \ Lam [x].t" by fact + have ih1: " \* " by fact + have a2: "t2 \ v'" by fact + have ih2: " \* " by fact + have a3: "t[x::=v'] \ v" by fact + have ih3: " \* " by fact + (* your details *) + show " \* " sorry +qed + +text {* + Again the automatic tools in Isabelle can discharge automatically + of the routine work in these proofs. 
We can write: *} + +theorem eval_implies_machines_ctx: + assumes a: "t \ t'" + shows " \* " +using a +by (induct arbitrary: Es) + (metis eval_to_val machine.intros ms1 ms2 ms3 v_Lam)+ + +corollary eval_implies_machines: + assumes a: "t \ t'" + shows " \* " +using a eval_implies_machines_ctx by simp + + +nominal_datatype ty = + tVar "string" +| tArr "ty" "ty" ("_ \ _" [100, 100] 100) + + +text {* + Having defined them as nominal datatypes gives us additional + definitions and theorems that come with types (see below). + *} + +text {* + We next define the type of typing contexts, a predicate that + defines valid contexts (i.e. lists that contain only unique + variables) and the typing judgement. + +*} + +type_synonym ty_ctx = "(name \ ty) list" + +inductive + valid :: "ty_ctx \ bool" +where + v1[intro]: "valid []" +| v2[intro]: "\valid \; atom x \ \\\ valid ((x, T) # \)" + +inductive + typing :: "ty_ctx \ lam \ ty \ bool" ("_ \ _ : _" [60, 60, 60] 60) +where + t_Var[intro]: "\valid \; (x, T) \ set \\ \ \ \ Var x : T" +| t_App[intro]: "\\ \ t1 : T1 \ T2; \ \ t2 : T1\ \ \ \ App t1 t2 : T2" +| t_Lam[intro]: "\atom x \ \; (x, T1) # \ \ t : T2\ \ \ \ Lam [x].t : T1 \ T2" + + +text {* + The predicate x \ \, read as x fresh for \, is defined by Nominal Isabelle. + Freshness is defined for lambda-terms, products, lists etc. For example + we have: + *} + +lemma + fixes x::"name" + shows "atom x \ Lam [x].t" + and "atom x \ (t1, t2) \ atom x \ App t1 t2" + and "atom x \ Var y \ atom x \ y" + and "\atom x \ t1; atom x \ t2\ \ atom x \ (t1, t2)" + and "\atom x \ l1; atom x \ l2\ \ atom x \ (l1 @ l2)" + and "atom x \ y \ x \ y" + by (simp_all add: lam.fresh fresh_append fresh_at_base) + +text {* We can also prove that every variable is fresh for a type *} + +lemma ty_fresh: + fixes x::"name" + and T::"ty" + shows "atom x \ T" +by (induct T rule: ty.induct) + (simp_all add: ty.fresh pure_fresh) + +text {* + In order to state the weakening lemma in a convenient form, we overload + the subset-notation and define the abbreviation below. Abbreviations behave + like definitions, except that they are always automatically folded and + unfolded. +*} + +abbreviation + "sub_ty_ctx" :: "ty_ctx \ ty_ctx \ bool" ("_ \ _" [60, 60] 60) +where + "\1 \ \2 \ \x. x \ set \1 \ x \ set \2" + +text {***************************************************************** + + 4.) Exercise + ------------ + + Fill in the details and give a proof of the weakening lemma. + +*} + +lemma + assumes a: "\1 \ t : T" + and b: "valid \2" + and c: "\1 \ \2" + shows "\2 \ t : T" +using a b c +proof (induct arbitrary: \2) + case (t_Var \1 x T) + have a1: "valid \1" by fact + have a2: "(x, T) \ set \1" by fact + have a3: "valid \2" by fact + have a4: "\1 \ \2" by fact + + show "\2 \ Var x : T" sorry +next + case (t_Lam x \1 T1 t T2) + have ih: "\\3. \valid \3; (x, T1) # \1 \ \3\ \ \3 \ t : T2" by fact + have a0: "atom x \ \1" by fact + have a1: "valid \2" by fact + have a2: "\1 \ \2" by fact + + show "\2 \ Lam [x].t : T1 \ T2" sorry +qed (auto) + + +text {* + Despite the frequent claim that the weakening lemma is trivial, + routine or obvious, the proof in the lambda-case does not go + smoothly through. Painful variable renamings seem to be necessary. + But the proof using renamings is horribly complicated. It is really + interesting whether people who claim this proof is trivial, routine + or obvious had this proof in mind. 
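+
+  For the variable case, by contrast, the facts collected in the exercise
+  skeleton are already sufficient. Assuming the labels a2, a3 and a4 from
+  that skeleton (and writing the symbols out explicitly), one possible
+  ending of the case is the following sketch:
+
+    have "(x, T) \<in> set \<Gamma>2" using a2 a4 by auto
+    then show "\<Gamma>2 \<turnstile> Var x : T" using a3 by auto
+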
+ + BTW: The following two commands help already with showing that validity + and typing are invariant under variable (permutative) renamings. +*} + +equivariance valid +equivariance typing + +lemma not_to_be_tried_at_home_weakening: + assumes a: "\1 \ t : T" + and b: "valid \2" + and c: "\1 \ \2" + shows "\2 \ t : T" +using a b c +proof (induct arbitrary: \2) + case (t_Lam x \1 T1 t T2) (* lambda case *) + have fc0: "atom x \ \1" by fact + have ih: "\\3. \valid \3; (x, T1) # \1 \ \3\ \ \3 \ t : T2" by fact + obtain c::"name" where fc1: "atom c \ (x, t, \1, \2)" by (rule obtain_fresh) + have "Lam [c].((c \ x) \ t) = Lam [x].t" using fc1 (* we then alpha-rename the lambda-abstraction *) + by (auto simp add: lam.eq_iff Abs1_eq_iff flip_def) + moreover + have "\2 \ Lam [c].((c \ x) \ t) : T1 \ T2" (* we can then alpha-rename our original goal *) + proof - + (* we establish (x, T1) # \1 \ (x, T1) # ((c \ x) \ \2) and valid ((x, T1) # ((c \ x) \ \2)) *) + have "(x, T1) # \1 \ (x, T1) # ((c \ x) \ \2)" + proof - + have "\1 \ \2" by fact + then have "(c \ x) \ ((x, T1) # \1 \ (x, T1) # ((c \ x) \ \2))" using fc0 fc1 + by (perm_simp) (simp add: flip_fresh_fresh) + then show "(x, T1) # \1 \ (x, T1) # ((c \ x) \ \2)" by (rule permute_boolE) + qed + moreover + have "valid ((x, T1) # ((c \ x) \ \2))" + proof - + have "valid \2" by fact + then show "valid ((x, T1) # ((c \ x) \ \2))" using fc1 + by (auto simp add: fresh_permute_left atom_eqvt valid.eqvt) + qed + (* these two facts give us by induction hypothesis the following *) + ultimately have "(x, T1) # ((c \ x) \ \2) \ t : T2" using ih by simp + (* we now apply renamings to get to our goal *) + then have "(c \ x) \ ((x, T1) # ((c \ x) \ \2) \ t : T2)" by (rule permute_boolI) + then have "(c, T1) # \2 \ ((c \ x) \ t) : T2" using fc1 + by (perm_simp) (simp add: flip_fresh_fresh ty_fresh) + then show "\2 \ Lam [c].((c \ x) \ t) : T1 \ T2" using fc1 by auto + qed + ultimately show "\2 \ Lam [x].t : T1 \ T2" by simp +qed (auto) (* var and app cases *) + + +text {* + The remedy to the complicated proof of the weakening proof + shown above is to use a stronger induction principle that + has the usual variable convention already build in. The + following command derives this induction principle for us. + (We shall explain what happens here later.) + +*} + +nominal_inductive typing + avoids t_Lam: "x" + unfolding fresh_star_def + by (simp_all add: fresh_Pair lam.fresh ty_fresh) + +text {* Compare the two induction principles *} +thm typing.induct +thm typing.strong_induct + +text {* + We can use the stronger induction principle by replacing + the line + + proof (induct arbitrary: \2) + + with + + proof (nominal_induct avoiding: \2 rule: typing.strong_induct) + + Try now the proof. + +*} + + +lemma weakening: + assumes a: "\1 \ t : T" + and b: "valid \2" + and c: "\1 \ \2" + shows "\2 \ t : T" +using a b c +proof (nominal_induct avoiding: \2 rule: typing.strong_induct) + case (t_Var \1 x T) (* variable case *) + have "\1 \ \2" by fact + moreover + have "valid \2" by fact + moreover + have "(x, T)\ set \1" by fact + ultimately show "\2 \ Var x : T" by auto +next + case (t_Lam x \1 T1 t T2) + have vc: "atom x \ \2" by fact (* additional fact *) + have ih: "\\3. 
\valid \3; (x, T1) # \1 \ \3\ \ \3 \ t : T2" by fact + have a0: "atom x \ \1" by fact + have a1: "valid \2" by fact + have a2: "\1 \ \2" by fact + + show "\2 \ Lam [x].t : T1 \ T2" sorry +qed (auto) (* app case *) + + +text {***************************************************************** + + Function Definitions: Filling a Lambda-Term into a Context + ---------------------------------------------------------- + + Many functions over datatypes can be defined by recursion on the + structure. For this purpose, Isabelle provides "fun". To use it one needs + to give a name for the function, its type, optionally some pretty-syntax + and then some equations defining the function. Like in "inductive", + "fun" expects that more than one such equation is separated by "|". + +*} + +fun + filling :: "ctx \ lam \ lam" ("_\_\" [100, 100] 100) +where + "\\t\ = t" +| "(CAppL E t')\t\ = App (E\t\) t'" +| "(CAppR t' E)\t\ = App t' (E\t\)" + +text {* + After this definition Isabelle will be able to simplify + statements like: *} + +lemma + shows "(CAppL \ (Var x))\Var y\ = App (Var y) (Var x)" + by simp + + +fun + ctx_compose :: "ctx \ ctx \ ctx" ("_ \ _" [101,100] 100) +where + "\ \ E' = E'" +| "(CAppL E t') \ E' = CAppL (E \ E') t'" +| "(CAppR t' E) \ E' = CAppR t' (E \ E')" + +fun + ctx_composes :: "ctxs \ ctx" ("_\" [110] 110) +where + "[]\ = \" + | "(E # Es)\ = (Es\) \ E" + +text {* + Notice that we not just have given a pretty syntax for the functions, but + also some precedences..The numbers inside the [\] stand for the precedences + of the arguments; the one next to it the precedence of the whole term. + + This means we have to write (Es1 \ Es2) \ Es3 otherwise Es1 \ Es2 \ Es3 is + interpreted as Es1 \ (Es2 \ Es3). +*} + +text {****************************************************************** + + Structural Inductions over Contexts + ------------------------------------ + + So far we have had a look at an induction over an inductive predicate. + Another important induction principle is structural inductions for + datatypes. To illustrate structural inductions we prove some facts + about context composition, some of which we will need later on. + +*} + +text {****************************************************************** + + 5.) EXERCISE + ------------ + + Complete the proof and remove the sorries. + +*} + +lemma ctx_compose: + shows "(E1 \ E2)\t\ = E1\E2\t\\" +proof (induct E1) + case Hole + show "\ \ E2\t\ = \\E2\t\\" sorry +next + case (CAppL E1 t') + have ih: "(E1 \ E2)\t\ = E1\E2\t\\" by fact + show "((CAppL E1 t') \ E2)\t\ = (CAppL E1 t')\E2\t\\" sorry +next + case (CAppR t' E1) + have ih: "(E1 \ E2)\t\ = E1\E2\t\\" by fact + show "((CAppR t' E1) \ E2)\t\ = (CAppR t' E1)\E2\t\\" sorry +qed + +lemma neut_hole: + shows "E \ \ = E" +by (induct E) (simp_all) + +lemma circ_assoc: + fixes E1 E2 E3::"ctx" + shows "(E1 \ E2) \ E3 = E1 \ (E2 \ E3)" +by (induct E1) (simp_all) + +lemma + shows "(Es1 @ Es2)\ = (Es2\) \ (Es1\)" +proof (induct Es1) + case Nil + show "([] @ Es2)\ = Es2\ \ []\" sorry +next + case (Cons E Es1) + have ih: "(Es1 @ Es2)\ = Es2\ \ Es1\" by fact + + show "((E # Es1) @ Es2)\ = Es2\ \ (E # Es1)\" sorry +qed + + +text {* + The last proof involves several steps of equational reasoning. + Isar provides some convenient means to express such equational + reasoning in a much cleaner fashion using the "also have" + construction. 
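+
+  Schematically, such a calculation has the form
+
+    have "t1 = t2" by ...
+    also have "\<dots> = t3" by ...
+    also have "\<dots> = t4" by ...
+    finally show "t1 = t4" by ...
+
+  where "\<dots>" always abbreviates the right-hand side of the previous
+  equation and "finally" composes all the steps by transitivity. The
+  proof below is an instance of this pattern.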
*} + +lemma + shows "(Es1 @ Es2)\ = (Es2\) \ (Es1\)" +proof (induct Es1) + case Nil + show "([] @ Es2)\ = Es2\ \ []\" using neut_hole by simp +next + case (Cons E Es1) + have ih: "(Es1 @ Es2)\ = Es2\ \ Es1\" by fact + have "((E # Es1) @ Es2)\ = (Es1 @ Es2)\ \ E" by simp + also have "\ = (Es2\ \ Es1\) \ E" using ih by simp + also have "\ = Es2\ \ (Es1\ \ E)" using circ_assoc by simp + also have "\ = Es2\ \ (E # Es1)\" by simp + finally show "((E # Es1) @ Es2)\ = Es2\ \ (E # Es1)\" by simp +qed + +text {****************************************************************** + + Formalising Barendregt's Proof of the Substitution Lemma + -------------------------------------------------------- + + Barendregt's proof needs in the variable case a case distinction. + One way to do this in Isar is to use blocks. A block is some sequent + or reasoning steps enclosed in curly braces + + { \ + + have "statement" + } + + Such a block can contain local assumptions like + + { assume "A" + assume "B" + \ + have "C" by \ + } + + Where "C" is the last have-statement in this block. The behaviour + of such a block to the 'outside' is the implication + + \A; B\ \ "C" + + Now if we want to prove a property "smth" using the case-distinctions + P\<^isub>1, P\<^isub>2 and P\<^isub>3 then we can use the following reasoning: + + { assume "P\<^isub>1" + \ + have "smth" + } + moreover + { assume "P\<^isub>2" + \ + have "smth" + } + moreover + { assume "P\<^isub>3" + \ + have "smth" + } + ultimately have "smth" by blast + + The blocks establish the implications + + P\<^isub>1 \ smth + P\<^isub>2 \ smth + P\<^isub>3 \ smth + + If we know that P\<^isub>1, P\<^isub>2 and P\<^isub>3 cover all the cases, that is P\<^isub>1 \ P\<^isub>2 \ P\<^isub>3 is + true, then we have 'ultimately' established the property "smth" + +*} + +text {****************************************************************** + + 7.) Exercise + ------------ + + Fill in the cases 1.2 and 1.3 and the equational reasoning + in the lambda-case. 
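+
+  As a tiny, self-contained illustration of this block mechanism
+  (independent of the exercise; the statement is deliberately trivial):
+
+    lemma
+      fixes n::"nat"
+      shows "n = 0 \<or> 0 < n"
+    proof -
+      { assume "n = 0"
+        then have "n = 0 \<or> 0 < n" by simp }
+      moreover
+      { assume "n \<noteq> 0"
+        then have "0 < n" by simp
+        then have "n = 0 \<or> 0 < n" by simp }
+      ultimately show "n = 0 \<or> 0 < n" by blast
+    qed
+
+  The two blocks establish the implications "n = 0 \<Longrightarrow> ..." and
+  "n \<noteq> 0 \<Longrightarrow> ...", which together cover all cases.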
+*} + +lemma forget: + shows "atom x \ t \ t[x ::= s] = t" +apply(nominal_induct t avoiding: x s rule: lam.strong_induct) +apply(auto simp add: lam.fresh fresh_at_base) +done + +lemma fresh_fact: + fixes z::"name" + assumes a: "atom z \ s" + and b: "z = y \ atom z \ t" + shows "atom z \ t[y ::= s]" +using a b +apply (nominal_induct t avoiding: z y s rule: lam.strong_induct) +apply (auto simp add: lam.fresh fresh_at_base) +done + +thm forget +thm fresh_fact + +lemma + assumes a: "x \ y" + and b: "atom x \ L" + shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]" +using a b +proof (nominal_induct M avoiding: x y N L rule: lam.strong_induct) + case (Var z) + have a1: "x \ y" by fact + have a2: "atom x \ L" by fact + show "Var z[x::=N][y::=L] = Var z[y::=L][x::=N[y::=L]]" (is "?LHS = ?RHS") + proof - + { (*Case 1.1*) + assume c1: "z=x" + have "(1)": "?LHS = N[y::=L]" using c1 by simp + have "(2)": "?RHS = N[y::=L]" using c1 a1 by simp + have "?LHS = ?RHS" using "(1)" "(2)" by simp + } + moreover + { (*Case 1.2*) + assume c2: "z = y" "z \ x" + + have "?LHS = ?RHS" sorry + } + moreover + { (*Case 1.3*) + assume c3: "z \ x" "z \ y" + + have "?LHS = ?RHS" sorry + } + ultimately show "?LHS = ?RHS" by blast + qed +next + case (Lam z M1) (* case 2: lambdas *) + have ih: "\x \ y; atom x \ L\ \ M1[x::=N][y::=L] = M1[y::=L][x::=N[y::=L]]" by fact + have a1: "x \ y" by fact + have a2: "atom x \ L" by fact + have fs: "atom z \ x" "atom z \ y" "atom z \ N" "atom z \ L" by fact+ + then have b: "atom z \ N[y::=L]" by (simp add: fresh_fact) + show "(Lam [z].M1)[x::=N][y::=L] = (Lam [z].M1)[y::=L][x::=N[y::=L]]" (is "?LHS=?RHS") + proof - + have "?LHS = \" sorry + + also have "\ = ?RHS" sorry + finally show "?LHS = ?RHS" by simp + qed +next + case (App M1 M2) (* case 3: applications *) + then show "(App M1 M2)[x::=N][y::=L] = (App M1 M2)[y::=L][x::=N[y::=L]]" by simp +qed + +text {* + Again the strong induction principle enables Isabelle to find + the proof of the substitution lemma automatically. +*} + +lemma substitution_lemma_version: + assumes asm: "x \ y" "atom x \ L" + shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]" + using asm +by (nominal_induct M avoiding: x y N L rule: lam.strong_induct) + (auto simp add: fresh_fact forget) + +text {****************************************************************** + + The CBV Reduction Relation (Small-Step Semantics) + ------------------------------------------------- + + In order to establish the property that the CK Machine + calculates a nomrmalform which corresponds to the + evaluation relation, we introduce the call-by-value + small-step semantics. + +*} + +inductive + cbv :: "lam \ lam \ bool" ("_ \cbv _" [60, 60] 60) +where + cbv1: "\val v; atom x \ v\ \ App (Lam [x].t) v \cbv t[x::=v]" +| cbv2[intro]: "t \cbv t' \ App t t2 \cbv App t' t2" +| cbv3[intro]: "t \cbv t' \ App t2 t \cbv App t2 t'" + +equivariance val +equivariance cbv +nominal_inductive cbv + avoids cbv1: "x" + unfolding fresh_star_def + by (simp_all add: lam.fresh Abs_fresh_iff fresh_Pair fresh_fact) + +text {* + In order to satisfy the vc-condition we have to formulate + this relation with the additional freshness constraint + x\v. Though this makes the definition vc-ompatible, it + makes the definition less useful. We can with some pain + show that the more restricted rule is equivalent to the + usual rule. 
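+
+  For instance, one expects the concrete reduction (a sketch, not a
+  checked proof; \<longrightarrow>cbv stands for the arrow of cbv)
+
+    App (Lam [x]. Var x) (Lam [y]. Var y)  \<longrightarrow>cbv  Lam [y]. Var y
+
+  to hold: Lam [y]. Var y is a value and (Var x)[x ::= Lam [y]. Var y]
+  equals Lam [y]. Var y, so this is an instance of the unconstrained
+  rule better_cbv1 proved below.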
*} + +lemma subst_rename: + assumes a: "atom y \ t" + shows "t[x ::= s] = ((y \ x) \t)[y ::= s]" +using a +apply (nominal_induct t avoiding: x y s rule: lam.strong_induct) +apply (auto simp add: lam.fresh fresh_at_base) +done + +thm subst_rename + +lemma better_cbv1[intro]: + assumes a: "val v" + shows "App (Lam [x].t) v \cbv t[x::=v]" +proof - + obtain y::"name" where fs: "atom y \ (x, t, v)" by (rule obtain_fresh) + have "App (Lam [x].t) v = App (Lam [y].((y \ x) \ t)) v" using fs + by (auto simp add: lam.eq_iff Abs1_eq_iff' flip_def fresh_Pair fresh_at_base) + also have "\ \cbv ((y \ x) \ t)[y ::= v]" using fs a by (auto intro: cbv1) + also have "\ = t[x ::= v]" using fs by (simp add: subst_rename[symmetric]) + finally show "App (Lam [x].t) v \cbv t[x ::= v]" by simp +qed + +text {* + The transitive closure of the cbv-reduction relation: *} + +inductive + "cbvs" :: "lam \ lam \ bool" (" _ \cbv* _" [60, 60] 60) +where + cbvs1[intro]: "e \cbv* e" +| cbvs2[intro]: "\e1\cbv e2; e2 \cbv* e3\ \ e1 \cbv* e3" + +lemma cbvs3[intro]: + assumes a: "e1 \cbv* e2" "e2 \cbv* e3" + shows "e1 \cbv* e3" +using a by (induct) (auto) + +text {****************************************************************** + + 8.) Exercise + ------------ + + If more simple exercises are needed, then complete the following proof. + +*} + +lemma cbv_in_ctx: + assumes a: "t \cbv t'" + shows "E\t\ \cbv E\t'\" +using a +proof (induct E) + case Hole + have "t \cbv t'" by fact + then show "\\t\ \cbv \\t'\" sorry +next + case (CAppL E s) + have ih: "t \cbv t' \ E\t\ \cbv E\t'\" by fact + have a: "t \cbv t'" by fact + show "(CAppL E s)\t\ \cbv (CAppL E s)\t'\" sorry +next + case (CAppR s E) + have ih: "t \cbv t' \ E\t\ \cbv E\t'\" by fact + have a: "t \cbv t'" by fact + show "(CAppR s E)\t\ \cbv (CAppR s E)\t'\" sorry +qed + + +text {****************************************************************** + + 9.) Exercise + ------------ + + The point of the cbv-reduction was that we can easily relatively + establish the follwoing property: + +*} + +lemma machine_implies_cbvs_ctx: + assumes a: " \ " + shows "(Es\)\e\ \cbv* (Es'\)\e'\" +using a +proof (induct) + case (m1 t1 t2 Es) + + show "Es\\App t1 t2\ \cbv* ((CAppL \ t2) # Es)\\t1\" sorry +next + case (m2 v t2 Es) + have "val v" by fact + + show "((CAppL \ t2) # Es)\\v\ \cbv* (CAppR v \ # Es)\\t2\" sorry +next + case (m3 v x t Es) + have "val v" by fact + + show "(((CAppR (Lam [x].t) \) # Es)\)\v\ \cbv* (Es\)\(t[x ::= v])\" sorry +qed + +text {* + It is not difficult to extend the lemma above to + arbitrary reductions sequences of the CK machine. *} + +lemma machines_implies_cbvs_ctx: + assumes a: " \* " + shows "(Es\)\e\ \cbv* (Es'\)\e'\" +using a +by (induct) (auto dest: machine_implies_cbvs_ctx) + +text {* + So whenever we let the CL machine start in an initial + state and it arrives at a final state, then there exists + a corresponding cbv-reduction sequence. *} + +corollary machines_implies_cbvs: + assumes a: " \* " + shows "e \cbv* e'" +using a by (auto dest: machines_implies_cbvs_ctx) + +text {* + We now want to relate the cbv-reduction to the evaluation + relation. For this we need two auxiliary lemmas. *} + +lemma eval_val: + assumes a: "val t" + shows "t \ t" +using a by (induct) (auto) + +lemma e_App_elim: + assumes a: "App t1 t2 \ v" + shows "\x t v'. t1 \ Lam [x].t \ t2 \ v' \ t[x::=v'] \ v" +using a by (cases) (auto simp add: lam.eq_iff lam.distinct) + +text {****************************************************************** + + 10.) 
Exercise + ------------- + + Complete the first case in the proof below. + +*} + +lemma cbv_eval: + assumes a: "t1 \cbv t2" "t2 \ t3" + shows "t1 \ t3" +using a +proof(induct arbitrary: t3) + case (cbv1 v x t t3) + have a1: "val v" by fact + have a2: "t[x ::= v] \ t3" by fact + + show "App (Lam [x].t) v \ t3" sorry +next + case (cbv2 t t' t2 t3) + have ih: "\t3. t' \ t3 \ t \ t3" by fact + have "App t' t2 \ t3" by fact + then obtain x t'' v' + where a1: "t' \ Lam [x].t''" + and a2: "t2 \ v'" + and a3: "t''[x ::= v'] \ t3" using e_App_elim by blast + have "t \ Lam [x].t''" using ih a1 by auto + then show "App t t2 \ t3" using a2 a3 by auto +qed (auto dest!: e_App_elim) + + +text {* + Next we extend the lemma above to arbitray initial + sequences of cbv-reductions. *} + +lemma cbvs_eval: + assumes a: "t1 \cbv* t2" "t2 \ t3" + shows "t1 \ t3" +using a by (induct) (auto intro: cbv_eval) + +text {* + Finally, we can show that if from a term t we reach a value + by a cbv-reduction sequence, then t evaluates to this value. *} + +lemma cbvs_implies_eval: + assumes a: "t \cbv* v" "val v" + shows "t \ v" +using a +by (induct) (auto intro: eval_val cbvs_eval) + +text {* + All facts tied together give us the desired property about + K machines. *} + +theorem machines_implies_eval: + assumes a: " \* " + and b: "val t2" + shows "t1 \ t2" +proof - + have "t1 \cbv* t2" using a by (simp add: machines_implies_cbvs) + then show "t1 \ t2" using b by (simp add: cbvs_implies_eval) +qed + +lemma valid_elim: + assumes a: "valid ((x, T) # \)" + shows "atom x \ \ \ valid \" +using a by (cases) (auto) + +lemma valid_insert: + assumes a: "valid (\ @ [(x, T)] @ \)" + shows "valid (\ @ \)" +using a +by (induct \) + (auto simp add: fresh_append fresh_Cons dest!: valid_elim) + +lemma fresh_list: + shows "atom y \ xs = (\x \ set xs. atom y \ x)" +by (induct xs) (simp_all add: fresh_Nil fresh_Cons) + +lemma context_unique: + assumes a1: "valid \" + and a2: "(x, T) \ set \" + and a3: "(x, U) \ set \" + shows "T = U" +using a1 a2 a3 +by (induct) (auto simp add: fresh_list fresh_Pair fresh_at_base) + +lemma type_substitution_aux: + assumes a: "(\ @ [(x, T')] @ \) \ e : T" + and b: "\ \ e' : T'" + shows "(\ @ \) \ e[x ::= e'] : T" +using a b +proof (nominal_induct \'\"\ @ [(x, T')] @ \" e T avoiding: x e' \ rule: typing.strong_induct) + case (t_Var y T x e' \) + have a1: "valid (\ @ [(x, T')] @ \)" by fact + have a2: "(y,T) \ set (\ @ [(x, T')] @ \)" by fact + have a3: "\ \ e' : T'" by fact + from a1 have a4: "valid (\ @ \)" by (rule valid_insert) + { assume eq: "x = y" + from a1 a2 have "T = T'" using eq by (auto intro: context_unique) + with a3 have "\ @ \ \ Var y[x::=e'] : T" using eq a4 by (auto intro: weakening) + } + moreover + { assume ineq: "x \ y" + from a2 have "(y, T) \ set (\ @ \)" using ineq by simp + then have "\ @ \ \ Var y[x::=e'] : T" using ineq a4 by auto + } + ultimately show "\ @ \ \ Var y[x::=e'] : T" by blast +qed (force simp add: fresh_append fresh_Cons)+ + +corollary type_substitution: + assumes a: "(x,T') # \ \ e : T" + and b: "\ \ e' : T'" + shows "\ \ e[x::=e'] : T" +using a b type_substitution_aux[where \="[]"] +by (auto) + +lemma t_App_elim: + assumes a: "\ \ App t1 t2 : T" + shows "\T'. \ \ t1 : T' \ T \ \ \ t2 : T'" +using a +by (cases) (auto simp add: lam.eq_iff lam.distinct) + +lemma t_Lam_elim: + assumes ty: "\ \ Lam [x].t : T" + and fc: "atom x \ \" + shows "\T1 T2. 
T = T1 \ T2 \ (x, T1) # \ \ t : T2" +using ty fc +apply(cases) +apply(auto simp add: lam.eq_iff lam.distinct ty.eq_iff) +apply(auto simp add: Abs1_eq_iff) +apply(rule_tac p="(x \ xa)" in permute_boolE) +apply(perm_simp) +apply(simp add: flip_def swap_fresh_fresh ty_fresh) +done + +theorem cbv_type_preservation: + assumes a: "t \cbv t'" + and b: "\ \ t : T" + shows "\ \ t' : T" +using a b +by (nominal_induct avoiding: \ T rule: cbv.strong_induct) + (auto dest!: t_Lam_elim t_App_elim simp add: type_substitution ty.eq_iff) + +corollary cbvs_type_preservation: + assumes a: "t \cbv* t'" + and b: "\ \ t : T" + shows "\ \ t' : T" +using a b +by (induct) (auto intro: cbv_type_preservation) + +text {* + The Type-Preservation Property for the Machine and Evaluation Relation. *} + +theorem machine_type_preservation: + assumes a: " \* " + and b: "\ \ t : T" + shows "\ \ t' : T" +proof - + from a have "t \cbv* t'" by (simp add: machines_implies_cbvs) + then show "\ \ t' : T" using b by (simp add: cbvs_type_preservation) +qed + +theorem eval_type_preservation: + assumes a: "t \ t'" + and b: "\ \ t : T" + shows "\ \ t' : T" +proof - + from a have " \* " by (simp add: eval_implies_machines) + then show "\ \ t' : T" using b by (simp add: machine_type_preservation) +qed + +text {* The Progress Property *} + +lemma canonical_tArr: + assumes a: "[] \ t : T1 \ T2" + and b: "val t" + shows "\x t'. t = Lam [x].t'" +using b a by (induct) (auto) + +theorem progress: + assumes a: "[] \ t : T" + shows "(\t'. t \cbv t') \ (val t)" +using a +by (induct \\"[]::ty_ctx" t T) + (auto intro: cbv.intros dest!: canonical_tArr) + + + +end +