Nominal/activities/CK_Machine.thy
changeset 415 f1be8028a4a9
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Nominal/activities/CK_Machine.thy	Wed Mar 30 17:27:34 2016 +0100
@@ -0,0 +1,1826 @@
+(***************************************************************** 
+  
+  Nominal Isabelle Tutorial
+  -------------------------
+
+  11th August 2008, Sydney 
+
+  This file contains most of the material that will be covered in the 
+  tutorial. The file can be "stepped through", though it contains a lot 
+  of commented text (shown in purple). 
+
+*)
+
+(***************************************************************** 
+
+  Proof General
+  -------------
+
+  Proof General is the front end for Isabelle.
+
+  To run Nominal Isabelle proof-scripts you must have HOL-Nominal enabled in
+  the menu Isabelle -> Logics. You also need to enable X-Symbols in the menu 
+  Proof-General -> Options (make sure to save this option once enabled).
+
+  Proof General "paints" blue the part of the proof-script that has already
+  been processed by Isabelle. You can advance or retract the "frontier" of
+  this processed part using the "Next" and "Undo" buttons in the
+  menubar. "Goto" will process everything up to the current cursor position,
+  or retract if the cursor is inside the blue part. The key-combination
+  control-c control-return is a short-cut for the "Goto"-operation.
+
+  Proof General gives feedback inside the "Response" and "Goals" buffers.
+  The response buffer will contain warning messages (in yellow) and 
+  error messages (in red). Warning messages can generally be ignored. Error 
+  messages must be dealt with in order to advance the proof script. The goal 
+  buffer shows which goals need to be proved. The sole idea of interactive 
+  theorem proving is to get the message "No subgoals." ;o)
+  
+*)
+
+(***************************************************************** 
+
+  X-Symbols
+  ---------
+
+  X-symbols provide a nice way to input non-ascii characters. For example:
+
+  \<forall>, \<exists>, \<Down>, \<sharp>, \<And>, \<Gamma>, \<times>, \<noteq>, \<in>, \<subseteq>, \<dots>
+
+  They need to be input via the combination \<name-of-x-symbol> 
+  where name-of-x-symbol coincides with the usual latex name of
+  that symbol.
+
+  However, there are some handy short-cuts for frequently used X-symbols. 
+  For example
+
+  [|  \<dots> \<lbrakk> 
+  |]  \<dots> \<rbrakk> 
+  ==> \<dots> \<Longrightarrow> 
+  =>  \<dots> \<Rightarrow> 
+  --> \<dots> \<longrightarrow> 
+  /\  \<dots> \<and>
+  \/  \<dots> \<or> 
+  |-> \<dots> \<mapsto>
+  =_  \<dots> \<equiv>    
+
+*)
+
+(***************************************************************** 
+
+  Theories
+  --------
+
+  Every Isabelle proof-script needs to have a name and must import
+  some pre-existing theory. For Nominal Isabelle proof-scripts this will 
+  normally be the theory Nominal, but we use here the theory Lambda.thy, 
+  which extends Nominal with a definition for lambda-terms and capture-
+  avoiding substitution.
+
+  BTW, the Nominal theory builds directly on Isabelle/HOL and extends it
+  only with some definitions and some reasoning infrastructure. It does not 
+  add any new axiom to Isabelle/HOL. So you can trust what you are doing. ;o)
+
+*)
+
+theory CK_Machine
+  imports "Lambda" 
+begin
+
+text {***************************************************************** 
+
+  Types
+  -----
+
+  Isabelle is based, roughly, on the theory of simple types including some 
+  polymorphism. It also includes some overloading, which means that sometimes 
+  explicit type annotations need to be given.
+
+  - Base types include: nat, bool, string, lam 
+
+  - Type formers include: 'a list, ('a\<times>'b), 'c set   
+
+  - Type variables are written like in ML with an apostrophe: 'a, 'b, \<dots>
+
+  Types known to Isabelle can be queried using:
+
+*}
+
+typ nat
+typ bool
+typ lam              (* the type for alpha-equated lambda-terms *)
+typ "('a \<times> 'b)"      (* product type *)
+typ "'c set"         (* set type *)
+typ "nat \<Rightarrow> bool"    (* the type for functions from nat to bool *)
+
+(* These give errors: *)
+(*typ boolean *) 
+(*typ set*)
+
+
+text {***************************************************************** 
+
+   Terms
+   -----
+
+   Every term in Isabelle needs to be well-typed (though it may have a polymorphic
+   type). Whether a term is accepted can be queried using: 
+
+*}
+
+term c                 (* a variable of polymorphic type *)
+term "1::nat"          (* the constant 1 in natural numbers *)
+term 1                 (* the constant 1 with polymorphic type *)          
+term "{1, 2, 3::nat}"  (* the set containing natural numbers 1, 2 and 3 *)      
+term "[1, 2, 3]"       (* the list containing the polymorphic numbers 1, 2 and 3 *)  
+term "Lam [x].(Var x)" (* a lambda-term *)
+term "App t1 t2"       (* another lambda-term *)
+
+text {* 
+  Isabelle provides some useful colour feedback about which parts of a term are 
+  constants (in black), free variables (in blue) and bound variables (in green). *}
+
+term "True"     (* a constant that is defined in HOL *)
+term "true"     (* not recognised as a constant, therefore it is interpreted as a variable *)
+term "\<forall>x. P x"  (* x is bound, P is free *)
+
+text {* Every formula in Isabelle needs to have type bool *}
+
+term "True"
+term "True \<and> False"
+term "{1,2,3} = {3,2,1}"
+term "\<forall>x. P x"
+term "A \<longrightarrow> B"
+
+text {* 
+  When working with Isabelle, one is confronted with an object logic (that is HOL)
+  and Isabelle's meta-logic (called Pure). Sometimes one has to pay attention 
+  to this fact. *}
+
+term "A \<longrightarrow> B" (* versus *)
+term "A \<Longrightarrow> B"
+
+term "\<forall>x. P x" (* versus *)
+term "\<And>x. P x"
+
+term "A \<Longrightarrow> B \<Longrightarrow> C" (* is synonymous with *)
+term "\<lbrakk>A; B\<rbrakk> \<Longrightarrow> C"
+
+
+text {***************************************************************** 
+
+  Inductive Definitions: The Evaluation Judgement and the Value Predicate
+  -----------------------------------------------------------------------
+
+  Inductive definitions start with the keyword "inductive" and a predicate name. 
+  One also needs to provide a type for the predicate. Clauses of the inductive
+  predicate are introduced by "where"; if there is more than one clause, they 
+  are separated by "|". Optionally one can give a name to each clause and indicate 
+  that it should be added to the hints database ("[intro]"). A typical clause
+  has some premises and a conclusion. This is written in Isabelle as:
+
+  "premise \<Longrightarrow> conclusion"
+  "\<lbrakk>premise1; premise2; \<dots> \<rbrakk> \<Longrightarrow> conclusion"
+
+  If no premise is present, then one just writes
+
+  "conclusion"
+
+  Below we define the evaluation judgement for lambda-terms. This definition 
+  introduces the predicate named "eval". After giving its type, we declare 
+  the usual pretty syntax _ \<Down> _. In this declaration _ stands for an argument
+  of eval.
+
+*}
+
+inductive
+  eval :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<Down> _") 
+where
+  e_Lam:  "Lam [x].t \<Down> Lam [x].t"
+| e_App:  "\<lbrakk>t1 \<Down> Lam [x].t; t2 \<Down> v'; t[x::=v'] \<Down> v\<rbrakk> \<Longrightarrow> App t1 t2 \<Down> v"
+
+declare eval.intros[intro]
+
+text {* 
+  Values are also defined using inductive. In our case values
+  are just lambda-abstractions. *}
+
+inductive
+  val :: "lam \<Rightarrow> bool" 
+where
+  v_Lam[intro]:   "val (Lam [x].t)"
+
+
+text {***************************************************************** 
+
+  Theorems
+  --------
+
+  A central concept in Isabelle is that of theorems. Isabelle's theorem
+  database can be queried using 
+
+*}
+
+thm e_Lam
+thm e_App
+thm conjI
+thm conjunct1
+
+text {* 
+  Notice that theorems usually contain schematic variables (e.g. ?P, ?Q, \<dots>). 
+  These schematic variables can be substituted with any term (of the right type 
+  of course). It is sometimes useful to suppress the "?" from the schematic 
+  variables. This can be achieved by using the attribute "[no_vars]". *}
+
+thm e_Lam[no_vars]
+thm e_App[no_vars]
+thm conjI[no_vars]
+thm conjunct1[no_vars]
+
+
+text {* 
+  When defining the predicate eval, Isabelle provides us automatically with 
+  the following theorems that state how evaluation judgments can be introduced 
+  and what constitutes an induction over the predicate eval. *}
+
+thm eval.intros[no_vars]
+thm eval.induct[no_vars]
+
+text {***************************************************************** 
+
+  Lemma / Theorem / Corollary Statements 
+  --------------------------------------
+
+  Whether to use lemma, theorem or corollary makes no semantic difference 
+  in Isabelle. A lemma starts with "lemma" and consists of a statement 
+  ("shows \<dots>") and optionally a lemma name, some type-information for 
+  variables ("fixes \<dots>") and some assumptions ("assumes \<dots>"). Lemmas 
+  also need to have a proof, but ignore this 'detail' for the moment.
+  
+*}
+
+lemma alpha_equ:
+  shows "Lam [x].Var x = Lam [y].Var y"
+by (simp add: lam.inject alpha swap_simps fresh_atm)
+
+lemma Lam_freshness:
+  assumes a: "x\<noteq>y"
+  shows "y\<sharp>Lam [x].t \<Longrightarrow> y\<sharp>t"
+using a by (simp add: abs_fresh) 
+
+lemma neutral_element:
+  fixes x::"nat"
+  shows "x + 0 = x"
+by simp
+
+
+text {***************************************************************** 
+
+  Datatypes: Evaluation Contexts 
+  ------------------------------
+
+  Datatypes can be defined in Isabelle as follows: we have to provide the name 
+  of the datatype and list its type-constructors. Each type-constructor needs 
+  to have the information about the types of its arguments, and optionally 
+  can also contain some information about pretty syntax. For example, we would 
+  like to write "\<box>" for holes.
+
+*}
+
+datatype ctx = 
+    Hole ("\<box>")  
+  | CAppL "ctx" "lam"
+  | CAppR "lam" "ctx"
+
+text {* Now Isabelle knows about: *}
+
+typ ctx
+term "\<box>"
+term "CAppL"
+term "CAppL \<box> (Var x)"
+
+text {* 
+
+  1.) MINI EXERCISE
+  -----------------
+
+  Try it and see what happens if you apply a Hole to a Hole. 
+
+*}
+
+
+text {***************************************************************** 
+
+  The CK Machine
+  --------------
+
+  The CK machine is also defined using an inductive predicate with four
+  arguments. The idea behind this abstract machine is to transform, or reduce, 
+  a configuration consisting of a lambda-term and a framestack (a list of 
+  contexts), into a new configuration.
+
+  We use the type abbreviation "ctxs" for the type of framestacks. 
+
+  The pretty syntax for the CK machine is <_,_> \<mapsto> <_,_>.
+
+*}
+
+types ctxs = "ctx list"
+
+inductive
+  machine :: "lam \<Rightarrow> ctxs \<Rightarrow>lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto> <_,_>")
+where
+  m1[intro]: "<App t1 t2,Es> \<mapsto> <t1,(CAppL \<box> t2)#Es>"
+| m2[intro]: "val v \<Longrightarrow> <v,(CAppL \<box> t2)#Es> \<mapsto> <t2,(CAppR v \<box>)#Es>"
+| m3[intro]: "val v \<Longrightarrow> <v,(CAppR (Lam [x].t) \<box>)#Es> \<mapsto> <t[x::=v],Es>"
+
+
+text {*
+  Since the machine defined above only performs a single reduction,
+  we need to define the transitive closure of this machine. *}
+
+inductive 
+  machines :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto>* <_,_>")
+where
+  ms1[intro]: "<t,Es> \<mapsto>* <t,Es>"
+| ms2[intro]: "\<lbrakk><t1,Es1> \<mapsto> <t2,Es2>; <t2,Es2> \<mapsto>* <t3,Es3>\<rbrakk> \<Longrightarrow> <t1,Es1> \<mapsto>* <t3,Es3>"
+
+
+text {***************************************************************** 
+  
+  Isar Proofs
+  -----------
+
+  Isar is a language for writing down proofs that can be understood by humans 
+  and by Isabelle. An Isar proof can be thought of as a sequence of 'stepping stones' 
+  that start with the assumptions and lead to the goal to be established. Every such 
+  stepping stone is introduced by "have" followed by the statement of the stepping 
+  stone. An exception is the goal to be proved, which needs to be introduced with "show".
+
+  Since proofs usually do not proceed in a linear fashion, a label can be given 
+  to each stepping stone and then used later to refer to this stepping stone 
+  ("using").
+
+  Each stepping stone (or have-statement) needs to have a justification. The 
+  simplest justification is "sorry" which admits any stepping stone, even false 
+  ones (this is good during the development of proofs). Assumptions can be 
+  "justified" using "by fact". Derived facts can be justified using 
+
+  - by simp    (* simplification *)
+  - by auto    (* proof search and simplification *)
+  - by blast   (* only proof search *)
+
+  If facts or lemmas are needed in order to justify a have-statement, then
+  one can feed these facts into the proof by using "using label \<dots>" or 
+  "using theorem-name \<dots>". More than one label at the time is allowed.
+
+  Induction proofs in Isar are set up by indicating over which predicate(s) 
+  the induction proceeds ("using a b") followed by the command "proof (induct)". 
+  In this way, Isabelle uses default settings for which induction should
+  be performed. These default settings can be overridden by giving more
+  information, like the variable over which a structural induction should
+  proceed, or a specific induction principle, such as well-founded induction. 
+
+  After the induction is set up, the proof proceeds by cases. In Isar these 
+  cases can be given in any order, but must be started with "case" and the 
+  name of the case, and optionally some legible names for the variables 
+  referred to inside the case. 
+
+  The possible case-names can be found by looking inside the menu "Isabelle -> 
+  Show me -> cases". In each "case", we need to establish a statement introduced 
+  by "show". Once this has been done, the next case can be started using "next". 
+  When all cases are completed, the proof can be finished using "qed".
+
+  This means a typical induction proof has the following pattern
+
+     proof (induct)
+       case \<dots>
+       \<dots>
+       show \<dots>
+     next
+       case \<dots>
+       \<dots>
+       show \<dots>
+     \<dots>
+     qed
+  
+  The following four lemmas are proved by induction on the predicate machines. All 
+  proofs establish the same property, namely a transitivity rule for machines. 
+  Complete Isar proof texts are given for the first three; the point of these 
+  three proofs is that each one increases the readability for humans. 
+
+*}
+
+text {***************************************************************** 
+
+  2.) EXERCISE
+  ------------
+
+  Remove the sorries in the proof below and fill in the correct 
+  justifications. 
+*}
+
+lemma 
+  assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" 
+  and     b: "<e2,Es2> \<mapsto>* <e3,Es3>"
+  shows "<e1,Es1> \<mapsto>* <e3,Es3>"
+using a b
+proof(induct)
+  case (ms1 e1 Es1)
+  have c: "<e1,Es1> \<mapsto>* <e3,Es3>" by fact
+  show "<e1,Es1> \<mapsto>* <e3,Es3>" sorry
+next
+  case (ms2 e1 Es1 e2 Es2 e2' Es2') 
+  have ih: "<e2',Es2'> \<mapsto>* <e3,Es3> \<Longrightarrow> <e2,Es2> \<mapsto>* <e3,Es3>" by fact
+  have d1: "<e2',Es2'> \<mapsto>* <e3,Es3>" by fact
+  have d2: "<e1,Es1> \<mapsto> <e2,Es2>" by fact
+  
+  show "<e1,Es1> \<mapsto>* <e3,Es3>" sorry
+qed
+
+text {* 
+  Just like gotos in the BASIC programming language, labels can reduce 
+  the readability of proofs. Therefore in Isar one can use the notation
+  "then have" in order to feed a have-statement into the proof of 
+  the next have-statement. In the proof below this has been used
+  in order to get rid of the labels c and d1. 
+*}
+
+lemma 
+  assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" 
+  and     b: "<e2,Es2> \<mapsto>* <e3,Es3>"
+  shows "<e1,Es1> \<mapsto>* <e3,Es3>"
+using a b
+proof(induct)
+  case (ms1 e1 Es1)
+  show "<e1,Es1> \<mapsto>* <e3,Es3>" by fact
+next
+  case (ms2 e1 Es1 e2 Es2 e2' Es2')
+  have ih: "<e2',Es2'> \<mapsto>* <e3,Es3> \<Longrightarrow> <e2,Es2> \<mapsto>* <e3,Es3>" by fact
+  have "<e2',Es2'> \<mapsto>* <e3,Es3>" by fact
+  then have d3: "<e2,Es2> \<mapsto>* <e3,Es3>" using ih by simp
+  have d2: "<e1,Es1> \<mapsto> <e2,Es2>" by fact
+  show "<e1,Es1> \<mapsto>* <e3,Es3>" using d2 d3 by auto
+qed
+
+text {* 
+  The labels d2 and d3 cannot be eliminated in this way, because both 
+  facts are needed to prove the show-statement. We can still avoid the
+  labels by feeding a sequence of facts into a proof using the chaining
+  mechanism:
+
+   have "statement1" \<dots>
+   moreover
+   have "statement2" \<dots>
+   \<dots>
+   moreover
+   have "statementn" \<dots>
+   ultimately have "statement" \<dots>
+
+  In this chain, all "statementi" can be used in the proof of the final 
+  "statement". With this we can simplify our proof further to:
+*}
+  
+lemma 
+  assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" 
+  and     b: "<e2,Es2> \<mapsto>* <e3,Es3>"
+  shows "<e1,Es1> \<mapsto>* <e3,Es3>"
+using a b
+proof(induct)
+  case (ms1 e1 Es1)
+  show "<e1,Es1> \<mapsto>* <e3,Es3>" by fact
+next
+  case (ms2 e1 Es1 e2 Es2 e2' Es2')
+  have ih: "<e2',Es2'> \<mapsto>* <e3,Es3> \<Longrightarrow> <e2,Es2> \<mapsto>* <e3,Es3>" by fact
+  have "<e2',Es2'> \<mapsto>* <e3,Es3>" by fact
+  then have "<e2,Es2> \<mapsto>* <e3,Es3>" using ih by simp
+  moreover
+  have "<e1,Es1> \<mapsto> <e2,Es2>" by fact
+  ultimately show "<e1,Es1> \<mapsto>* <e3,Es3>" by auto
+qed
+
+text {* 
+  While automatic proof procedures in Isabelle are not able to prove statements
+  like "P = NP" assuming usual definitions for P and NP, they can automatically
+  discharge the lemma we just proved. For this we only have to set up the induction
+  and auto will take care of the rest. This means we can write:
+*}
+
+lemma ms3[intro]:
+  assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" 
+  and     b: "<e2,Es2> \<mapsto>* <e3,Es3>"
+  shows "<e1,Es1> \<mapsto>* <e3,Es3>"
+using a b by (induct) (auto)
+
+text {* 
+  The attribute [intro] indicates that from now on this lemma will be used 
+  by the proof search performed by "auto" or "blast". 
+*}
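+
+text {* 
+  As a small illustration of the hint mechanism: with ms3 added to the hints 
+  database, a three-step transitivity fact should now be provable by proof 
+  search alone (this example is only a sketch of how [intro] hints are used). *}
+
+lemma 
+  assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" 
+  and     b: "<e2,Es2> \<mapsto>* <e3,Es3>"
+  and     c: "<e3,Es3> \<mapsto>* <e4,Es4>"
+  shows "<e1,Es1> \<mapsto>* <e4,Es4>"
+using a b c by blast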
+
+
+text {***************************************************************** 
+
+  A simple fact we need later on is that if t \<Down> t' then t' is a value. 
+
+*}
+
+lemma eval_to_val:
+  assumes a: "t \<Down> t'"
+  shows "val t'"
+using a by (induct) (auto)
+
+
+text {***************************************************************** 
+
+  3.) EXERCISE
+  ------------
+
+  Fill in the details in the proof below. The proof will establish the fact
+  that if t \<Down> t' then <t,[]> \<mapsto>* <t',[]>. As can be seen, the proof is by 
+  induction on the definition of eval. If you want to know how the predicates
+  machine and machines can be introduced, then use
+
+  thm machine.intros[no_vars]
+  thm machines.intros[no_vars]
+
+*}
+
+theorem 
+  assumes a: "t \<Down> t'"
+  shows "<t,[]> \<mapsto>* <t',[]>"
+using a 
+proof (induct)
+  case (e_Lam x t)
+  (* no assumptions *)
+  show "<Lam [x].t,[]> \<mapsto>* <Lam [x].t,[]>" sorry
+next
+  case (e_App t1 x t t2 v' v)
+  (* all assumptions in this case *)
+  have a1: "t1 \<Down> Lam [x].t" by fact
+  have ih1: "<t1,[]> \<mapsto>* <Lam [x].t,[]>" by fact
+  have a2: "t2 \<Down> v'" by fact
+  have ih2: "<t2,[]> \<mapsto>* <v',[]>" by fact
+  have a3: "t[x::=v'] \<Down> v" by fact
+  have ih3: "<t[x::=v'],[]> \<mapsto>* <v,[]>" by fact
+  (* your details *)
+  show "<App t1 t2,[]> \<mapsto>* <v,[]>" sorry
+qed
+
+text {* 
+  Again the automatic tools in Isabelle can take care of the routine 
+  work in these proofs automatically. We can write: *}
+
+theorem eval_implies_machines_ctx:
+  assumes a: "t \<Down> t'"
+  shows "<t,Es> \<mapsto>* <t',Es>"
+using a
+by (induct arbitrary: Es)
+   (metis eval_to_val machine.intros ms1 ms2 ms3 v_Lam)+
+
+corollary eval_implies_machines:
+  assumes a: "t \<Down> t'"
+  shows "<t,[]> \<mapsto>* <t',[]>"
+using a eval_implies_machines_ctx by simp
+
+text {***************************************************************** 
+
+  The Weakening Lemma
+  -------------------
+
+  The proof of the weakening lemma is often said to be simple, 
+  routine or trivial. Below we will see how this lemma can be
+  proved in Nominal Isabelle. First we define types, which
+  we define not as an ordinary datatype, but as a nominal datatype.
+
+*}
+
+nominal_datatype ty =
+  tVar "string"
+| tArr "ty" "ty" ("_ \<rightarrow> _")
+
+text {* 
+  Having defined types as a nominal datatype gives us additional
+  definitions and theorems about the type ty (see below).
+  *}
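+
+text {* 
+  For instance, the injectivity and induction theorems for ty below are 
+  generated automatically; we will make use of them later on. *}
+
+thm ty.inject[no_vars]
+thm ty.induct[no_vars]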
+
+text {* 
+  We next define the type of typing contexts, a predicate that
+  defines valid contexts (i.e. lists in which every variable
+  occurs at most once) and the typing judgement.
+
+*}
+
+types ty_ctx = "(name\<times>ty) list"
+
+inductive
+  valid :: "ty_ctx \<Rightarrow> bool"
+where
+  v1[intro]: "valid []"
+| v2[intro]: "\<lbrakk>valid \<Gamma>; x\<sharp>\<Gamma>\<rbrakk>\<Longrightarrow> valid ((x,T)#\<Gamma>)"
+
+inductive
+  typing :: "ty_ctx \<Rightarrow> lam \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _") 
+where
+  t_Var[intro]:  "\<lbrakk>valid \<Gamma>; (x,T)\<in>set \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Var x : T"
+| t_App[intro]:  "\<lbrakk>\<Gamma> \<turnstile> t1 : T1\<rightarrow>T2; \<Gamma> \<turnstile> t2 : T1\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> App t1 t2 : T2"
+| t_Lam[intro]:  "\<lbrakk>x\<sharp>\<Gamma>; (x,T1)#\<Gamma> \<turnstile> t : T2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Lam [x].t : T1 \<rightarrow> T2"
+
+
+text {*
+  The predicate x\<sharp>\<Gamma>, read as x fresh for \<Gamma>, is defined by Nominal Isabelle.
+  Freshness is defined for lambda-terms, products, lists etc. For example
+  we have:
+  *}
+
+lemma
+  fixes x::"name"
+  shows "x\<sharp>Lam [x].t"
+  and   "x\<sharp>(t1,t2) \<Longrightarrow> x\<sharp>App t1 t2"
+  and   "x\<sharp>(Var y) \<Longrightarrow> x\<sharp>y" 
+  and   "\<lbrakk>x\<sharp>t1; x\<sharp>t2\<rbrakk> \<Longrightarrow> x\<sharp>(t1,t2)"
+  and   "\<lbrakk>x\<sharp>l1; x\<sharp>l2\<rbrakk> \<Longrightarrow> x\<sharp>(l1@l2)"
+  and   "x\<sharp>y \<Longrightarrow> x\<noteq>y" 
+by (simp_all add: abs_fresh fresh_prod fresh_list_append fresh_atm)
+
+text {* We can also prove that every variable is fresh for a type *}
+
+lemma ty_fresh:
+  fixes x::"name"
+  and   T::"ty"
+  shows "x\<sharp>T"
+by (induct T rule: ty.induct)
+   (simp_all add: fresh_string)
+
+text {* 
+  In order to state the weakening lemma in a convenient form, we overload
+  the subset-notation and define the abbreviation below. Abbreviations behave
+  like definitions, except that they are always automatically folded and
+  unfolded.
+*}
+
+abbreviation
+  "sub_ty_ctx" :: "ty_ctx \<Rightarrow> ty_ctx \<Rightarrow> bool" ("_ \<subseteq> _") 
+where
+  "\<Gamma>1 \<subseteq> \<Gamma>2 \<equiv> \<forall>x. x \<in> set \<Gamma>1 \<longrightarrow> x \<in> set \<Gamma>2"
+
+text {***************************************************************** 
+
+  4.) Exercise
+  ------------
+
+  Fill in the details and give a proof of the weakening lemma. 
+
+*}
+
+lemma 
+  fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
+  assumes a: "\<Gamma>1 \<turnstile> t : T"
+  and     b: "valid \<Gamma>2" 
+  and     c: "\<Gamma>1 \<subseteq> \<Gamma>2"
+  shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (induct arbitrary: \<Gamma>2)
+  case (t_Var \<Gamma>1 x T)
+  have a1: "valid \<Gamma>1" by fact
+  have a2: "(x,T) \<in> set \<Gamma>1" by fact
+  have a3: "valid \<Gamma>2" by fact
+  have a4: "\<Gamma>1 \<subseteq> \<Gamma>2" by fact
+
+  show "\<Gamma>2 \<turnstile> Var x : T" sorry
+next
+  case (t_Lam x \<Gamma>1 T1 t T2) 
+  have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x,T1)#\<Gamma>1 \<subseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
+  have a0: "x\<sharp>\<Gamma>1" by fact
+  have a1: "valid \<Gamma>2" by fact
+  have a2: "\<Gamma>1 \<subseteq> \<Gamma>2" by fact
+
+  show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
+qed (auto)
+
+
+text {* 
+  Despite the frequent claim that the weakening lemma is trivial,
+  routine or obvious, the proof in the lambda-case does not go 
+  through smoothly. Painful variable renamings seem to be necessary, 
+  and the proof using these renamings is horribly complicated. It would 
+  be interesting to know whether people who claim this proof is trivial, 
+  routine or obvious had this proof in mind. 
+
+  BTW: The following two commands already help with showing that validity 
+  and typing are invariant under permutative variable renamings. 
+*}
+
+equivariance valid
+equivariance typing
+
+lemma not_to_be_tried_at_home_weakening: 
+  fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
+  assumes a: "\<Gamma>1 \<turnstile> t : T"
+  and     b: "valid \<Gamma>2" 
+  and     c: "\<Gamma>1 \<subseteq> \<Gamma>2"
+  shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (induct arbitrary: \<Gamma>2)
+  case (t_Lam x \<Gamma>1 T1 t T2) (* lambda case *)
+  have fc0: "x\<sharp>\<Gamma>1" by fact
+  have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x,T1)#\<Gamma>1 \<subseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
+  obtain c::"name" where fc1: "c\<sharp>(x,t,\<Gamma>1,\<Gamma>2)"  (* we obtain a fresh name *)
+    by (rule exists_fresh) (auto simp add: fs_name1)
+  have "Lam [c].([(c,x)]\<bullet>t) = Lam [x].t" using fc1 (* we then alpha-rename the lambda-abstraction *)
+    by (auto simp add: lam.inject alpha fresh_prod fresh_atm)
+  moreover
+  have "\<Gamma>2 \<turnstile> Lam [c].([(c,x)]\<bullet>t) : T1 \<rightarrow> T2" (* we can then alpha-rename our original goal *)
+  proof - 
+    (* we establish (x,T1)#\<Gamma>1 \<subseteq> (x,T1)#([(c,x)]\<bullet>\<Gamma>2) and valid ((x,T1)#([(c,x)]\<bullet>\<Gamma>2)) *)
+    have "(x,T1)#\<Gamma>1 \<subseteq> (x,T1)#([(c,x)]\<bullet>\<Gamma>2)" 
+    proof -
+      have "\<Gamma>1 \<subseteq> \<Gamma>2" by fact
+      then have "[(c,x)]\<bullet>((x,T1)#\<Gamma>1 \<subseteq> (x,T1)#([(c,x)]\<bullet>\<Gamma>2))" using fc0 fc1 
+	by (perm_simp add: eqvts calc_atm perm_fresh_fresh ty_fresh)
+      then show "(x,T1)#\<Gamma>1 \<subseteq> (x,T1)#([(c,x)]\<bullet>\<Gamma>2)" by (rule perm_boolE)
+    qed
+    moreover 
+    have "valid ((x,T1)#([(c,x)]\<bullet>\<Gamma>2))" 
+    proof -
+      have "valid \<Gamma>2" by fact
+      then show "valid ((x,T1)#([(c,x)]\<bullet>\<Gamma>2))" using fc1
+	by (auto intro!: v2 simp add: fresh_left calc_atm eqvts)
+    qed
+    (* these two facts give us by induction hypothesis the following *)
+    ultimately have "(x,T1)#([(c,x)]\<bullet>\<Gamma>2) \<turnstile> t : T2" using ih by simp 
+    (* we now apply renamings to get to our goal *)
+    then have "[(c,x)]\<bullet>((x,T1)#([(c,x)]\<bullet>\<Gamma>2) \<turnstile> t : T2)" by (rule perm_boolI)
+    then have "(c,T1)#\<Gamma>2 \<turnstile> ([(c,x)]\<bullet>t) : T2" using fc1 
+      by (perm_simp add: eqvts calc_atm perm_fresh_fresh ty_fresh)
+    then show "\<Gamma>2 \<turnstile> Lam [c].([(c,x)]\<bullet>t) : T1 \<rightarrow> T2" using fc1 by auto
+  qed
+  ultimately show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" by simp
+qed (auto) (* var and app cases *)
+
+
+text {* 
+  The remedy to the complicated weakening proof
+  shown above is to use a stronger induction principle that
+  has the usual variable convention already built in. The
+  following command derives this induction principle for us.
+  (We shall explain what happens here later.)
+
+*}
+
+nominal_inductive typing
+  by (simp_all add: abs_fresh ty_fresh)
+
+text {* Compare the two induction principles *}
+thm typing.induct[no_vars]
+thm typing.strong_induct[no_vars]
+
+text {* 
+  We can use the stronger induction principle by replacing
+  the line
+
+  proof (induct arbitrary: \<Gamma>2)
+
+  with 
+  
+  proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
+
+  Try now the proof.
+
+*}
+  
+
+lemma  
+  fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
+  and   t ::"lam"
+  and   \<tau> ::"ty"
+  assumes a: "\<Gamma>1 \<turnstile> t : T"
+  and     b: "valid \<Gamma>2" 
+  and     c: "\<Gamma>1 \<subseteq> \<Gamma>2"
+  shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
+  case (t_Var \<Gamma>1 x T)  (* variable case *)
+  have "\<Gamma>1 \<subseteq> \<Gamma>2" by fact 
+  moreover  
+  have "valid \<Gamma>2" by fact 
+  moreover 
+  have "(x,T)\<in> set \<Gamma>1" by fact
+  ultimately show "\<Gamma>2 \<turnstile> Var x : T" by auto
+next
+  case (t_Lam x \<Gamma>1 T1 t T2) 
+  have vc: "x\<sharp>\<Gamma>2" by fact   (* additional fact *)
+  have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x,T1)#\<Gamma>1 \<subseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
+  have a0: "x\<sharp>\<Gamma>1" by fact
+  have a1: "valid \<Gamma>2" by fact
+  have a2: "\<Gamma>1 \<subseteq> \<Gamma>2" by fact
+
+  show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
+qed (auto) (* app case *)
+
+text {*
+  Since we can use the stronger induction principle, the
+  proof of the weakening lemma can actually be found 
+  automatically by Isabelle. Maybe the weakening lemma
+  is actually trivial (in Nominal Isabelle ;o). 
+*}
+
+lemma weakening: 
+  fixes \<Gamma>1 \<Gamma>2::"ty_ctx"
+  assumes a: "\<Gamma>1 \<turnstile> t : T" 
+  and     b: "valid \<Gamma>2" 
+  and     c: "\<Gamma>1 \<subseteq> \<Gamma>2"
+  shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+by (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
+   (auto)
+
+
+text {***************************************************************** 
+
+  Function Definitions: Filling a Lambda-Term into a Context
+  ----------------------------------------------------------
+
+  Many functions over datatypes can be defined by recursion on the
+  structure. For this purpose, Isabelle provides "fun". To use it one needs 
+  to give a name for the function, its type, optionally some pretty-syntax 
+  and then some equations defining the function. As with "inductive",
+  multiple equations are separated by "|".
+
+*}
+
+fun
+  filling :: "ctx \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>")
+where
+  "\<box>\<lbrakk>t\<rbrakk> = t"
+| "(CAppL E t')\<lbrakk>t\<rbrakk> = App (E\<lbrakk>t\<rbrakk>) t'"
+| "(CAppR t' E)\<lbrakk>t\<rbrakk> = App t' (E\<lbrakk>t\<rbrakk>)"
+
+text {* 
+  After this definition Isabelle will be able to simplify
+  statements like: *}
+
+lemma 
+  shows "(CAppL \<box> (Var x))\<lbrakk>Var y\<rbrakk> = App (Var y) (Var x)"
+  by simp
+
+
+fun 
+ ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" ("_ \<circ> _" [101,100] 100)
+where
+  "\<box> \<circ> E' = E'"
+| "(CAppL E t') \<circ> E' = CAppL (E \<circ> E') t'"
+| "(CAppR t' E) \<circ> E' = CAppR t' (E \<circ> E')"
+
+fun
+  ctx_composes :: "ctxs \<Rightarrow> ctx" ("_\<down>" [110] 110)
+where
+    "[]\<down> = \<box>"
+  | "(E#Es)\<down> = (Es\<down>) \<circ> E"
+
+text {*  
+  Notice that we have not just given a pretty syntax for the functions, but
+  also some precedences. The numbers inside the [\<dots>] stand for the precedences
+  of the arguments; the number next to them is the precedence of the whole term.
+
+  This means we have to write (Es1 \<circ> Es2) \<circ> Es3, because otherwise Es1 \<circ> Es2 \<circ> Es3 is
+  interpreted as Es1 \<circ> (Es2 \<circ> Es3).
+*}
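+
+text {* 
+  As a small check of these precedences: the unbracketed term below should be 
+  parsed as the right-nested composition, so the equation should hold trivially. *}
+
+lemma 
+  fixes E1 E2 E3::"ctx"
+  shows "E1 \<circ> E2 \<circ> E3 = E1 \<circ> (E2 \<circ> E3)"
+by simp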
+
+text {******************************************************************
+  
+  Structural Inductions over Contexts
+  ------------------------------------
+
+  So far we have had a look at an induction over an inductive predicate. 
+  Another important induction principle is structural induction for 
+  datatypes. To illustrate structural inductions we prove some facts
+  about context composition, some of which we will need later on.
+
+*}
+
+text {******************************************************************
+
+  5.) EXERCISE
+  ------------
+
+  Complete the proof and remove the sorries.
+
+*}
+
+lemma ctx_compose:
+  shows "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>"
+proof (induct E1)
+  case Hole
+  show "\<box> \<circ> E2\<lbrakk>t\<rbrakk> = \<box>\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
+next
+  case (CAppL E1 t')
+  have ih: "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by fact
+  show "((CAppL E1 t') \<circ> E2)\<lbrakk>t\<rbrakk> = (CAppL E1 t')\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
+next
+  case (CAppR t' E1)
+  have ih: "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by fact
+  show "((CAppR t' E1) \<circ> E2)\<lbrakk>t\<rbrakk> = (CAppR t' E1)\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
+qed
+
+
+text {******************************************************************
+
+  6.) EXERCISE
+  ------------
+
+  Using the lemmas neut_hole and circ_assoc given below, complete the proof 
+  of the lemma relating framestack flattening (\<down>) and context composition. 
+
+*}
+
+lemma neut_hole:
+  shows "E \<circ> \<box> = E"
+by (induct E) (simp_all)
+
+lemma circ_assoc:
+  fixes E1 E2 E3::"ctx"
+  shows "(E1 \<circ> E2) \<circ> E3 = E1 \<circ> (E2 \<circ> E3)"
+by (induct E1) (simp_all)
+
+lemma
+  shows "(Es1@Es2)\<down> = (Es2\<down>) \<circ> (Es1\<down>)"
+proof (induct Es1)
+  case Nil
+  show "([]@Es2)\<down> = Es2\<down> \<circ> []\<down>" sorry
+next
+  case (Cons E Es1)
+  have ih: "(Es1@Es2)\<down> = Es2\<down> \<circ> Es1\<down>" by fact
+
+  show "((E#Es1)@Es2)\<down> = Es2\<down> \<circ> (E#Es1)\<down>" sorry
+qed
+
+
+text {* 
+  The last proof involves several steps of equational reasoning.
+  Isar provides some convenient means to express such equational 
+  reasoning in a much cleaner fashion using the "also have" 
+  construction. *}
+
+lemma 
+  shows "(Es1@Es2)\<down> = (Es2\<down>) \<circ> (Es1\<down>)"
+proof (induct Es1)
+  case Nil
+  show "([]@Es2)\<down> = Es2\<down> \<circ> []\<down>" using neut_hole by simp
+next
+  case (Cons E Es1)
+  have ih: "(Es1@Es2)\<down> = Es2\<down> \<circ> Es1\<down>" by fact
+  have "((E#Es1)@Es2)\<down> = (Es1@Es2)\<down> \<circ> E" by simp
+  also have "\<dots> = (Es2\<down> \<circ> Es1\<down>) \<circ> E" using ih by simp
+  also have "\<dots> = Es2\<down> \<circ> (Es1\<down> \<circ> E)" using circ_assoc by simp
+  also have "\<dots> = Es2\<down> \<circ> (E#Es1)\<down>" by simp
+  finally show "((E#Es1)@Es2)\<down> = Es2\<down> \<circ> (E#Es1)\<down>" by simp
+qed
+
+
+text {******************************************************************
+  
+  Formalising Barendregt's Proof of the Substitution Lemma
+  --------------------------------------------------------
+
+  Barendregt's proof needs a case distinction in the variable case.
+  One way to do this in Isar is to use blocks. A block is a sequence
+  of reasoning steps enclosed in curly braces
+
+  { \<dots>
+
+    have "statement"
+  }
+
+  Such a block can contain local assumptions like
+
+  { assume "A"
+    assume "B"
+    \<dots>
+    have "C" by \<dots>
+  }
+
+  Where "C" is the last have-statement in this block. The behaviour 
+  of such a block to the 'outside' is the implication
+
+   \<lbrakk>A; B\<rbrakk> \<Longrightarrow> "C" 
+
+  Now if we want to prove a property "smth" using the case-distinctions
+  P\<^isub>1, P\<^isub>2 and P\<^isub>3 then we can use the following reasoning:
+
+    { assume "P\<^isub>1"
+      \<dots>
+      have "smth"
+    }
+    moreover
+    { assume "P\<^isub>2"
+      \<dots>
+      have "smth"
+    }
+    moreover
+    { assume "P\<^isub>3"
+      \<dots>
+      have "smth"
+    }
+    ultimately have "smth" by blast
+
+  The blocks establish the implications
+
+    P\<^isub>1 \<Longrightarrow> smth
+    P\<^isub>2 \<Longrightarrow> smth
+    P\<^isub>3 \<Longrightarrow> smth
+
+  If we know that P\<^isub>1, P\<^isub>2 and P\<^isub>3 cover all the cases, that is P\<^isub>1 \<or> P\<^isub>2 \<or> P\<^isub>3 is
+  true, then we have 'ultimately' established the property "smth". 
+  
+*}
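+
+text {* 
+  As a small, self-contained illustration of this pattern (unrelated to the 
+  substitution lemma), a case distinction on whether a natural number is 
+  zero can be written with blocks as follows: *}
+
+lemma 
+  fixes n::"nat"
+  shows "n = 0 \<or> 0 < n"
+proof -
+  { assume "n = 0"
+    then have "n = 0 \<or> 0 < n" by simp 
+  }
+  moreover
+  { assume "n \<noteq> 0"
+    then have "0 < n" by simp
+    then have "n = 0 \<or> 0 < n" by simp 
+  }
+  ultimately show "n = 0 \<or> 0 < n" by blast
+qed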
+
+text {******************************************************************
+  
+  7.) Exercise
+  ------------
+
+  Fill in the cases 1.2 and 1.3 and the equational reasoning 
+  in the lambda-case.
+*}
+
+thm forget[no_vars] 
+thm fresh_fact[no_vars] 
+
+lemma 
+  assumes a: "x\<noteq>y"
+  and     b: "x\<sharp>L"
+  shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]"
+using a b
+proof (nominal_induct M avoiding: x y N L rule: lam.strong_induct)
+  case (Var z)
+  have a1: "x\<noteq>y" by fact
+  have a2: "x\<sharp>L" by fact
+  show "Var z[x::=N][y::=L] = Var z[y::=L][x::=N[y::=L]]" (is "?LHS = ?RHS")
+  proof -
+    { (*Case 1.1*)
+      assume c1: "z=x"
+      have "(1)": "?LHS = N[y::=L]" using c1 by simp
+      have "(2)": "?RHS = N[y::=L]" using c1 a1 by simp
+      have "?LHS = ?RHS" using "(1)" "(2)" by simp
+    }
+    moreover 
+    { (*Case 1.2*)
+      assume c2: "z=y" "z\<noteq>x" 
+      
+      have "?LHS = ?RHS" sorry
+    }
+    moreover 
+    { (*Case 1.3*)
+      assume c3: "z\<noteq>x" "z\<noteq>y"
+      
+      have "?LHS = ?RHS" sorry
+    }
+    ultimately show "?LHS = ?RHS" by blast
+  qed
+next
+  case (Lam z M1) (* case 2: lambdas *)
+  have ih: "\<lbrakk>x\<noteq>y; x\<sharp>L\<rbrakk> \<Longrightarrow> M1[x::=N][y::=L] = M1[y::=L][x::=N[y::=L]]" by fact
+  have a1: "x\<noteq>y" by fact
+  have a2: "x\<sharp>L" by fact
+  have fs: "z\<sharp>x" "z\<sharp>y" "z\<sharp>N" "z\<sharp>L" by fact+
+  then have b: "z\<sharp>N[y::=L]" by (simp add: fresh_fact)
+  show "(Lam [z].M1)[x::=N][y::=L] = (Lam [z].M1)[y::=L][x::=N[y::=L]]" (is "?LHS=?RHS") 
+  proof - 
+    have "?LHS = \<dots>" sorry
+
+    also have "\<dots> = ?RHS" sorry
+    finally show "?LHS = ?RHS" by simp
+  qed
+next
+  case (App M1 M2) (* case 3: applications *)
+  then show "(App M1 M2)[x::=N][y::=L] = (App M1 M2)[y::=L][x::=N[y::=L]]" by simp
+qed
+
+text {* 
+  Again the strong induction principle enables Isabelle to find
+  the proof of the substitution lemma automatically. 
+*}
+
+lemma substitution_lemma_version:  
+  assumes asm: "x\<noteq>y" "x\<sharp>L"
+  shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]"
+  using asm 
+by (nominal_induct M avoiding: x y N L rule: lam.strong_induct)
+   (auto simp add: fresh_fact forget)
+
+text {******************************************************************
+  
+  The CBV Reduction Relation (Small-Step Semantics) 
+  -------------------------------------------------
+
+  In order to establish the property that the CK machine
+  calculates a normal form that corresponds to the
+  evaluation relation, we introduce the call-by-value
+  small-step semantics.
+
+*}
+
+inductive
+  cbv :: "lam\<Rightarrow>lam\<Rightarrow>bool" ("_ \<longrightarrow>cbv _") 
+where
+  cbv1: "\<lbrakk>val v; x\<sharp>v\<rbrakk> \<Longrightarrow> App (Lam [x].t) v \<longrightarrow>cbv t[x::=v]"
+| cbv2[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> App t t2 \<longrightarrow>cbv App t' t2"
+| cbv3[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> App t2 t \<longrightarrow>cbv App t2 t'"
+
+equivariance val
+equivariance cbv
+nominal_inductive cbv
+ by (simp_all add: abs_fresh fresh_fact)
+
+text {*
+  In order to satisfy the vc-condition we have to formulate
+  this relation with the additional freshness constraint
+  x\<sharp>v. Though this makes the definition vc-compatible, it
+  makes the definition less useful. With some pain we can
+  show that the more restricted rule is equivalent to the
+  usual rule. *}
+
+thm subst_rename
+
+lemma better_cbv1[intro]: 
+  assumes a: "val v" 
+  shows "App (Lam [x].t) v \<longrightarrow>cbv t[x::=v]"
+proof -
+  obtain y::"name" where fs: "y\<sharp>(x,t,v)" by (rule exists_fresh) (auto simp add: fs_name1)
+  have "App (Lam [x].t) v = App (Lam [y].([(y,x)]\<bullet>t)) v" using fs
+    by (auto simp add: lam.inject alpha' fresh_prod fresh_atm)
+  also have "\<dots> \<longrightarrow>cbv  ([(y,x)]\<bullet>t)[y::=v]" using fs a by (auto intro: cbv1)
+  also have "\<dots> = t[x::=v]" using fs by (simp add: subst_rename[symmetric])
+  finally show "App (Lam [x].t) v \<longrightarrow>cbv t[x::=v]" by simp
+qed
+
+text {*
+  The transitive closure of the cbv-reduction relation: *}
+
+inductive 
+  "cbvs" :: "lam\<Rightarrow>lam\<Rightarrow>bool" (" _ \<longrightarrow>cbv* _")
+where
+  cbvs1[intro]: "e \<longrightarrow>cbv* e"
+| cbvs2[intro]: "\<lbrakk>e1\<longrightarrow>cbv e2; e2 \<longrightarrow>cbv* e3\<rbrakk> \<Longrightarrow> e1 \<longrightarrow>cbv* e3"
+
+lemma cbvs3[intro]:
+  assumes a: "e1 \<longrightarrow>cbv* e2" "e2 \<longrightarrow>cbv* e3"
+  shows "e1 \<longrightarrow>cbv* e3"
+using a by (induct) (auto) 
+
+text {******************************************************************
+  
+  8.) Exercise
+  ------------
+
+  If more simple exercises are needed, then complete the following proof. 
+
+*}
+
+lemma cbv_in_ctx:
+  assumes a: "t \<longrightarrow>cbv t'"
+  shows "E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>"
+using a
+proof (induct E)
+  case Hole
+  have "t \<longrightarrow>cbv t'" by fact
+  then show "\<box>\<lbrakk>t\<rbrakk> \<longrightarrow>cbv \<box>\<lbrakk>t'\<rbrakk>" sorry
+next
+  case (CAppL E s)
+  have ih: "t \<longrightarrow>cbv t' \<Longrightarrow> E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" by fact
+  have a: "t \<longrightarrow>cbv t'" by fact
+  show "(CAppL E s)\<lbrakk>t\<rbrakk> \<longrightarrow>cbv (CAppL E s)\<lbrakk>t'\<rbrakk>" sorry
+next
+  case (CAppR s E)
+  have ih: "t \<longrightarrow>cbv t' \<Longrightarrow> E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" by fact
+  have a: "t \<longrightarrow>cbv t'" by fact
+  show "(CAppR s E)\<lbrakk>t\<rbrakk> \<longrightarrow>cbv (CAppR s E)\<lbrakk>t'\<rbrakk>" sorry
+qed
+
+  
+text {******************************************************************
+  
+  9.) Exercise
+  ------------
+
+  The point of the cbv-reduction is that we can relatively easily 
+  establish the following property:
+
+*}
+
+lemma machine_implies_cbvs_ctx:
+  assumes a: "<e,Es> \<mapsto> <e',Es'>"
+  shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>"
+using a 
+proof (induct)
+  case (m1 t1 t2 Es)
+
+  show "Es\<down>\<lbrakk>App t1 t2\<rbrakk> \<longrightarrow>cbv* ((CAppL \<box> t2)#Es)\<down>\<lbrakk>t1\<rbrakk>"  sorry
+next
+  case (m2 v t2 Es)
+  have "val v" by fact
+
+  show "((CAppL \<box> t2)#Es)\<down>\<lbrakk>v\<rbrakk> \<longrightarrow>cbv* (CAppR v \<box> # Es)\<down>\<lbrakk>t2\<rbrakk>" sorry
+next
+  case (m3 v x t Es)
+  have "val v" by fact
+ 
+  show "(((CAppR (Lam [x].t) \<box>)#Es)\<down>)\<lbrakk>v\<rbrakk> \<longrightarrow>cbv* (Es\<down>)\<lbrakk>t[x::=v]\<rbrakk>" sorry
+qed
+
+text {* 
+  It is not difficult to extend the lemma above to
+  arbitrary reduction sequences of the CK machine. *}
+
+lemma machines_implies_cbvs_ctx:
+  assumes a: "<e,Es> \<mapsto>* <e',Es'>"
+  shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>"
+using a 
+by (induct) (auto dest: machine_implies_cbvs_ctx)
+
+text {* 
+  So whenever we let the CK machine start in an initial
+  state and it arrives at a final state, there exists
+  a corresponding cbv-reduction sequence. *}
+
+corollary machines_implies_cbvs:
+  assumes a: "<e,[]> \<mapsto>* <e',[]>"
+  shows "e \<longrightarrow>cbv* e'"
+using a by (auto dest: machines_implies_cbvs_ctx)
+
+text {*
+  We now want to relate the cbv-reduction to the evaluation
+  relation. For this we need two auxiliary lemmas. *}
+
+lemma eval_val:
+  assumes a: "val t"
+  shows "t \<Down> t"
+using a by (induct) (auto)
+
+lemma e_App_elim:
+  assumes a: "App t1 t2 \<Down> v"
+  shows "\<exists>x t v'. t1 \<Down> Lam [x].t \<and> t2 \<Down> v' \<and> t[x::=v'] \<Down> v"
+using a by (cases) (auto simp add: lam.inject) 
+
+text {******************************************************************
+  
+  10.) Exercise
+  -------------
+
+  Complete the first case in the proof below. 
+
+*}
+
+lemma cbv_eval:
+  assumes a: "t1 \<longrightarrow>cbv t2" "t2 \<Down> t3"
+  shows "t1 \<Down> t3"
+using a
+proof(induct arbitrary: t3)
+  case (cbv1 v x t t3)
+  have a1: "val v" by fact
+  have a2: "t[x::=v] \<Down> t3" by fact
+
+  show "App Lam [x].t v \<Down> t3" sorry
+next
+  case (cbv2 t t' t2 t3)
+  have ih: "\<And>t3. t' \<Down> t3 \<Longrightarrow> t \<Down> t3" by fact
+  have "App t' t2 \<Down> t3" by fact
+  then obtain x t'' v' 
+    where a1: "t' \<Down> Lam [x].t''" 
+      and a2: "t2 \<Down> v'" 
+      and a3: "t''[x::=v'] \<Down> t3" using e_App_elim by blast
+  have "t \<Down>  Lam [x].t''" using ih a1 by auto 
+  then show "App t t2 \<Down> t3" using a2 a3 by auto
+qed (auto dest!: e_App_elim)
+
+
+text {* 
+  Next we extend the lemma above to arbitrary initial
+  sequences of cbv-reductions. *}
+
+lemma cbvs_eval:
+  assumes a: "t1 \<longrightarrow>cbv* t2" "t2 \<Down> t3"
+  shows "t1 \<Down> t3"
+using a by (induct) (auto intro: cbv_eval)
+
+text {* 
+  Finally, we can show that if from a term t we reach a value 
+  by a cbv-reduction sequence, then t evaluates to this value. *}
+
+lemma cbvs_implies_eval:
+  assumes a: "t \<longrightarrow>cbv* v" "val v"
+  shows "t \<Down> v"
+using a
+by (induct) (auto intro: eval_val cbvs_eval)
+
+text {* 
+  All the facts tied together give us the desired property about
+  the CK machine. *}
+
+theorem machines_implies_eval:
+  assumes a: "<t1,[]> \<mapsto>* <t2,[]>" 
+  and     b: "val t2" 
+  shows "t1 \<Down> t2"
+proof -
+  have "t1 \<longrightarrow>cbv* t2" using a by (simp add: machines_implies_cbvs)
+  then show "t1 \<Down> t2" using b by (simp add: cbvs_implies_eval)
+qed
+
+text {******************************************************************
+  
+  Formalising a Type-Soundness and Progress Lemma for CBV
+  -------------------------------------------------------
+
+  The central lemma for type-soundness is type-substitutivity. In 
+  our setting type-substitutivity is slightly painful to establish.
+
+*}
+
+lemma valid_elim:
+  assumes a: "valid ((x,T)#\<Gamma>)"
+  shows "x\<sharp>\<Gamma> \<and> valid \<Gamma>"
+using a by (cases) (auto)
+
+lemma valid_insert:
+  assumes a: "valid (\<Delta>@[(x,T)]@\<Gamma>)"
+  shows "valid (\<Delta>@\<Gamma>)" 
+using a
+by (induct \<Delta>)
+   (auto simp add: fresh_list_append fresh_list_cons dest!: valid_elim)
+
+lemma fresh_list: 
+  shows "y\<sharp>xs = (\<forall>x\<in>set xs. y\<sharp>x)"
+by (induct xs) (simp_all add: fresh_list_nil fresh_list_cons)
+
+lemma context_unique:
+  assumes a1: "valid \<Gamma>"
+  and     a2: "(x,T) \<in> set \<Gamma>"
+  and     a3: "(x,U) \<in> set \<Gamma>"
+  shows "T = U" 
+using a1 a2 a3
+by (induct) (auto simp add: fresh_list fresh_prod fresh_atm)
+
+lemma type_substitution_aux:
+  assumes a: "(\<Delta>@[(x,T')]@\<Gamma>) \<turnstile> e : T"
+  and     b: "\<Gamma> \<turnstile> e' : T'"
+  shows "(\<Delta>@\<Gamma>) \<turnstile> e[x::=e'] : T" 
+using a b 
+proof (nominal_induct \<Gamma>'\<equiv>"\<Delta>@[(x,T')]@\<Gamma>" e T avoiding: x e' \<Delta> rule: typing.strong_induct)
+  case (t_Var \<Gamma>' y T x e' \<Delta>)
+  then have a1: "valid (\<Delta>@[(x,T')]@\<Gamma>)" 
+       and  a2: "(y,T) \<in> set (\<Delta>@[(x,T')]@\<Gamma>)" 
+       and  a3: "\<Gamma> \<turnstile> e' : T'" by simp_all
+  from a1 have a4: "valid (\<Delta>@\<Gamma>)" by (rule valid_insert)
+  { assume eq: "x=y"
+    from a1 a2 have "T=T'" using eq by (auto intro: context_unique)
+    with a3 have "\<Delta>@\<Gamma> \<turnstile> Var y[x::=e'] : T" using eq a4 by (auto intro: weakening)
+  }
+  moreover
+  { assume ineq: "x\<noteq>y"
+    from a2 have "(y,T) \<in> set (\<Delta>@\<Gamma>)" using ineq by simp
+    then have "\<Delta>@\<Gamma> \<turnstile> Var y[x::=e'] : T" using ineq a4 by auto
+  }
+  ultimately show "\<Delta>@\<Gamma> \<turnstile> Var y[x::=e'] : T" by blast
+qed (force simp add: fresh_list_append fresh_list_cons)+
+
+corollary type_substitution:
+  assumes a: "(x,T')#\<Gamma> \<turnstile> e : T"
+  and     b: "\<Gamma> \<turnstile> e' : T'"
+  shows "\<Gamma> \<turnstile> e[x::=e'] : T"
+using a b type_substitution_aux[where \<Delta>="[]"]
+by (auto)
+
+lemma t_App_elim:
+  assumes a: "\<Gamma> \<turnstile> App t1 t2 : T"
+  shows "\<exists>T'. \<Gamma> \<turnstile> t1 : T' \<rightarrow> T \<and> \<Gamma> \<turnstile> t2 : T'"
+using a
+by (cases) (auto simp add: lam.inject)
+
+lemma t_Lam_elim:
+  assumes ty: "\<Gamma> \<turnstile> Lam [x].t : T" 
+  and     fc: "x\<sharp>\<Gamma>" 
+  shows "\<exists>T1 T2. T = T1 \<rightarrow> T2 \<and> (x,T1)#\<Gamma> \<turnstile> t : T2"
+using ty fc 
+by (cases rule: typing.strong_cases) 
+   (auto simp add: alpha lam.inject abs_fresh ty_fresh)
+
+theorem cbv_type_preservation:
+  assumes a: "t \<longrightarrow>cbv t'"
+  and     b: "\<Gamma> \<turnstile> t : T" 
+  shows "\<Gamma> \<turnstile> t' : T"
+using a b
+by (nominal_induct avoiding: \<Gamma> T rule: cbv.strong_induct)
+   (auto dest!: t_Lam_elim t_App_elim simp add: type_substitution ty.inject)
+
+corollary cbvs_type_preservation:
+  assumes a: "t \<longrightarrow>cbv* t'"
+  and     b: "\<Gamma> \<turnstile> t : T" 
+  shows "\<Gamma> \<turnstile> t' : T"
+using a b
+by (induct) (auto intro: cbv_type_preservation)
+
+text {* 
+  The Type-Preservation Property for the Machine and Evaluation Relation. *}
+
+theorem machine_type_preservation:
+  assumes a: "<t,[]> \<mapsto>* <t',[]>"
+  and     b: "\<Gamma> \<turnstile> t : T" 
+  shows "\<Gamma> \<turnstile> t' : T"
+proof -
+  from a have "t \<longrightarrow>cbv* t'" by (simp add: machines_implies_cbvs)
+  then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: cbvs_type_preservation)
+qed
+
+theorem eval_type_preservation:
+  assumes a: "t \<Down> t'"
+  and     b: "\<Gamma> \<turnstile> t : T" 
+  shows "\<Gamma> \<turnstile> t' : T"
+proof -
+  from a have "<t,[]> \<mapsto>* <t',[]>" by (simp add: eval_implies_machines)
+  then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: machine_type_preservation)
+qed
+
+text {* The Progress Property *}
+
+lemma canonical_tArr:
+  assumes a: "[] \<turnstile> t : T1 \<rightarrow> T2"
+  and     b: "val t"
+  shows "\<exists>x t'. t = Lam [x].t'"
+using b a by (induct) (auto) 
+
+theorem progress:
+  assumes a: "[] \<turnstile> t : T"
+  shows "(\<exists>t'. t \<longrightarrow>cbv t') \<or> (val t)"
+using a
+by (induct \<Gamma>\<equiv>"[]::ty_ctx" t T)
+   (auto intro: cbv.intros dest!: canonical_tArr)
+
+
+text {***********************************************************
+
+  Strong Induction Principle 
+  --------------------------
+
+  A proof for the strong (structural) induction principle in the 
+  lambda-calculus.
+
+*}
+
+
+lemma lam_strong_induct:
+  fixes c::"'a::fs_name"
+  assumes h1: "\<And>x c. P c (Var x)"
+  and     h2: "\<And>t1 t2 c. \<lbrakk>\<forall>d. P d t1; \<forall>d. P d t2\<rbrakk> \<Longrightarrow> P c (App t1 t2)"
+  and     h3: "\<And>x t c. \<lbrakk>x\<sharp>c; \<forall>d. P d t\<rbrakk> \<Longrightarrow> P c (Lam [x].t)"
+  shows "P c t"
+proof -
+  have "\<forall>(\<pi>::name prm) c. P c (\<pi>\<bullet>t)"
+  proof (induct t rule: lam.induct)
+    case (Lam x t)
+    have ih: "\<forall>(\<pi>::name prm) c. P c (\<pi>\<bullet>t)" by fact
+    { fix \<pi>::"name prm" and c::"'a::fs_name"
+      obtain y::"name" where fc: "y\<sharp>(\<pi>\<bullet>x,\<pi>\<bullet>t,c)" 
+	by (rule exists_fresh) (auto simp add: fs_name1)
+      from ih have "\<forall>c. P c (([(y,\<pi>\<bullet>x)]@\<pi>)\<bullet>t)" by simp
+      then have "\<forall>c. P c ([(y,\<pi>\<bullet>x)]\<bullet>(\<pi>\<bullet>t))" by (auto simp only: pt_name2)
+      with h3 have "P c (Lam [y].[(y,\<pi>\<bullet>x)]\<bullet>(\<pi>\<bullet>t))" using fc by (simp add: fresh_prod) 
+      moreover
+      have "Lam [y].[(y,\<pi>\<bullet>x)]\<bullet>(\<pi>\<bullet>t) = Lam [(\<pi>\<bullet>x)].(\<pi>\<bullet>t)" 
+	using fc by (simp add: lam.inject alpha fresh_atm fresh_prod)
+      ultimately have "P c (Lam [(\<pi>\<bullet>x)].(\<pi>\<bullet>t))" by simp
+    }
+    then have "\<forall>(\<pi>::name prm) c. P c (Lam [(\<pi>\<bullet>x)].(\<pi>\<bullet>t))" by simp
+    then show "\<forall>(\<pi>::name prm) c. P c (\<pi>\<bullet>(Lam [x].t))" by simp
+  qed (auto intro: h1 h2) (* var and app case *)
+  then have "P c (([]::name prm)\<bullet>t)" by blast
+  then show "P c t" by simp
+qed
+
+text {***********************************************************
+
+  ---------
+  SOLUTIONS
+  ---------
+
+*}
+
+text {************************************************************
+
+  1.) MINI EXERCISE 
+
+  The way we defined contexts does not allow us to
+  apply a Hole to a Hole. Therefore the following 
+  will result in a typing error. *}
+
+(* term "CAppL \<box> \<box>" *)
+
+
+text {************************************************************
+
+  2. EXERCISE
+
+  A readable proof for this lemma is as follows:
+  
+*}
+
+lemma 
+  assumes a: "<e1,Es1> \<mapsto>* <e2,Es2>" 
+  and     b: "<e2,Es2> \<mapsto>* <e3,Es3>"
+  shows "<e1,Es1> \<mapsto>* <e3,Es3>"
+using a b
+proof(induct)
+  case (ms1 e1 Es1)
+  show "<e1,Es1> \<mapsto>* <e3,Es3>" by fact
+next
+  case (ms2 e1 Es1 e2 Es2 e2' Es2')
+  have ih: "<e2',Es2'> \<mapsto>* <e3,Es3> \<Longrightarrow> <e2,Es2> \<mapsto>* <e3,Es3>" by fact
+  have "<e2',Es2'> \<mapsto>* <e3,Es3>" by fact
+  then have "<e2,Es2> \<mapsto>* <e3,Es3>" using ih by simp
+  moreover
+  have "<e1,Es1> \<mapsto> <e2,Es2>" by fact
+  ultimately show "<e1,Es1> \<mapsto>* <e3,Es3>" by auto
+qed
+
+
+text {************************************************************ 
+  
+  3.) Exercise
+
+  As one can quickly see in the second case, the theorem as stated
+  does not go through. We need to generalise the induction hypothesis
+  so that we show the lemma for all contexts Es. In Isar, variables 
+  can be generalised by declaring "arbitrary: variable \<dots>" when the 
+  induction is set up.
+
+*}
+
+theorem 
+  assumes a: "t \<Down> t'"
+  shows "<t,Es> \<mapsto>* <t',Es>"
+using a 
+proof (induct arbitrary: Es)    (* here we generalise over Es *)
+  case (e_Lam x t Es)
+  show "<Lam [x].t,Es> \<mapsto>* <Lam [x].t,Es>" by auto
+next
+  case (e_App t1 x t t2 v' v Es)
+  have ih1: "\<And>Es. <t1,Es> \<mapsto>* <Lam [x].t,Es>" by fact
+  have ih2: "\<And>Es. <t2,Es> \<mapsto>* <v',Es>" by fact
+  have ih3: "\<And>Es. <t[x::=v'],Es> \<mapsto>* <v,Es>" by fact
+  have "<App t1 t2,Es> \<mapsto>* <t1,CAppL \<box> t2#Es>" by auto
+  moreover
+  have "<t1,CAppL \<box> t2#Es> \<mapsto>* <Lam [x].t,CAppL \<box> t2#Es>" using ih1 by auto
+  moreover
+  have "<Lam [x].t,CAppL \<box> t2#Es> \<mapsto>* <t2,CAppR (Lam [x].t) \<box>#Es>" by auto
+  moreover
+  have "<t2,CAppR (Lam [x].t) \<box>#Es> \<mapsto>* <v',CAppR (Lam [x].t) \<box>#Es>" using ih2 by auto
+  moreover
+  have "t2 \<Down> v'" by fact
+  then have "val v'" using eval_to_val by auto
+  then have "<v',CAppR (Lam [x].t) \<box>#Es> \<mapsto>* <t[x::=v'],Es>" by auto
+  moreover
+  have "<t[x::=v'],Es> \<mapsto>* <v,Es>" using ih3 by auto
+  ultimately show "<App t1 t2,Es> \<mapsto>* <v,Es>" by blast
+qed
+
+text {************************************************************ 
+  
+  4.) Exercise
+
+  A proof for the weakening lemma:
+
+*}
+
+lemma  
+  fixes \<Gamma>1 \<Gamma>2::"(name\<times>ty) list"
+  assumes a: "\<Gamma>1 \<turnstile> t : T"
+  and     b: "valid \<Gamma>2" 
+  and     c: "\<Gamma>1 \<subseteq> \<Gamma>2"
+  shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (nominal_induct \<Gamma>1 t T avoiding: \<Gamma>2 rule: typing.strong_induct)
+  case (t_Var \<Gamma>1 x T)  (* variable case *)
+  have "\<Gamma>1 \<subseteq> \<Gamma>2" by fact 
+  moreover  
+  have "valid \<Gamma>2" by fact 
+  moreover 
+  have "(x,T)\<in> set \<Gamma>1" by fact
+  ultimately show "\<Gamma>2 \<turnstile> Var x : T" by auto
+next
+  case (t_Lam x \<Gamma>1 T1 t T2) (* lambda case *)
+  have vc: "x\<sharp>\<Gamma>2" by fact   (* variable convention *)
+  have ih: "\<lbrakk>valid ((x,T1)#\<Gamma>2); (x,T1)#\<Gamma>1 \<subseteq> (x,T1)#\<Gamma>2\<rbrakk> \<Longrightarrow> (x,T1)#\<Gamma>2 \<turnstile> t:T2" by fact
+  have "\<Gamma>1 \<subseteq> \<Gamma>2" by fact
+  then have "(x,T1)#\<Gamma>1 \<subseteq> (x,T1)#\<Gamma>2" by simp
+  moreover
+  have "valid \<Gamma>2" by fact
+  then have "valid ((x,T1)#\<Gamma>2)" using vc by (simp add: v2)
+  ultimately have "(x,T1)#\<Gamma>2 \<turnstile> t : T2" using ih by simp
+  with vc show "\<Gamma>2 \<turnstile> Lam [x].t : T1\<rightarrow>T2" by auto
+qed (auto) (* app case *)
+
+text {************************************************************ 
+  
+  5.) Exercise
+
+  A proof for context composition.
+
+*}
+
+lemma 
+  shows "(E1 \<circ> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>"
+by (induct E1) (simp_all)
+
+text {******************************************************************
+
+  6.) EXERCISE
+  ------------
+
+  A proof of the lemma relating framestack flattening (\<down>) and context 
+  composition, using neut_hole and circ_assoc.
+
+*}
+
+lemma 
+  shows "(Es1@Es2)\<down> = (Es2\<down>) \<circ> (Es1\<down>)"
+proof (induct Es1)
+  case Nil
+  show "([]@Es2)\<down> = Es2\<down> \<circ> []\<down>" using neut_hole by simp
+next
+  case (Cons E Es1)
+  have ih: "(Es1@Es2)\<down> = Es2\<down> \<circ> Es1\<down>" by fact
+  have "((E#Es1)@Es2)\<down> = (Es1@Es2)\<down> \<circ> E" by simp
+  also have "\<dots> = (Es2\<down> \<circ> Es1\<down>) \<circ> E" using ih by simp
+  also have "\<dots> = Es2\<down> \<circ> (Es1\<down> \<circ> E)" using circ_assoc by simp
+  also have "\<dots> = Es2\<down> \<circ> (E#Es1)\<down>" by simp
+  finally show "((E#Es1)@Es2)\<down> = Es2\<down> \<circ> (E#Es1)\<down>" by simp
+qed
+
+text {******************************************************************
+
+  7.) EXERCISE
+  ------------
+
+  A proof for the substitution lemma.
+
+*}
+
+lemma 
+  assumes a: "x\<noteq>y"
+  and     b: "x\<sharp>L"
+  shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]"
+using a b
+proof (nominal_induct M avoiding: x y N L rule: lam.strong_induct)
+  case (Var z) (* case 1: Variables*)
+  show "Var z[x::=N][y::=L] = Var z[y::=L][x::=N[y::=L]]" (is "?LHS = ?RHS")
+  proof -
+    { (*Case 1.1*)
+      assume  "z=x"
+      have "(1)": "?LHS = N[y::=L]" using `z=x` by simp
+      have "(2)": "?RHS = N[y::=L]" using `z=x` `x\<noteq>y` by simp
+      from "(1)" "(2)" have "?LHS = ?RHS"  by simp
+    }
+    moreover 
+    { (*Case 1.2*)
+      assume "z=y" and "z\<noteq>x" 
+      have "(1)": "?LHS = L"               using `z\<noteq>x` `z=y` by simp
+      have "(2)": "?RHS = L[x::=N[y::=L]]" using `z=y` by simp
+      have "(3)": "L[x::=N[y::=L]] = L"    using `x\<sharp>L` by (simp add: forget)
+      from "(1)" "(2)" "(3)" have "?LHS = ?RHS" by simp
+    }
+    moreover 
+    { (*Case 1.3*)
+      assume "z\<noteq>x" and "z\<noteq>y"
+      have "(1)": "?LHS = Var z" using `z\<noteq>x` `z\<noteq>y` by simp
+      have "(2)": "?RHS = Var z" using `z\<noteq>x` `z\<noteq>y` by simp
+      from "(1)" "(2)" have "?LHS = ?RHS" by simp
+    }
+    ultimately show "?LHS = ?RHS" by blast
+  qed
+next
+  case (Lam z M1) (* case 2: lambdas *)
+  have ih: "\<lbrakk>x\<noteq>y; x\<sharp>L\<rbrakk> \<Longrightarrow> M1[x::=N][y::=L] = M1[y::=L][x::=N[y::=L]]" by fact
+  have fs: "z\<sharp>x" "z\<sharp>y" "z\<sharp>N" "z\<sharp>L" by fact+
+  then have "z\<sharp>N[y::=L]" by (simp add: fresh_fact)
+  show "(Lam [z].M1)[x::=N][y::=L] = (Lam [z].M1)[y::=L][x::=N[y::=L]]" (is "?LHS=?RHS") 
+  proof - 
+    have "?LHS = Lam [z].(M1[x::=N][y::=L])" using `z\<sharp>x` `z\<sharp>y` `z\<sharp>N` `z\<sharp>L` by simp
+    also from ih have "\<dots> = Lam [z].(M1[y::=L][x::=N[y::=L]])" using `x\<noteq>y` `x\<sharp>L` by simp
+    also have "\<dots> = (Lam [z].(M1[y::=L]))[x::=N[y::=L]]" using `z\<sharp>x` `z\<sharp>N[y::=L]` by simp
+    also have "\<dots> = ?RHS" using  `z\<sharp>y` `z\<sharp>L` by simp
+    finally show "?LHS = ?RHS" by simp
+  qed
+next
+  case (App M1 M2) (* case 3: applications *)
+  then show "(App M1 M2)[x::=N][y::=L] = (App M1 M2)[y::=L][x::=N[y::=L]]" by simp
+qed
+
+text {******************************************************************
+  
+  8.) Exercise
+  ------------
+
+  This exercise can be left out if not needed; a completed proof is given below anyway.
+*}
+
+lemma 
+  assumes a: "t \<longrightarrow>cbv t'"
+  shows "E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>"
+using a
+proof (induct E)
+  case Hole
+  have "t \<longrightarrow>cbv t'" by fact
+  then show "\<box>\<lbrakk>t\<rbrakk> \<longrightarrow>cbv \<box>\<lbrakk>t'\<rbrakk>" by simp
+next
+  case (CAppL E s)
+  have ih: "t \<longrightarrow>cbv t' \<Longrightarrow> E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" by fact
+  have a: "t \<longrightarrow>cbv t'" by fact
+  show "(CAppL E s)\<lbrakk>t\<rbrakk> \<longrightarrow>cbv (CAppL E s)\<lbrakk>t'\<rbrakk>" using ih a by auto
+next
+  case (CAppR s E)
+  have ih: "t \<longrightarrow>cbv t' \<Longrightarrow> E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" by fact
+  have a: "t \<longrightarrow>cbv t'" by fact
+  show "(CAppR s E)\<lbrakk>t\<rbrakk> \<longrightarrow>cbv (CAppR s E)\<lbrakk>t'\<rbrakk>" using ih a by auto
+qed
+
+text {******************************************************************
+  
+  9.) Exercise
+  ------------
+
+  The point of the cbv-reduction is that we can relatively easily 
+  establish the following property:
+
+*}
+
+lemma 
+  assumes a: "<e,Es> \<mapsto> <e',Es'>"
+  shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>"
+using a 
+proof (induct)
+  case (m1 t1 t2 Es)
+  show "Es\<down>\<lbrakk>App t1 t2\<rbrakk> \<longrightarrow>cbv* ((CAppL \<box> t2)#Es)\<down>\<lbrakk>t1\<rbrakk>" by (auto simp add: ctx_compose)
+next
+  case (m2 v t2 Es)
+  have "val v" by fact
+  then show "((CAppL \<box> t2)#Es)\<down>\<lbrakk>v\<rbrakk> \<longrightarrow>cbv* (CAppR v \<box> # Es)\<down>\<lbrakk>t2\<rbrakk>" 
+    by (auto simp add: ctx_compose)
+next
+  case (m3 v x t Es)
+  have "val v" by fact
+  then show "((CAppR (Lam [x].t) \<box>)#Es)\<down>\<lbrakk>v\<rbrakk> \<longrightarrow>cbv* (Es\<down>)\<lbrakk>t[x::=v]\<rbrakk>" 
+    by (auto simp add: ctx_compose intro: cbv_in_ctx)
+qed
+
+lemma
+  assumes a: "<e,Es> \<mapsto> <e',Es'>"
+  shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>"
+using a by (induct) (auto simp add: ctx_compose intro: cbv_in_ctx)
+
+text {******************************************************************
+  
+  10.) Exercise
+  -------------
+
+  A proof with the first case completed. 
+
+*}
+
+lemma 
+  assumes a: "t1 \<longrightarrow>cbv t2" "t2 \<Down> t3"
+  shows "t1 \<Down> t3"
+using a
+proof(induct arbitrary: t3)
+  case (cbv1 v x t t3)
+  have a1: "val v" by fact
+  have a2: "t[x::=v] \<Down> t3" by fact
+  show "App Lam [x].t v \<Down> t3" using eval_val a1 a2 by auto
+next
+  case (cbv2 t t' t2 t3)
+  have "t \<longrightarrow>cbv t'" by fact
+  have ih: "\<And>t3. t' \<Down> t3 \<Longrightarrow> t \<Down> t3" by fact
+  have "App t' t2 \<Down> t3" by fact
+  then obtain x t'' v' 
+    where a1: "t' \<Down> Lam [x].t''" 
+      and a2: "t2 \<Down> v'" 
+      and a3: "t''[x::=v'] \<Down> t3" using e_App_elim by blast
+  have "t \<Down>  Lam [x].t''" using ih a1 by auto 
+  then show "App t t2 \<Down> t3" using a2 a3 by auto
+qed (auto dest!: e_App_elim)
+
+lemma
+  assumes a: "t1 \<longrightarrow>cbv t2" "t2 \<Down> t3"
+  shows "t1 \<Down> t3"
+using a
+by (induct arbitrary: t3)
+   (auto elim!: eval_elim intro: eval_val)
+
+
+end    
+   
+
+
+
+  
+
+  
+  
+
+