

header {* 

  Nominal Isabelle Tutorial
  =========================

  There will be hands-on exercises throughout the tutorial. Therefore
  it would be good if you have your own laptop.

  Nominal Isabelle is a definitional extension of Isabelle/HOL, which
  means it does not add any new axioms to higher-order logic. It merely
  adds new definitions and an infrastructure for 'nominal reasoning'.


  The Interface
  -------------

  The Isabelle theorem prover comes with an interface based on jEdit. 
  Unlike many other theorem prover interfaces (e.g. ProofGeneral) where you 
  try to advance a 'checked' region in a proof script, this interface immediately 
  checks the whole buffer. The text you type is also immediately checked
  as you type. Malformed text or unfinished proofs are highlighted in red 
  with a little red 'stop' signal on the left-hand side. If you drag the 
  'red-box' cursor over a line, the Output window gives further feedback. 

  Note: If a section is not parsed correctly, the work-around is to cut it 
  out and paste it back into the text (cut out: Ctrl + X; paste back: Ctrl + V;
  use Cmd instead of Ctrl on the Mac).

  Nominal Isabelle and the interface can be started on the command line with

     isabelle jedit -l HOL-Nominal2
     isabelle jedit -l HOL-Nominal2 A.thy B.thy ...


  Symbols
  ------- 

  Symbols can considerably improve the readability of your statements and proofs. 
  They can be input by just typing 'name-of-symbol' where 'name-of-symbol' is the 
  usual LaTeX name of that symbol. A little window will then appear in which 
  you can select the symbol. With `Escape' you can ignore any suggestion.
  
  There are some handy short-cuts for frequently used symbols. 
  For example 

      short-cut  symbol

          =>      \<Rightarrow>
          ==>     \<Longrightarrow>
          -->     \<longrightarrow>
          !       \<forall>        
          ?       \<exists>
          /\      \<and>
          \/      \<or>
          ~       \<not>
          ~=      \<noteq>
          :       \<in>
          ~:      \<notin>

  For Nominal Isabelle, two important symbols are

          \<sharp>       sharp     (freshness)
          \<bullet>       bullet    (permutations)
*}

theory Tutorial1
imports Lambda
begin

section {* Theories *}

text {*
  All formal developments in Isabelle are part of a theory. A theory 
  needs to have a name and must import some pre-existing theory (as indicated 
  above). The imported theory will normally be the theory Nominal2 (which 
  contains many useful concepts like set-theory, lists, nominal theory etc).

  For the purpose of the tutorial we import the theory Lambda.thy which
  already contains some useful definitions for (alpha-equated) lambda terms.
*}



section {* Types *}

text {*
  Isabelle is based on simple types including some polymorphism. It also includes 
  some overloading, which means that sometimes explicit type annotations need to 
  be given.

    - Base types include: nat, bool, string

    - Type formers include: 'a list, ('a \<times> 'b), 'c set   

    - Type variables are written like in ML with an apostrophe: 'a, 'b, \<dots>

  Types known to Isabelle can be queried using the command "typ".
*}

typ nat
typ bool
typ string           
typ lam             -- {* alpha-equated lambda terms defined in Lambda.thy *}
typ name            -- {* type of (object) variables defined in Lambda.thy *}
typ "('a \<times> 'b)"     -- {* pair type *}
typ "'c set"        -- {* set type *}
typ "'a list"       -- {* list type *}
typ "nat \<Rightarrow> bool"   -- {* type of functions from natural numbers to booleans *}


text {* Some malformed types: *}

(*
typ boolean     -- {* undeclared type *} 
typ set         -- {* type argument missing *}
*)


section {* Terms *}

text {*
   Every term in Isabelle needs to be well-typed; however, terms can have 
   a polymorphic type. Whether a term is accepted can be queried using
   the command "term". 
*}

term c                 -- {* a variable of polymorphic type *}
term "1::nat"          -- {* the constant 1 in natural numbers *}
term 1                 -- {* the constant 1 with polymorphic type *}
term "{1, 2, 3::nat}"  -- {* the set containing natural numbers 1, 2 and 3 *}
term "[1, 2, 3]"       -- {* the list containing the polymorphic numbers 1, 2 and 3 *}
term "(True, ''c'')"   -- {* a pair consisting of the boolean true and the string "c" *}
term "Suc 0"           -- {* successor of 0, in other words 1::nat *}
term "Lam [x].Var x"   -- {* a lambda-term *}
term "App t1 t2"       -- {* another lambda-term *}
term "x::name"         -- {* an (object) variable of type name *}
term "atom (x::name)"  -- {* atom is an overloded function *}

text {* 
  Lam [x].Var x is the syntax we made up for lambda abstractions. You can have
  your own syntax. We also came up with "name". If you prefer, you can call
  it identifiers or have more than one type for (object language) variables.

  Isabelle provides some useful colour feedback about constants (black), 
  free variables (blue) and bound variables (green).
*}

term "True"    -- {* a constant that is defined in HOL...written in black *}
term "true"    -- {* not recognised as a constant, therefore it is interpreted
                     as a free variable, written in blue *}
term "\<forall>x. P x" -- {* x is bound (green), P is free (blue) *}


text {* Formulae in Isabelle/HOL are terms of type bool *}

term "True"
term "True \<and> False"
term "True \<or> B"
term "{1,2,3} = {3,2,1}"
term "\<forall>x. P x"
term "A \<longrightarrow> B"
term "atom a \<sharp> t"

text {*
  When working with Isabelle, one deals with an object logic (that is HOL) and 
  Isabelle's rule framework (called Pure). Occasionally one has to pay attention 
  to this fact. But for the moment we ignore this completely.
*}

term "A \<longrightarrow> B"  -- {* versus *}
term "A \<Longrightarrow> B"

term "\<forall>x. P x"  -- {* versus *}
term "\<And>x. P x"



section {* Inductive Definitions: Transitive Closures of Beta-Reduction *}

text {*
  The theory Lambda already contains a definition of beta-reduction, written

     t \<longrightarrow>b t'

  In this section we want to define inductively the transitive closure of
  beta-reduction.

  Inductive definitions in Isabelle start with the keyword "inductive" and a predicate 
  name.  One can optionally provide a type for the predicate. Clauses of the inductive
  predicate are introduced by "where"; if there is more than one clause, they are 
  separated by "|". One can also give a name to each clause and indicate that it 
  should be added to the hints database ("[intro]"). A typical clause has some 
  premises and a conclusion. This is written in Isabelle as:

   "premise \<Longrightarrow> conclusion"
   "premise1 \<Longrightarrow> premise2 \<Longrightarrow> \<dots> premisen \<Longrightarrow> conclusion"

  There is an alternative way of writing the latter clause as

   "\<lbrakk>premise1; premise2; \<dots> premisen\<rbrakk> \<Longrightarrow> conclusion"

  If no premise is present, then one just writes

   "conclusion"

  Below we give two definitions of the transitive closure of beta-reduction.
*}

inductive
  beta_star1 :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<longrightarrow>b* _" [60, 60] 60)
where
  bs1_refl: "t \<longrightarrow>b* t"
| bs1_trans: "\<lbrakk>t1 \<longrightarrow>b t2; t2 \<longrightarrow>b* t3\<rbrakk> \<Longrightarrow> t1 \<longrightarrow>b* t3"


inductive
  beta_star2 :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<longrightarrow>b** _" [60, 60] 60)
where
  bs2_refl: "t \<longrightarrow>b** t"
| bs2_step: "t1 \<longrightarrow>b t2 \<Longrightarrow> t1 \<longrightarrow>b** t2"
| bs2_trans: "\<lbrakk>t1 \<longrightarrow>b** t2; t2 \<longrightarrow>b** t3\<rbrakk> \<Longrightarrow> t1 \<longrightarrow>b** t3"

section {* Theorems *}

text {*
  A central concept in Isabelle is that of theorems. Isabelle's theorem
  database can be queried using 
*}

thm bs1_refl
thm bs2_trans
thm conjI
thm conjunct1

text {* 
  Notice that theorems usually contain schematic variables (e.g. ?P, ?Q, \<dots>). 
  These schematic variables can be substituted with any term (of the right type 
  of course). 

  When defining the predicates beta_star1 and beta_star2, Isabelle automatically 
  provides us with the following theorems that state how they can be introduced 
  and what constitutes an induction over them. 
*}

thm beta_star1.intros
thm beta_star2.induct
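
text {* 
  As a small illustration (the particular instantiation is arbitrary), 
  schematic variables can be instantiated explicitly, for example with 
  the "where" attribute: 
*}

thm conjI[where P="True" and Q="False"]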


section {* Lemmas / Theorems / Corollaries *}

text {*
  Whether to use lemma, theorem or corollary makes no semantic difference 
  in Isabelle. 

  A lemma starts with "lemma" and consists of a statement ("shows \<dots>") and 
  optionally a lemma name, some type-information for variables ("fixes \<dots>") 
  and some assumptions ("assumes \<dots>"). 

  Lemmas also need to have a proof, but ignore this 'detail' for the moment.

  Examples are
*}

lemma alpha_equ:
  shows "Lam [x].Var x = Lam [y].Var y"
  by (simp add: lam.eq_iff Abs1_eq_iff lam.fresh fresh_at_base)

lemma Lam_freshness:
  assumes a: "x \<noteq> y"
  and     b: "atom y \<sharp> Lam [x].t"
  shows "atom y \<sharp> t"
  using a b by (simp add: lam.fresh Abs_fresh_iff) 

lemma neutral_element:
  fixes x::"nat"
  shows "x + 0 = x"
  by simp

text {*
  Note that in the last statement, the explicit type annotation is important 
  in order for Isabelle to be able to figure out what 0 stands for (e.g. a 
  natural number, a vector, etc) and which lemmas to apply.
*}
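
text {* 
  As a hypothetical check (commented out so that the theory still processes):
  without the type annotation, 0 and + remain overloaded at an unknown type 
  and the statement should not be provable by simp.
*}

(*
lemma
  shows "x + 0 = x"
  by simp
*)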


section {* Isar Proofs *}

text {*
  Isar is a language for writing formal proofs that can be understood by humans 
  and by Isabelle. An Isar proof can be thought of as a sequence of 'stepping stones' 
  that start with some assumptions and lead to the goal to be established. Every such 
  stepping stone is introduced by "have" followed by the statement of the stepping 
  stone. An exception is the goal to be proved, which needs to be introduced with "show".

      have "statement"  \<dots>
      show "goal_to_be_proved" \<dots>

  Since proofs usually do not proceed in a linear fashion, labels can be given 
  to every stepping stone; these labels can be used later to refer back to the 
  corresponding stepping stone ("using").

      have label: "statement1"  \<dots>
      \<dots>
      have "later_statement" using label \<dots>

  Each stepping stone (or have-statement) needs to have a justification. The 
  simplest justification is "sorry" which admits any stepping stone, even false 
  ones (this is good during the development of proofs). 

      have "outrageous_false_statement" sorry

  Assumptions can be 'justified' using "by fact". 

      have "assumption" by fact

  Derived facts can be justified using 

      have "statement" by simp    -- simplification 
      have "statement" by auto    -- proof search and simplification 
      have "statement" by blast   -- only proof search 

  If facts or lemmas are needed in order to justify a have-statement, then
  one can feed these facts into the proof by using "using label \<dots>" or 
  "using theorem-name \<dots>". More than one label at a time is allowed. For
  example

      have "statement" using label1 label2 theorem_name by auto

  Induction proofs in Isar are set up by indicating over which predicate(s) 
  the induction proceeds ("using a b") followed by the command "proof (induct)". 
  In this way, Isabelle uses defaults for which induction should be performed. 
  These defaults can be overridden by giving more information, like the variable 
  over which a structural induction should proceed, or a specific induction principle, 
  such as well-founded inductions. 

  After the induction is set up, the proof proceeds by cases. In Isar these 
  cases can be given in any order. Most commonly they are started with "case" and the 
  name of the case, and optionally some legible names for the variables used inside 
  the case. 

  In each "case", we need to establish a statement introduced by "show". Once 
  this has been done, the next case can be started using "next". When all cases 
  are completed, the proof can be finished using "qed".

  This means a typical induction proof has the following pattern

     proof (induct)
       case \<dots>
       \<dots>
       show \<dots>
     next
       case \<dots>
       \<dots>
       show \<dots>
     \<dots>
     qed
*}
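
text {* 
  As a small warm-up (an artificial example with arbitrary propositions A 
  and B, not part of the development below), the following proof uses only 
  "have", "show", labels and "using"; the initial "proof -" just opens the 
  proof without applying any method:
*}

lemma
  assumes a: "A \<and> B"
  shows "B \<and> A"
proof -
  have b: "B" using a by auto
  have c: "A" using a by auto
  show "B \<and> A" using b c by auto
qed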


subsection {* Exercise I *}

text {*
  Remove the sorries in the proof below and fill in the proper
  justifications. 
*}


lemma
  assumes a: "t1 \<longrightarrow>b* t2" 
  shows "t1 \<longrightarrow>b** t2"
  using a
proof (induct)
  case (bs1_refl t)
  show "t \<longrightarrow>b** t" using bs2_refl by blast
next
  case (bs1_trans t1 t2 t3)
  have beta: "t1 \<longrightarrow>b t2" by fact
  have ih: "t2 \<longrightarrow>b** t3" by fact
  have a: "t1 \<longrightarrow>b** t2" using beta bs2_step by blast
  show "t1 \<longrightarrow>b** t3" using a ih bs2_trans by blast
qed


subsection {* Exercise II *}

text {*
  Again remove the sorries in the proof below and fill in the proper
  justifications. 
*}

lemma bs1_trans2:
  assumes a: "t1 \<longrightarrow>b* t2"
  and     b: "t2 \<longrightarrow>b* t3"
  shows "t1 \<longrightarrow>b* t3"
using a b
proof (induct)
  case (bs1_refl t1)
  have a: "t1 \<longrightarrow>b* t3" by fact
  show "t1 \<longrightarrow>b* t3" using a by blast
next
  case (bs1_trans t1 t2 t3')
  have ih1: "t1 \<longrightarrow>b t2" by fact
  have ih2: "t3' \<longrightarrow>b* t3 \<Longrightarrow> t2 \<longrightarrow>b* t3" by fact
  have asm: "t3' \<longrightarrow>b* t3" by fact
  have a: "t2 \<longrightarrow>b* t3" using ih2 asm by blast
  show "t1 \<longrightarrow>b* t3" using ih1 a beta_star1.bs1_trans by blast
qed
  
lemma
  assumes a: "t1 \<longrightarrow>b** t2"
  shows "t1 \<longrightarrow>b* t2"
using a
proof (induct)
  case (bs2_refl t)
  show "t \<longrightarrow>b* t" using bs1_refl by blast
next
  case (bs2_step t1 t2)
  have ih: "t1 \<longrightarrow>b t2" by fact
  have a: "t2 \<longrightarrow>b* t2" using bs1_refl by blast
  show "t1 \<longrightarrow>b* t2" using ih a bs1_trans by blast
next
  case (bs2_trans t1 t2 t3)
  have ih1: "t1 \<longrightarrow>b* t2" by fact
  have ih2: "t2 \<longrightarrow>b* t3" by fact
  show "t1 \<longrightarrow>b* t3" using ih1 ih2 bs1_trans2 by blast  
qed
  
text {* 
  Just like gotos in the Basic programming language, labels often reduce 
  the readability of proofs. Therefore one can use in Isar the notation
  "then have" in order to feed a have-statement to the proof of 
  the next have-statement. This is used in the second case below.
*}

lemma 
  assumes a: "t1 \<longrightarrow>b* t2"
  and     b: "t2 \<longrightarrow>b* t3"
  shows "t1 \<longrightarrow>b* t3"
using a b
proof (induct)
  case (bs1_refl t1)
  show "t1 \<longrightarrow>b* t3" by fact
next
  case (bs1_trans t1 t2 t3')
  have ih1: "t1 \<longrightarrow>b t2" by fact
  have ih2: "t3' \<longrightarrow>b* t3 \<Longrightarrow> t2 \<longrightarrow>b* t3" by fact
  have "t3' \<longrightarrow>b* t3" by fact
  then have "t2 \<longrightarrow>b* t3" using ih2 by blast
  then show "t1 \<longrightarrow>b* t3" using ih1 beta_star1.bs1_trans by blast
qed

text {* 
  The label ih2 cannot be eliminated in this way, because it is used 
  two lines below and we cannot rearrange them. We can still avoid the
  label by feeding a sequence of facts into a proof using the 
  "moreover"-chaining mechanism:

   have "statement_1" \<dots>
   moreover
   have "statement_2" \<dots>
   \<dots>
   moreover
   have "statement_n" \<dots>
   ultimately have "statement" \<dots>

  In this chain, all "statement_i" can be used in the proof of the final 
  "statement". With this we can simplify our proof further to:
*}

lemma 
  assumes a: "t1 \<longrightarrow>b* t2"
  and     b: "t2 \<longrightarrow>b* t3"
  shows "t1 \<longrightarrow>b* t3"
using a b
proof (induct)
  case (bs1_refl t1)
  show "t1 \<longrightarrow>b* t3" by fact
next
  case (bs1_trans t1 t2 t3')
  have "t3' \<longrightarrow>b* t3 \<Longrightarrow> t2 \<longrightarrow>b* t3" by fact
  moreover
  have "t3' \<longrightarrow>b* t3" by fact
  ultimately 
  have "t2 \<longrightarrow>b* t3" by blast
  moreover 
  have "t1 \<longrightarrow>b t2" by fact
  ultimately show "t1 \<longrightarrow>b* t3" using beta_star1.bs1_trans by blast
qed


text {* 
  While automatic proof procedures in Isabelle are not able to prove statements
  like "P = NP" assuming usual definitions for P and NP, they can automatically
  discharge the lemmas we just proved. For this we only have to set up the induction
  and auto will take care of the rest. This means we can write:
*}

lemma
  assumes a: "t1 \<longrightarrow>b* t2" 
  shows "t1 \<longrightarrow>b** t2"
  using a
by (induct) (auto intro: beta_star2.intros)

lemma 
  assumes a: "t1 \<longrightarrow>b* t2"
  and     b: "t2 \<longrightarrow>b* t3"
  shows "t1 \<longrightarrow>b* t3"
using a b
by (induct) (auto intro: beta_star1.intros)

lemma
  assumes a: "t1 \<longrightarrow>b** t2"
  shows "t1 \<longrightarrow>b* t2"
using a
by (induct) (auto intro: bs1_trans2 beta_star1.intros)

inductive
  eval :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<Down> _" [60, 60] 60) 
where
  e_Lam:  "Lam [x].t \<Down> Lam [x].t"
| e_App:  "\<lbrakk>t1 \<Down> Lam [x].t; t2 \<Down> v'; t[x::=v'] \<Down> v\<rbrakk> \<Longrightarrow> App t1 t2 \<Down> v"

declare eval.intros[intro]

text {* 
  Values are also defined using "inductive". In our case values
  are just lambda-abstractions. *}

inductive
  val :: "lam \<Rightarrow> bool" 
where
  v_Lam[intro]:   "val (Lam [x].t)"


section {* Datatypes: Evaluation Contexts *}

text {*

  Datatypes can be defined in Isabelle as follows: we have to provide the name 
  of the datatype and list its type-constructors. Each type-constructor needs 
  to have the information about the types of its arguments, and optionally 
  can also contain some information about pretty syntax. For example, we would
  like to write "\<box>" for holes.
*}

datatype ctx = 
    Hole ("\<box>")  
  | CAppL "ctx" "lam"
  | CAppR "lam" "ctx"

text {* Now Isabelle knows about: *}

typ ctx
term "\<box>"
term "CAppL"
term "CAppL \<box> (Var x)"

text {* 

  1.) MINI EXERCISE
  -----------------

  What happens if you apply a Hole to a Hole? Try it and see. 

*}

type_synonym ctxs = "ctx list"

inductive
  machine :: "lam \<Rightarrow> ctxs \<Rightarrow>lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto> <_,_>" [60, 60, 60, 60] 60)
where
  m1[intro]: "<App t1 t2,Es> \<mapsto> <t1,(CAppL \<box> t2) # Es>"
| m2[intro]: "val v \<Longrightarrow> <v,(CAppL \<box> t2) # Es> \<mapsto> <t2,(CAppR v \<box>) # Es>"
| m3[intro]: "val v \<Longrightarrow> <v,(CAppR (Lam [x].t) \<box>) # Es> \<mapsto> <t[x ::= v],Es>"


text {*
  Since the machine defined above only performs a single reduction,
  we need to define the transitive closure of this machine. *}

inductive 
  machines :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto>* <_,_>" [60, 60, 60, 60] 60)
where
  ms1[intro]: "<t,Es> \<mapsto>* <t,Es>"
| ms2[intro]: "\<lbrakk><t1,Es1> \<mapsto> <t2,Es2>; <t2,Es2> \<mapsto>* <t3,Es3>\<rbrakk> \<Longrightarrow> <t1,Es1> \<mapsto>* <t3,Es3>"

lemma 
  assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>" 
  and     b: "<e2, Es2> \<mapsto>* <e3, Es3>"
  shows "<e1, Es1> \<mapsto>* <e3, Es3>"
using a b
proof(induct)
  case (ms1 e1 Es1)
  have c: "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
  show "<e1, Es1> \<mapsto>* <e3, Es3>" sorry
next
  case (ms2 e1 Es1 e2 Es2 e2' Es2') 
  have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
  have d1: "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
  have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
  
  show "<e1, Es1> \<mapsto>* <e3, Es3>" sorry
qed

text {* 
  Just like gotos in the Basic programming language, labels can reduce 
  the readability of proofs. Therefore one can use in Isar the notation
  "then have" in order to feed a have-statement to the proof of 
  the next have-statement. In the proof below this has been used
  in order to get rid of the labels c and d1. 
*}

lemma 
  assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>" 
  and     b: "<e2, Es2> \<mapsto>* <e3, Es3>"
  shows "<e1, Es1> \<mapsto>* <e3, Es3>"
using a b
proof(induct)
  case (ms1 e1 Es1)
  show "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
next
  case (ms2 e1 Es1 e2 Es2 e2' Es2')
  have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
  have "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
  then have d3: "<e2, Es2> \<mapsto>* <e3, Es3>" using ih by simp
  have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
  show "<e1, Es1> \<mapsto>* <e3, Es3>" using d2 d3 by auto
qed

lemma 
  assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>" 
  and     b: "<e2, Es2> \<mapsto>* <e3, Es3>"
  shows "<e1, Es1> \<mapsto>* <e3, Es3>"
using a b
proof(induct)
  case (ms1 e1 Es1)
  show "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
next
  case (ms2 e1 Es1 e2 Es2 e2' Es2')
  have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
  have "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
  then have "<e2, Es2> \<mapsto>* <e3, Es3>" using ih by simp
  moreover
  have "<e1, Es1> \<mapsto> <e2, Es2>" by fact
  ultimately show "<e1, Es1> \<mapsto>* <e3, Es3>" by auto
qed


lemma ms3[intro]:
  assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>" 
  and     b: "<e2, Es2> \<mapsto>* <e3, Es3>"
  shows "<e1, Es1> \<mapsto>* <e3, Es3>"
using a b by (induct) (auto)

lemma eval_to_val:
  assumes a: "t \<Down> t'"
  shows "val t'"
using a by (induct) (auto)


theorem 
  assumes a: "t \<Down> t'"
  shows "<t, []> \<mapsto>* <t', []>"
using a 
proof (induct)
  case (e_Lam x t)
  (* no assumptions *)
  show "<Lam [x].t, []> \<mapsto>* <Lam [x].t, []>" sorry
next
  case (e_App t1 x t t2 v' v)
  (* all assumptions in this case *)
  have a1: "t1 \<Down> Lam [x].t" by fact
  have ih1: "<t1, []> \<mapsto>* <Lam [x].t, []>" by fact
  have a2: "t2 \<Down> v'" by fact
  have ih2: "<t2, []> \<mapsto>* <v', []>" by fact
  have a3: "t[x::=v'] \<Down> v" by fact
  have ih3: "<t[x::=v'], []> \<mapsto>* <v, []>" by fact
  (* your details *)
  show "<App t1 t2, []> \<mapsto>* <v, []>" sorry
qed

text {* 
  Again, the automatic tools in Isabelle can take care 
  of the routine work in these proofs. We can write: *}

theorem eval_implies_machines_ctx:
  assumes a: "t \<Down> t'"
  shows "<t, Es> \<mapsto>* <t', Es>"
using a
by (induct arbitrary: Es)
   (metis eval_to_val machine.intros ms1 ms2 ms3 v_Lam)+

corollary eval_implies_machines:
  assumes a: "t \<Down> t'"
  shows "<t, []> \<mapsto>* <t', []>"
using a eval_implies_machines_ctx by simp


nominal_datatype ty =
  tVar "string"
| tArr "ty" "ty" ("_ \<rightarrow> _" [100, 100] 100)


text {* 
  Having defined ty as a nominal datatype gives us additional
  definitions and theorems that come with such datatypes (see below).
  *}

text {* 
  We next define the type of typing contexts, a predicate that
  defines valid contexts (i.e. lists that contain only unique
  variables) and the typing judgement.

*}

type_synonym ty_ctx = "(name \<times> ty) list"

inductive
  valid :: "ty_ctx \<Rightarrow> bool"
where
  v1[intro]: "valid []"
| v2[intro]: "\<lbrakk>valid \<Gamma>; atom x \<sharp> \<Gamma>\<rbrakk>\<Longrightarrow> valid ((x, T) # \<Gamma>)"

inductive
  typing :: "ty_ctx \<Rightarrow> lam \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _" [60, 60, 60] 60) 
where
  t_Var[intro]:  "\<lbrakk>valid \<Gamma>; (x, T) \<in> set \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Var x : T"
| t_App[intro]:  "\<lbrakk>\<Gamma> \<turnstile> t1 : T1 \<rightarrow> T2; \<Gamma> \<turnstile> t2 : T1\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> App t1 t2 : T2"
| t_Lam[intro]:  "\<lbrakk>atom x \<sharp> \<Gamma>; (x, T1) # \<Gamma> \<turnstile> t : T2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Lam [x].t : T1 \<rightarrow> T2"


text {*
  The predicate x \<sharp> \<Gamma>, read as x fresh for \<Gamma>, is defined by Nominal Isabelle.
  Freshness is defined for lambda-terms, products, lists etc. For example
  we have:
  *}

lemma
  fixes x::"name"
  shows "atom x \<sharp> Lam [x].t"
  and   "atom x \<sharp> (t1, t2) \<Longrightarrow> atom x \<sharp> App t1 t2"
  and   "atom x \<sharp> Var y \<Longrightarrow> atom x \<sharp> y" 
  and   "\<lbrakk>atom x \<sharp> t1; atom x \<sharp> t2\<rbrakk> \<Longrightarrow> atom x \<sharp> (t1, t2)"
  and   "\<lbrakk>atom x \<sharp> l1; atom x \<sharp> l2\<rbrakk> \<Longrightarrow> atom x \<sharp> (l1 @ l2)"
  and   "atom x \<sharp> y \<Longrightarrow> x \<noteq> y"
  by (simp_all add: lam.fresh fresh_append fresh_at_base) 

text {* We can also prove that every variable is fresh for a type *}

lemma ty_fresh:
  fixes x::"name"
  and   T::"ty"
  shows "atom x \<sharp> T"
by (induct T rule: ty.induct)
   (simp_all add: ty.fresh pure_fresh)

text {* 
  In order to state the weakening lemma in a convenient form, we overload
  the subset-notation and define the abbreviation below. Abbreviations behave
  like definitions, except that they are always automatically folded and
  unfolded.
*}

abbreviation
  "sub_ty_ctx" :: "ty_ctx \<Rightarrow> ty_ctx \<Rightarrow> bool" ("_ \<sqsubseteq> _" [60, 60] 60) 
where
  "\<Gamma>1 \<sqsubseteq> \<Gamma>2 \<equiv> \<forall>x. x \<in> set \<Gamma>1 \<longrightarrow> x \<in> set \<Gamma>2"

text {***************************************************************** 

  4.) Exercise
  ------------

  Fill in the details and give a proof of the weakening lemma. 

*}

lemma 
  assumes a: "\<Gamma>1 \<turnstile> t : T"
  and     b: "valid \<Gamma>2" 
  and     c: "\<Gamma>1 \<sqsubseteq> \<Gamma>2"
  shows "\<Gamma>2 \<turnstile> t : T"
using a b c
proof (induct arbitrary: \<Gamma>2)
  case (t_Var \<Gamma>1 x T)
  have a1: "valid \<Gamma>1" by fact
  have a2: "(x, T) \<in> set \<Gamma>1" by fact
  have a3: "valid \<Gamma>2" by fact
  have a4: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact

  show "\<Gamma>2 \<turnstile> Var x : T" sorry
next
  case (t_Lam x \<Gamma>1 T1 t T2) 
  have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x, T1) # \<Gamma>1 \<sqsubseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
  have a0: "atom x \<sharp> \<Gamma>1" by fact
  have a1: "valid \<Gamma>2" by fact
  have a2: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact

  show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
qed (auto)


text {* 
  Despite the frequent claim that the weakening lemma is trivial,
  routine or obvious, the proof in the lambda-case does not go 
  through smoothly. Painful variable renamings seem to be necessary. 
  But the proof using renamings is horribly complicated. It is really 
  interesting whether people who claim this proof is trivial, routine 
  or obvious had this proof in mind. 

  BTW: The following two commands already help with showing that validity 
  and typing are invariant under variable (permutative) renamings. 
*}

equivariance valid
equivariance typing

lemma not_to_be_tried_at_home_weakening: 
  assumes a: "\<Gamma>1 \<turnstile> t : T"
  and     b: "valid \<Gamma>2" 
  and     c: "\<Gamma>1 \<sqsubseteq> \<Gamma>2"
  shows "\<Gamma>2 \<turnstile> t : T"
using a b c
proof (induct arbitrary: \<Gamma>2)
  case (t_Lam x \<Gamma>1 T1 t T2) (* lambda case *)
  have fc0: "atom x \<sharp> \<Gamma>1" by fact
  have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x, T1) # \<Gamma>1 \<sqsubseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
  obtain c::"name" where fc1: "atom c \<sharp> (x, t, \<Gamma>1, \<Gamma>2)" by (rule obtain_fresh)
  have "Lam [c].((c \<leftrightarrow> x) \<bullet> t) = Lam [x].t" using fc1 (* we then alpha-rename the lambda-abstraction *)
    by (auto simp add: lam.eq_iff Abs1_eq_iff flip_def)
  moreover
  have "\<Gamma>2 \<turnstile> Lam [c].((c \<leftrightarrow> x) \<bullet> t) : T1 \<rightarrow> T2" (* we can then alpha-rename our original goal *)
  proof - 
    (* we establish (x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2) and valid ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2)) *)
    have "(x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2)" 
    proof -
      have "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
      then have "(c \<leftrightarrow> x) \<bullet> ((x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2))" using fc0 fc1
	by (perm_simp) (simp add: flip_fresh_fresh)
      then show "(x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2)" by (rule permute_boolE)
    qed
    moreover 
    have "valid ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2))" 
    proof -
      have "valid \<Gamma>2" by fact
      then show "valid ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2))" using fc1
	by (auto simp add: fresh_permute_left atom_eqvt valid.eqvt)	
    qed
    (* these two facts give us by induction hypothesis the following *)
    ultimately have "(x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2) \<turnstile> t : T2" using ih by simp 
    (* we now apply renamings to get to our goal *)
    then have "(c \<leftrightarrow> x) \<bullet> ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2) \<turnstile> t : T2)" by (rule permute_boolI)
    then have "(c, T1) # \<Gamma>2 \<turnstile> ((c \<leftrightarrow> x) \<bullet> t) : T2" using fc1
      by (perm_simp) (simp add: flip_fresh_fresh ty_fresh)
    then show "\<Gamma>2 \<turnstile> Lam [c].((c \<leftrightarrow> x) \<bullet> t) : T1 \<rightarrow> T2" using fc1 by auto
  qed
  ultimately show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" by simp
qed (auto) (* var and app cases *)


text {* 
  The remedy to the complicated weakening proof shown
  above is to use a stronger induction principle that
  has the usual variable convention already built in. The
  following command derives this induction principle for us.
  (We shall explain what happens here later.)

*}

nominal_inductive typing
  avoids t_Lam: "x"
  unfolding fresh_star_def
  by (simp_all add: fresh_Pair lam.fresh ty_fresh)

text {* Compare the two induction principles *}
thm typing.induct
thm typing.strong_induct

text {* 
  We can use the stronger induction principle by replacing
  the line

  proof (induct arbitrary: \<Gamma>2)

  with 
  
  proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)

  Try now the proof.

*}
  

lemma weakening: 
  assumes a: "\<Gamma>1 \<turnstile> t : T"
  and     b: "valid \<Gamma>2" 
  and     c: "\<Gamma>1 \<sqsubseteq> \<Gamma>2"
  shows "\<Gamma>2 \<turnstile> t : T"
using a b c
proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
  case (t_Var \<Gamma>1 x T)  (* variable case *)
  have "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact 
  moreover  
  have "valid \<Gamma>2" by fact 
  moreover 
  have "(x, T)\<in> set \<Gamma>1" by fact
  ultimately show "\<Gamma>2 \<turnstile> Var x : T" by auto
next
  case (t_Lam x \<Gamma>1 T1 t T2) 
  have vc: "atom x \<sharp> \<Gamma>2" by fact   (* additional fact *)
  have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x, T1) # \<Gamma>1 \<sqsubseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
  have a0: "atom x \<sharp> \<Gamma>1" by fact
  have a1: "valid \<Gamma>2" by fact
  have a2: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact

  show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
qed (auto) (* app case *)


text {***************************************************************** 

  Function Definitions: Filling a Lambda-Term into a Context
  ----------------------------------------------------------

  Many functions over datatypes can be defined by recursion on the
  structure. For this purpose, Isabelle provides "fun". To use it one needs 
  to give a name for the function, its type, optionally some pretty-syntax 
  and then some equations defining the function. As in "inductive",
  multiple equations in "fun" are separated by "|".

*}

fun
  filling :: "ctx \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>" [100, 100] 100)
where
  "\<box>\<lbrakk>t\<rbrakk> = t"
| "(CAppL E t')\<lbrakk>t\<rbrakk> = App (E\<lbrakk>t\<rbrakk>) t'"
| "(CAppR t' E)\<lbrakk>t\<rbrakk> = App t' (E\<lbrakk>t\<rbrakk>)"

text {* 
  After this definition Isabelle will be able to simplify
  statements like: *}

lemma 
  shows "(CAppL \<box> (Var x))\<lbrakk>Var y\<rbrakk> = App (Var y) (Var x)"
  by simp


fun 
 ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" ("_ \<cdot> _" [101,100] 100)
where
  "\<box> \<cdot> E' = E'"
| "(CAppL E t') \<cdot> E' = CAppL (E \<cdot> E') t'"
| "(CAppR t' E) \<cdot> E' = CAppR t' (E \<cdot> E')"

fun
  ctx_composes :: "ctxs \<Rightarrow> ctx" ("_\<down>" [110] 110)
where
    "[]\<down> = \<box>"
  | "(E # Es)\<down> = (Es\<down>) \<cdot> E"

text {*  
  Notice that we have not just given pretty syntax for the functions, but
  also some precedences. The numbers inside the [\<dots>] stand for the precedences
  of the arguments; the number next to them is the precedence of the whole term.
  
  This means we have to write (Es1 \<cdot> Es2) \<cdot> Es3 otherwise Es1 \<cdot> Es2 \<cdot> Es3 is
  interpreted as Es1 \<cdot> (Es2 \<cdot> Es3).
*}
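
text {* 
  A quick way to check how such terms parse (Es1, Es2 and Es3 are just 
  free variables here):
*}

term "(Es1 \<cdot> Es2) \<cdot> Es3"
term "Es1 \<cdot> Es2 \<cdot> Es3"   -- {* parses as Es1 \<cdot> (Es2 \<cdot> Es3) *}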

text {******************************************************************
  
  Structural Inductions over Contexts
  ------------------------------------

  So far we have had a look at an induction over an inductive predicate. 
  Another important induction principle is structural induction for 
  datatypes. To illustrate structural inductions we prove some facts
  about context composition, some of which we will need later on.

*}

text {******************************************************************

  5.) EXERCISE
  ------------

  Complete the proof and remove the sorries.

*}

lemma ctx_compose:
  shows "(E1 \<cdot> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>"
proof (induct E1)
  case Hole
  show "\<box> \<cdot> E2\<lbrakk>t\<rbrakk> = \<box>\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
next
  case (CAppL E1 t')
  have ih: "(E1 \<cdot> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by fact
  show "((CAppL E1 t') \<cdot> E2)\<lbrakk>t\<rbrakk> = (CAppL E1 t')\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
next
  case (CAppR t' E1)
  have ih: "(E1 \<cdot> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by fact
  show "((CAppR t' E1) \<cdot> E2)\<lbrakk>t\<rbrakk> = (CAppR t' E1)\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
qed

lemma neut_hole:
  shows "E \<cdot> \<box> = E"
by (induct E) (simp_all)

lemma circ_assoc:
  fixes E1 E2 E3::"ctx"
  shows "(E1 \<cdot> E2) \<cdot> E3 = E1 \<cdot> (E2 \<cdot> E3)"
by (induct E1) (simp_all)

lemma
  shows "(Es1 @ Es2)\<down> = (Es2\<down>) \<cdot> (Es1\<down>)"
proof (induct Es1)
  case Nil
  show "([] @ Es2)\<down> = Es2\<down> \<cdot> []\<down>" sorry
next
  case (Cons E Es1)
  have ih: "(Es1 @ Es2)\<down> = Es2\<down> \<cdot> Es1\<down>" by fact

  show "((E # Es1) @ Es2)\<down> = Es2\<down> \<cdot> (E # Es1)\<down>" sorry
qed


text {* 
  The last proof involves several steps of equational reasoning.
  Isar provides some convenient means to express such equational 
  reasoning in a much cleaner fashion using the "also have" 
  construction. *}

lemma 
  shows "(Es1 @ Es2)\<down> = (Es2\<down>) \<cdot> (Es1\<down>)"
proof (induct Es1)
  case Nil
  show "([] @ Es2)\<down> = Es2\<down> \<cdot> []\<down>" using neut_hole by simp
next
  case (Cons E Es1)
  have ih: "(Es1 @ Es2)\<down> = Es2\<down> \<cdot> Es1\<down>" by fact
  have "((E # Es1) @ Es2)\<down> = (Es1 @ Es2)\<down> \<cdot> E" by simp
  also have "\<dots> = (Es2\<down> \<cdot> Es1\<down>) \<cdot> E" using ih by simp
  also have "\<dots> = Es2\<down> \<cdot> (Es1\<down> \<cdot> E)" using circ_assoc by simp
  also have "\<dots> = Es2\<down> \<cdot> (E # Es1)\<down>" by simp
  finally show "((E # Es1) @ Es2)\<down> = Es2\<down> \<cdot> (E # Es1)\<down>" by simp
qed

text {******************************************************************
  
  Formalising Barendregt's Proof of the Substitution Lemma
  --------------------------------------------------------

  Barendregt's proof needs a case distinction in the variable case.
  One way to do this in Isar is to use blocks. A block is a sequence
  of reasoning steps enclosed in curly braces

  { \<dots>

    have "statement"
  }

  Such a block can contain local assumptions like

  { assume "A"
    assume "B"
    \<dots>
    have "C" by \<dots>
  }

  Where "C" is the last have-statement in this block. The behaviour 
  of such a block to the 'outside' is the implication

   \<lbrakk>A; B\<rbrakk> \<Longrightarrow> "C" 

  Now if we want to prove a property "smth" using the case-distinctions
  P\<^isub>1, P\<^isub>2 and P\<^isub>3 then we can use the following reasoning:

    { assume "P\<^isub>1"
      \<dots>
      have "smth"
    }
    moreover
    { assume "P\<^isub>2"
      \<dots>
      have "smth"
    }
    moreover
    { assume "P\<^isub>3"
      \<dots>
      have "smth"
    }
    ultimately have "smth" by blast

  The blocks establish the implications

    P\<^isub>1 \<Longrightarrow> smth
    P\<^isub>2 \<Longrightarrow> smth
    P\<^isub>3 \<Longrightarrow> smth

  If we know that P\<^isub>1, P\<^isub>2 and P\<^isub>3 cover all the cases, that is P\<^isub>1 \<or> P\<^isub>2 \<or> P\<^isub>3 is
  true, then we have 'ultimately' established the property "smth" 
  
*}
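
text {* 
  A minimal sketch of this pattern (again an artificial example with 
  arbitrary propositions A, B and P): we prove P from A \<or> B using one 
  block for each case.
*}

lemma
  assumes a: "A \<or> B"
  and     b: "A \<Longrightarrow> P"
  and     c: "B \<Longrightarrow> P"
  shows "P"
proof -
  { assume "A"
    then have "P" using b by blast }
  moreover
  { assume "B"
    then have "P" using c by blast }
  ultimately show "P" using a by blast
qed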

text {******************************************************************
  
  7.) Exercise
  ------------

  Fill in the cases 1.2 and 1.3 and the equational reasoning 
  in the lambda-case.
*}

lemma forget:
  shows "atom x \<sharp> t \<Longrightarrow> t[x ::= s] = t"
apply(nominal_induct t avoiding: x s rule: lam.strong_induct)
apply(auto simp add: lam.fresh fresh_at_base)
done

lemma fresh_fact:
  fixes z::"name"
  assumes a: "atom z \<sharp> s"
  and b: "z = y \<or> atom z \<sharp> t"
  shows "atom z \<sharp> t[y ::= s]"
using a b
apply (nominal_induct t avoiding: z y s rule: lam.strong_induct)
apply (auto simp add: lam.fresh fresh_at_base)
done

thm forget
thm fresh_fact

lemma 
  assumes a: "x \<noteq> y"
  and     b: "atom x \<sharp> L"
  shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]"
using a b
proof (nominal_induct M avoiding: x y N L rule: lam.strong_induct)
  case (Var z)
  have a1: "x \<noteq> y" by fact
  have a2: "atom x \<sharp> L" by fact
  show "Var z[x::=N][y::=L] = Var z[y::=L][x::=N[y::=L]]" (is "?LHS = ?RHS")
  proof -
    { (*Case 1.1*)
      assume c1: "z=x"
      have "(1)": "?LHS = N[y::=L]" using c1 by simp
      have "(2)": "?RHS = N[y::=L]" using c1 a1 by simp
      have "?LHS = ?RHS" using "(1)" "(2)" by simp
    }
    moreover 
    { (*Case 1.2*)
      assume c2: "z = y" "z \<noteq> x" 
      
      have "?LHS = ?RHS" sorry
    }
    moreover 
    { (*Case 1.3*)
      assume c3: "z \<noteq> x" "z \<noteq> y"
      
      have "?LHS = ?RHS" sorry
    }
    ultimately show "?LHS = ?RHS" by blast
  qed
next
  case (Lam z M1) (* case 2: lambdas *)
  have ih: "\<lbrakk>x \<noteq> y; atom x \<sharp> L\<rbrakk> \<Longrightarrow> M1[x::=N][y::=L] = M1[y::=L][x::=N[y::=L]]" by fact
  have a1: "x \<noteq> y" by fact
  have a2: "atom x \<sharp> L" by fact
  have fs: "atom z \<sharp> x" "atom z \<sharp> y" "atom z \<sharp> N" "atom z \<sharp> L" by fact+
  then have b: "atom z \<sharp> N[y::=L]" by (simp add: fresh_fact)
  show "(Lam [z].M1)[x::=N][y::=L] = (Lam [z].M1)[y::=L][x::=N[y::=L]]" (is "?LHS=?RHS") 
  proof - 
    have "?LHS = \<dots>" sorry

    also have "\<dots> = ?RHS" sorry
    finally show "?LHS = ?RHS" by simp
  qed
next
  case (App M1 M2) (* case 3: applications *)
  then show "(App M1 M2)[x::=N][y::=L] = (App M1 M2)[y::=L][x::=N[y::=L]]" by simp
qed

text {* 
  Again the strong induction principle enables Isabelle to find
  the proof of the substitution lemma automatically. 
*}

lemma substitution_lemma_version:  
  assumes asm: "x \<noteq> y" "atom x \<sharp> L"
  shows "M[x::=N][y::=L] = M[y::=L][x::=N[y::=L]]"
  using asm 
by (nominal_induct M avoiding: x y N L rule: lam.strong_induct)
   (auto simp add: fresh_fact forget)

text {******************************************************************
  
  The CBV Reduction Relation (Small-Step Semantics) 
  -------------------------------------------------

  In order to establish the property that the CK machine
  calculates a normal form which corresponds to the
  evaluation relation, we introduce the call-by-value
  small-step semantics.

*}

inductive
  cbv :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<longrightarrow>cbv _" [60, 60] 60) 
where
  cbv1: "\<lbrakk>val v; atom x \<sharp> v\<rbrakk> \<Longrightarrow> App (Lam [x].t) v \<longrightarrow>cbv t[x::=v]"
| cbv2[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> App t t2 \<longrightarrow>cbv App t' t2"
| cbv3[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> App t2 t \<longrightarrow>cbv App t2 t'"

equivariance val
equivariance cbv
nominal_inductive cbv
  avoids cbv1: "x"
  unfolding fresh_star_def
  by (simp_all add: lam.fresh Abs_fresh_iff fresh_Pair fresh_fact)

text {*
  In order to satisfy the vc-condition we have to formulate
  this relation with the additional freshness constraint
  atom x \<sharp> v. Though this makes the definition vc-compatible, it
  makes the definition less useful. With some pain we can
  show that the more restricted rule is equivalent to the
  usual rule. *}

lemma subst_rename: 
  assumes a: "atom y \<sharp> t"
  shows "t[x ::= s] = ((y \<leftrightarrow> x) \<bullet>t)[y ::= s]"
using a 
apply (nominal_induct t avoiding: x y s rule: lam.strong_induct)
apply (auto simp add: lam.fresh fresh_at_base)
done

thm subst_rename

lemma better_cbv1[intro]: 
  assumes a: "val v" 
  shows "App (Lam [x].t) v \<longrightarrow>cbv t[x::=v]"
proof -
  obtain y::"name" where fs: "atom y \<sharp> (x, t, v)" by (rule obtain_fresh)
  have "App (Lam [x].t) v = App (Lam [y].((y \<leftrightarrow> x) \<bullet> t)) v" using fs
    by (auto simp add: lam.eq_iff Abs1_eq_iff' flip_def fresh_Pair fresh_at_base)
  also have "\<dots> \<longrightarrow>cbv  ((y \<leftrightarrow> x) \<bullet> t)[y ::= v]" using fs a by (auto intro: cbv1)
  also have "\<dots> = t[x ::= v]" using fs by (simp add: subst_rename[symmetric])
  finally show "App (Lam [x].t) v \<longrightarrow>cbv t[x ::= v]" by simp
qed

text {*
  The transitive closure of the cbv-reduction relation: *}

inductive 
  "cbvs" :: "lam \<Rightarrow> lam \<Rightarrow> bool" (" _ \<longrightarrow>cbv* _" [60, 60] 60)
where
  cbvs1[intro]: "e \<longrightarrow>cbv* e"
| cbvs2[intro]: "\<lbrakk>e1\<longrightarrow>cbv e2; e2 \<longrightarrow>cbv* e3\<rbrakk> \<Longrightarrow> e1 \<longrightarrow>cbv* e3"

lemma cbvs3[intro]:
  assumes a: "e1 \<longrightarrow>cbv* e2" "e2 \<longrightarrow>cbv* e3"
  shows "e1 \<longrightarrow>cbv* e3"
using a by (induct) (auto) 

text {******************************************************************
  
  8.) Exercise
  ------------

  If simpler exercises are needed, then complete the following proof. 

*}

lemma cbv_in_ctx:
  assumes a: "t \<longrightarrow>cbv t'"
  shows "E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>"
using a
proof (induct E)
  case Hole
  have "t \<longrightarrow>cbv t'" by fact
  then show "\<box>\<lbrakk>t\<rbrakk> \<longrightarrow>cbv \<box>\<lbrakk>t'\<rbrakk>" sorry
next
  case (CAppL E s)
  have ih: "t \<longrightarrow>cbv t' \<Longrightarrow> E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" by fact
  have a: "t \<longrightarrow>cbv t'" by fact
  show "(CAppL E s)\<lbrakk>t\<rbrakk> \<longrightarrow>cbv (CAppL E s)\<lbrakk>t'\<rbrakk>" sorry
next
  case (CAppR s E)
  have ih: "t \<longrightarrow>cbv t' \<Longrightarrow> E\<lbrakk>t\<rbrakk> \<longrightarrow>cbv E\<lbrakk>t'\<rbrakk>" by fact
  have a: "t \<longrightarrow>cbv t'" by fact
  show "(CAppR s E)\<lbrakk>t\<rbrakk> \<longrightarrow>cbv (CAppR s E)\<lbrakk>t'\<rbrakk>" sorry
qed

  
text {******************************************************************
  
  9.) Exercise
  ------------

  The point of the cbv-reduction is that we can relatively easily 
  establish the following property:

*}

lemma machine_implies_cbvs_ctx:
  assumes a: "<e, Es> \<mapsto> <e', Es'>"
  shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>"
using a 
proof (induct)
  case (m1 t1 t2 Es)

  show "Es\<down>\<lbrakk>App t1 t2\<rbrakk> \<longrightarrow>cbv* ((CAppL \<box> t2) # Es)\<down>\<lbrakk>t1\<rbrakk>"  sorry
next
  case (m2 v t2 Es)
  have "val v" by fact

  show "((CAppL \<box> t2) # Es)\<down>\<lbrakk>v\<rbrakk> \<longrightarrow>cbv* (CAppR v \<box> # Es)\<down>\<lbrakk>t2\<rbrakk>" sorry
next
  case (m3 v x t Es)
  have "val v" by fact
 
  show "(((CAppR (Lam [x].t) \<box>) # Es)\<down>)\<lbrakk>v\<rbrakk> \<longrightarrow>cbv* (Es\<down>)\<lbrakk>(t[x ::= v])\<rbrakk>" sorry
qed

text {* 
  It is not difficult to extend the lemma above to
  arbitrary reduction sequences of the CK machine. *}

lemma machines_implies_cbvs_ctx:
  assumes a: "<e, Es> \<mapsto>* <e', Es'>"
  shows "(Es\<down>)\<lbrakk>e\<rbrakk> \<longrightarrow>cbv* (Es'\<down>)\<lbrakk>e'\<rbrakk>"
using a 
by (induct) (auto dest: machine_implies_cbvs_ctx)

text {* 
  So whenever we let the CK machine start in an initial
  state and it arrives at a final state, then there exists
  a corresponding cbv-reduction sequence. *}

corollary machines_implies_cbvs:
  assumes a: "<e, []> \<mapsto>* <e', []>"
  shows "e \<longrightarrow>cbv* e'"
using a by (auto dest: machines_implies_cbvs_ctx)

text {*
  We now want to relate the cbv-reduction to the evaluation
  relation. For this we need two auxiliary lemmas. *}

lemma eval_val:
  assumes a: "val t"
  shows "t \<Down> t"
using a by (induct) (auto)

lemma e_App_elim:
  assumes a: "App t1 t2 \<Down> v"
  shows "\<exists>x t v'. t1 \<Down> Lam [x].t \<and> t2 \<Down> v' \<and> t[x::=v'] \<Down> v"
using a by (cases) (auto simp add: lam.eq_iff lam.distinct) 

text {******************************************************************
  
  10.) Exercise
  -------------

  Complete the first case in the proof below. 

*}

lemma cbv_eval:
  assumes a: "t1 \<longrightarrow>cbv t2" "t2 \<Down> t3"
  shows "t1 \<Down> t3"
using a
proof(induct arbitrary: t3)
  case (cbv1 v x t t3)
  have a1: "val v" by fact
  have a2: "t[x ::= v] \<Down> t3" by fact

  show "App (Lam [x].t) v \<Down> t3" sorry
next
  case (cbv2 t t' t2 t3)
  have ih: "\<And>t3. t' \<Down> t3 \<Longrightarrow> t \<Down> t3" by fact
  have "App t' t2 \<Down> t3" by fact
  then obtain x t'' v' 
    where a1: "t' \<Down> Lam [x].t''" 
      and a2: "t2 \<Down> v'" 
      and a3: "t''[x ::= v'] \<Down> t3" using e_App_elim by blast
  have "t \<Down>  Lam [x].t''" using ih a1 by auto 
  then show "App t t2 \<Down> t3" using a2 a3 by auto
qed (auto dest!: e_App_elim)


text {* 
  Next we extend the lemma above to arbitrary initial
  sequences of cbv-reductions. *}

lemma cbvs_eval:
  assumes a: "t1 \<longrightarrow>cbv* t2" "t2 \<Down> t3"
  shows "t1 \<Down> t3"
using a by (induct) (auto intro: cbv_eval)

text {* 
  Finally, we can show that if from a term t we reach a value 
  by a cbv-reduction sequence, then t evaluates to this value. *}

lemma cbvs_implies_eval:
  assumes a: "t \<longrightarrow>cbv* v" "val v"
  shows "t \<Down> v"
using a
by (induct) (auto intro: eval_val cbvs_eval)

text {* 
  All facts tied together give us the desired property about
  the CK machine. *}

theorem machines_implies_eval:
  assumes a: "<t1, []> \<mapsto>* <t2, []>" 
  and     b: "val t2" 
  shows "t1 \<Down> t2"
proof -
  have "t1 \<longrightarrow>cbv* t2" using a by (simp add: machines_implies_cbvs)
  then show "t1 \<Down> t2" using b by (simp add: cbvs_implies_eval)
qed

lemma valid_elim:
  assumes a: "valid ((x, T) # \<Gamma>)"
  shows "atom x \<sharp> \<Gamma> \<and> valid \<Gamma>"
using a by (cases) (auto)

lemma valid_insert:
  assumes a: "valid (\<Delta> @ [(x, T)] @ \<Gamma>)"
  shows "valid (\<Delta> @ \<Gamma>)" 
using a
by (induct \<Delta>)
   (auto simp add: fresh_append fresh_Cons dest!: valid_elim)

lemma fresh_list: 
  shows "atom y \<sharp> xs = (\<forall>x \<in> set xs. atom y \<sharp> x)"
by (induct xs) (simp_all add: fresh_Nil fresh_Cons)

lemma context_unique:
  assumes a1: "valid \<Gamma>"
  and     a2: "(x, T) \<in> set \<Gamma>"
  and     a3: "(x, U) \<in> set \<Gamma>"
  shows "T = U" 
using a1 a2 a3
by (induct) (auto simp add: fresh_list fresh_Pair fresh_at_base)

lemma type_substitution_aux:
  assumes a: "(\<Delta> @ [(x, T')] @ \<Gamma>) \<turnstile> e : T"
  and     b: "\<Gamma> \<turnstile> e' : T'"
  shows "(\<Delta> @ \<Gamma>) \<turnstile> e[x ::= e'] : T" 
using a b 
proof (nominal_induct \<Gamma>'\<equiv>"\<Delta> @ [(x, T')] @ \<Gamma>" e T avoiding: x e' \<Delta> rule: typing.strong_induct)
  case (t_Var y T x e' \<Delta>)
  have a1: "valid (\<Delta> @ [(x, T')] @ \<Gamma>)" by fact
  have a2: "(y,T) \<in> set (\<Delta> @ [(x, T')] @ \<Gamma>)" by fact 
  have a3: "\<Gamma> \<turnstile> e' : T'" by fact
  from a1 have a4: "valid (\<Delta> @ \<Gamma>)" by (rule valid_insert)
  { assume eq: "x = y"
    from a1 a2 have "T = T'" using eq by (auto intro: context_unique)
    with a3 have "\<Delta> @ \<Gamma> \<turnstile> Var y[x::=e'] : T" using eq a4 by (auto intro: weakening)
  }
  moreover
  { assume ineq: "x \<noteq> y"
    from a2 have "(y, T) \<in> set (\<Delta> @ \<Gamma>)" using ineq by simp
    then have "\<Delta> @ \<Gamma> \<turnstile> Var y[x::=e'] : T" using ineq a4 by auto
  }
  ultimately show "\<Delta> @ \<Gamma> \<turnstile> Var y[x::=e'] : T" by blast
qed (force simp add: fresh_append fresh_Cons)+

corollary type_substitution:
  assumes a: "(x,T') # \<Gamma> \<turnstile> e : T"
  and     b: "\<Gamma> \<turnstile> e' : T'"
  shows "\<Gamma> \<turnstile> e[x::=e'] : T"
using a b type_substitution_aux[where \<Delta>="[]"]
by (auto)

lemma t_App_elim:
  assumes a: "\<Gamma> \<turnstile> App t1 t2 : T"
  shows "\<exists>T'. \<Gamma> \<turnstile> t1 : T' \<rightarrow> T \<and> \<Gamma> \<turnstile> t2 : T'"
using a
by (cases) (auto simp add: lam.eq_iff lam.distinct)

lemma t_Lam_elim:
  assumes ty: "\<Gamma> \<turnstile> Lam [x].t : T" 
  and     fc: "atom x \<sharp> \<Gamma>" 
  shows "\<exists>T1 T2. T = T1 \<rightarrow> T2 \<and> (x, T1) # \<Gamma> \<turnstile> t : T2"
using ty fc
apply(cases)
apply(auto simp add: lam.eq_iff lam.distinct ty.eq_iff)
apply(auto simp add: Abs1_eq_iff)
apply(rule_tac p="(x \<leftrightarrow> xa)" in permute_boolE)
apply(perm_simp)
apply(simp add: flip_def swap_fresh_fresh ty_fresh)
done

theorem cbv_type_preservation:
  assumes a: "t \<longrightarrow>cbv t'"
  and     b: "\<Gamma> \<turnstile> t : T" 
  shows "\<Gamma> \<turnstile> t' : T"
using a b
by (nominal_induct avoiding: \<Gamma> T rule: cbv.strong_induct)
   (auto dest!: t_Lam_elim t_App_elim simp add: type_substitution ty.eq_iff)

corollary cbvs_type_preservation:
  assumes a: "t \<longrightarrow>cbv* t'"
  and     b: "\<Gamma> \<turnstile> t : T" 
  shows "\<Gamma> \<turnstile> t' : T"
using a b
by (induct) (auto intro: cbv_type_preservation)

text {* 
  The Type-Preservation Property for the Machine and Evaluation Relation. *}

theorem machine_type_preservation:
  assumes a: "<t, []> \<mapsto>* <t', []>"
  and     b: "\<Gamma> \<turnstile> t : T" 
  shows "\<Gamma> \<turnstile> t' : T"
proof -
  from a have "t \<longrightarrow>cbv* t'" by (simp add: machines_implies_cbvs)
  then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: cbvs_type_preservation)
qed

theorem eval_type_preservation:
  assumes a: "t \<Down> t'"
  and     b: "\<Gamma> \<turnstile> t : T" 
  shows "\<Gamma> \<turnstile> t' : T"
proof -
  from a have "<t, []> \<mapsto>* <t', []>" by (simp add: eval_implies_machines)
  then show "\<Gamma> \<turnstile> t' : T" using b by (simp add: machine_type_preservation)
qed

text {* The Progress Property *}

lemma canonical_tArr:
  assumes a: "[] \<turnstile> t : T1 \<rightarrow> T2"
  and     b: "val t"
  shows "\<exists>x t'. t = Lam [x].t'"
using b a by (induct) (auto) 

theorem progress:
  assumes a: "[] \<turnstile> t : T"
  shows "(\<exists>t'. t \<longrightarrow>cbv t') \<or> (val t)"
using a
by (induct \<Gamma>\<equiv>"[]::ty_ctx" t T)
   (auto intro: cbv.intros dest!: canonical_tArr)



end