--- a/Tutorial/Tutorial1.thy Sun Jan 23 07:15:59 2011 +0900
+++ b/Tutorial/Tutorial1.thy Sun Jan 23 07:17:35 2011 +0900
@@ -1,4 +1,3 @@
-
header {*
Nominal Isabelle Tutorial at POPL'11
@@ -183,7 +182,7 @@
text {*
In this section we want to define inductively the evaluation
- relation for lambda terms.
+ relation for lambda terms and the cbv-reduction relation.
Inductive definitions in Isabelle start with the keyword "inductive"
and a predicate name. One can optionally provide a type for the predicate.
@@ -213,10 +212,8 @@
e_Lam[intro]: "Lam [x].t \<Down> Lam [x].t"
| e_App[intro]: "\<lbrakk>t1 \<Down> Lam [x].t; t2 \<Down> v'; t[x::=v'] \<Down> v\<rbrakk> \<Longrightarrow> App t1 t2 \<Down> v"
-
text {*
- Values are also defined using inductive. In our case values
- are just lambda-abstractions.
+ Values and cbv are also defined using inductive.
*}
inductive
@@ -224,7 +221,6 @@
where
v_Lam[intro]: "val (Lam [x].t)"
-
section {* Theorems *}
text {*
@@ -362,6 +358,7 @@
Two very simple example proofs are as follows.
*}
+subsection {* EXERCISE 1 *}
lemma eval_val:
assumes a: "val t"
@@ -372,7 +369,11 @@
show "Lam [x]. t \<Down> Lam [x]. t" sorry
qed
-lemma eavl_to_val:
+subsection {* EXERCISE 2 *}
+
+text {* Fill in the gaps in the proof below. *}
+
+lemma eval_to_val:
assumes a: "t \<Down> t'"
shows "val t'"
using a
@@ -381,16 +382,96 @@
show "val (Lam [x].t)" sorry
next
case (e_App t1 x t t2 v v')
+ -- {* all assumptions in this case *}
have "t1 \<Down> Lam [x].t" by fact
have ih1: "val (Lam [x]. t)" by fact
have "t2 \<Down> v" by fact
have ih2: "val v" by fact
have "t [x ::= v] \<Down> v'" by fact
have ih3: "val v'" by fact
+
show "val v'" sorry
qed
+
+
+section {* Datatypes: Evaluation Contexts *}
+
+text {*
+ Datatypes can be defined in Isabelle as follows: we have to provide the name
+ of the datatype and a list of its type-constructors. Each type-constructor needs
+ to have the information about the types of its arguments, and optionally
+ can also contain some information about pretty syntax. For example, we like
+ to write "\<box>" for holes.
+*}
-
+datatype ctx =
+ Hole ("\<box>")
+ | CAppL "ctx" "lam"
+ | CAppR "lam" "ctx"
+
+text {* Now Isabelle knows about: *}
+
+typ ctx
+term "\<box>"
+term "CAppL"
+term "CAppL \<box> (Var x)"
+
+subsection {* MINI EXERCISE *}
+
+text {*
+ Try it and see what happens if you apply a Hole to a Hole.
+*}
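+
+text {*
+  One way to start (this hint is not part of the original exercise):
+  type the term below and look at Isabelle's response. Since App
+  expects lambda-terms as arguments, but \<box> is a ctx, Isabelle
+  should complain with a type error.
+*}
+
+(*
+term "App \<box> \<box>"
+*)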
+
+
+section {* Machines for Evaluation *}
+
+type_synonym ctxs = "ctx list"
+
+inductive
+ machine :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto> <_,_>" [60, 60, 60, 60] 60)
+where
+ m1: "<App t1 t2, Es> \<mapsto> <t1, (CAppL \<box> t2) # Es>"
+| m2: "val v \<Longrightarrow> <v, (CAppL \<box> t2) # Es> \<mapsto> <t2, (CAppR v \<box>) # Es>"
+| m3: "val v \<Longrightarrow> <v, (CAppR (Lam [x].t) \<box>) # Es> \<mapsto> <t[x ::= v], Es>"
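+
+text {*
+  Roughly speaking (this summary is not part of the original text):
+  rule m1 descends into the function position of an application and
+  stores the argument in a CAppL-context; m2 switches to the argument
+  once the function position has become a value; m3 performs the
+  beta-reduction step once the argument is a value as well.
+*}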
+
+text {*
+ Since the machine defined above only performs a single reduction,
+ we need to define the reflexive-transitive closure of this machine. *}
+
+inductive
+ machines :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto>* <_,_>" [60, 60, 60, 60] 60)
+where
+ ms1: "<t,Es> \<mapsto>* <t,Es>"
+| ms2: "\<lbrakk><t1,Es1> \<mapsto> <t2,Es2>; <t2,Es2> \<mapsto>* <t3,Es3>\<rbrakk> \<Longrightarrow> <t1,Es1> \<mapsto>* <t3,Es3>"
+
+declare machine.intros[intro] machines.intros[intro]
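+
+text {*
+  A side remark (not in the original text): the declare command above
+  adds the introduction rules of both relations to the hints database,
+  just like the [intro] annotations did for eval and val earlier, so
+  that auto and blast can use them in the proofs below.
+*}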
+
+section {* EXERCISE 3 *}
+
+text {*
+ We need to show that the machines-relation is transitive.
+ Fill in the gaps below.
+*}
+
+lemma ms3[intro]:
+ assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
+ and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
+ shows "<e1, Es1> \<mapsto>* <e3, Es3>"
+using a b
+proof(induct)
+ case (ms1 e1 Es1)
+ have c: "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
+
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" sorry
+next
+ case (ms2 e1 Es1 e2 Es2 e2' Es2')
+ have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
+ have d1: "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
+ have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
+
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" sorry
+qed
+
text {*
Just like gotos in the Basic programming language, labels often reduce
the readability of proofs. Therefore one can use in Isar the notation
@@ -398,7 +479,22 @@
 the next have-statement. This is used in the second case below.
*}
-
+lemma
+ assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
+ and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
+ shows "<e1, Es1> \<mapsto>* <e3, Es3>"
+using a b
+proof(induct)
+ case (ms1 e1 Es1)
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
+next
+ case (ms2 e1 Es1 e2 Es2 e2' Es2')
+ have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
+ have "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
+ then have d3: "<e2, Es2> \<mapsto>* <e3, Es3>" using ih by simp
+ have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" using d2 d3 by auto
+qed
text {*
The label ih2 cannot be got rid of in this way, because it is used
@@ -418,108 +514,6 @@
"statement". With this we can simplify our proof further to:
*}
-
-text {*
- While automatic proof procedures in Isabelle are not able to prove statements
- like "P = NP" assuming usual definitions for P and NP, they can automatically
- discharge the lemmas we just proved. For this we only have to set up the induction
- and auto will take care of the rest. This means we can write:
-*}
-
-
-
-
-section {* Datatypes: Evaluation Contexts *}
-
-text {*
-
- Datatypes can be defined in Isabelle as follows: we have to provide the name
- of the datatype and list its type-constructors. Each type-constructor needs
- to have the information about the types of its arguments, and optionally
- can also contain some information about pretty syntax. For example, we like
- to write "\<box>" for holes.
-*}
-
-datatype ctx =
- Hole ("\<box>")
- | CAppL "ctx" "lam"
- | CAppR "lam" "ctx"
-
-text {* Now Isabelle knows about: *}
-
-typ ctx
-term "\<box>"
-term "CAppL"
-term "CAppL \<box> (Var x)"
-
-subsection {* MINI EXERCISE *}
-
-text {*
- Try and see what happens if you apply a Hole to a Hole?
-*}
-
-type_synonym ctxs = "ctx list"
-
-inductive
- machine :: "lam \<Rightarrow> ctxs \<Rightarrow>lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto> <_,_>" [60, 60, 60, 60] 60)
-where
- m1[intro]: "<App t1 t2,Es> \<mapsto> <t1,(CAppL \<box> t2) # Es>"
-| m2[intro]: "val v \<Longrightarrow> <v,(CAppL \<box> t2) # Es> \<mapsto> <t2,(CAppR v \<box>) # Es>"
-| m3[intro]: "val v \<Longrightarrow> <v,(CAppR (Lam [x].t) \<box>) # Es> \<mapsto> <t[x ::= v],Es>"
-
-
-text {*
- Since the machine defined above only performs a single reduction,
- we need to define the transitive closure of this machine. *}
-
-inductive
- machines :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto>* <_,_>" [60, 60, 60, 60] 60)
-where
- ms1[intro]: "<t,Es> \<mapsto>* <t,Es>"
-| ms2[intro]: "\<lbrakk><t1,Es1> \<mapsto> <t2,Es2>; <t2,Es2> \<mapsto>* <t3,Es3>\<rbrakk> \<Longrightarrow> <t1,Es1> \<mapsto>* <t3,Es3>"
-
-lemma
- assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
- and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
- shows "<e1, Es1> \<mapsto>* <e3, Es3>"
-using a b
-proof(induct)
- case (ms1 e1 Es1)
- have c: "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
- then show "<e1, Es1> \<mapsto>* <e3, Es3>" by simp
-next
- case (ms2 e1 Es1 e2 Es2 e2' Es2')
- have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
- have d1: "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
- have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
- show "<e1, Es1> \<mapsto>* <e3, Es3>" using d1 d2 ih by blast
-qed
-
-text {*
- Just like gotos in the Basic programming language, labels can reduce
- the readability of proofs. Therefore one can use in Isar the notation
- "then have" in order to feed a have-statement to the proof of
- the next have-statement. In the proof below this has been used
- in order to get rid of the labels c and d1.
-*}
-
-lemma
- assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
- and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
- shows "<e1, Es1> \<mapsto>* <e3, Es3>"
-using a b
-proof(induct)
- case (ms1 e1 Es1)
- show "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
-next
- case (ms2 e1 Es1 e2 Es2 e2' Es2')
- have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
- have "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
- then have d3: "<e2, Es2> \<mapsto>* <e3, Es3>" using ih by simp
- have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
- show "<e1, Es1> \<mapsto>* <e3, Es3>" using d2 d3 by auto
-qed
-
lemma
assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
@@ -539,16 +533,36 @@
qed
-lemma ms3[intro]:
+text {*
+ While automatic proof procedures in Isabelle are not able to prove statements
+ like "P = NP" assuming usual definitions for P and NP, they can automatically
+ discharge the lemmas we just proved. For this we only have to set up the induction
+ and auto will take care of the rest. This means we can write:
+*}
+
+lemma
+ assumes a: "val t"
+ shows "t \<Down> t"
+using a by (induct) (auto)
+
+lemma
+ assumes a: "t \<Down> t'"
+ shows "val t'"
+using a by (induct) (auto)
+
+lemma
assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
shows "<e1, Es1> \<mapsto>* <e3, Es3>"
using a b by (induct) (auto)
-lemma eval_to_val: (* fixme: done above *)
- assumes a: "t \<Down> t'"
- shows "val t'"
-using a by (induct) (auto)
+
+section {* EXERCISE 4 *}
+
+text {*
+ The point of the machine is that it simulates the evaluation
+ relation. Therefore we would like to prove the following:
+*}
theorem
assumes a: "t \<Down> t'"
@@ -567,10 +581,12 @@
have ih2: "<t2, []> \<mapsto>* <v', []>" by fact
have a3: "t[x::=v'] \<Down> v" by fact
have ih3: "<t[x::=v'], []> \<mapsto>* <v, []>" by fact
+
-- {* your reasoning *}
show "<App t1 t2, []> \<mapsto>* <v, []>" sorry
qed
+
text {*
 Again, the automatic tools in Isabelle can automatically discharge
 the routine work in these proofs. We can write:
@@ -589,7 +605,6 @@
using a eval_implies_machines_ctx by simp
-
section {* Function Definitions: Filling a Lambda-Term into a Context *}
text {*
@@ -607,6 +622,7 @@
| "(CAppL E t')\<lbrakk>t\<rbrakk> = App (E\<lbrakk>t\<rbrakk>) t'"
| "(CAppR t' E)\<lbrakk>t\<rbrakk> = App t' (E\<lbrakk>t\<rbrakk>)"
+
text {*
After this definition Isabelle will be able to simplify
statements like:
@@ -617,7 +633,7 @@
by simp
fun
- ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" ("_ \<odot> _" [99,98] 99)
+ ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" (infixr "\<odot>" 99)
where
"\<box> \<odot> E' = E'"
| "(CAppL E t') \<odot> E' = CAppL (E \<odot> E') t'"
@@ -666,11 +682,19 @@
show "((CAppR t' E1) \<odot> E2)\<lbrakk>t\<rbrakk> = (CAppR t' E1)\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" sorry
qed
+
+subsection {* EXERCISE 6 *}
+
+text {*
+ Remove the sorries in the ctx_append proof below. You can make
+ use of the following two properties.
+*}
+
lemma neut_hole:
shows "E \<odot> \<box> = E"
by (induct E) (simp_all)
-lemma odot_assoc: (* fixme call compose *)
+lemma compose_assoc:
shows "(E1 \<odot> E2) \<odot> E3 = E1 \<odot> (E2 \<odot> E3)"
by (induct E1) (simp_all)
@@ -686,7 +710,6 @@
show "((E # Es1) @ Es2)\<down> = Es2\<down> \<odot> (E # Es1)\<down>" sorry
qed
-
text {*
The last proof involves several steps of equational reasoning.
Isar provides some convenient means to express such equational
@@ -694,7 +717,6 @@
construction.
*}
-
lemma
shows "(Es1 @ Es2)\<down> = (Es2\<down>) \<odot> (Es1\<down>)"
proof (induct Es1)
@@ -703,14 +725,14 @@
next
case (Cons E Es1)
have ih: "(Es1 @ Es2)\<down> = Es2\<down> \<odot> Es1\<down>" by fact
- have "((E # Es1) @ Es2)\<down> = (Es1 @ Es2)\<down> \<odot> E" by simp
+ have "((E # Es1) @ Es2)\<down> = (E # (Es1 @ Es2))\<down>" by simp
+ also have "\<dots> = (Es1 @ Es2)\<down> \<odot> E" by simp
also have "\<dots> = (Es2\<down> \<odot> Es1\<down>) \<odot> E" using ih by simp
- also have "\<dots> = Es2\<down> \<odot> (Es1\<down> \<odot> E)" using odot_assoc by simp
+ also have "\<dots> = Es2\<down> \<odot> (Es1\<down> \<odot> E)" using compose_assoc by simp
also have "\<dots> = Es2\<down> \<odot> (E # Es1)\<down>" by simp
finally show "((E # Es1) @ Es2)\<down> = Es2\<down> \<odot> (E # Es1)\<down>" by simp
qed
-
end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Tutorial/Tutorial1s.thy Sun Jan 23 07:17:35 2011 +0900
@@ -0,0 +1,752 @@
+
+header {*
+
+ Nominal Isabelle Tutorial at POPL'11
+ ====================================
+
+ Nominal Isabelle is a definitional extension of Isabelle/HOL, which
+ means it does not add any new axioms to higher-order logic. It just
+ adds new definitions and an infrastructure for 'nominal reasoning'.
+
+
+ The jEdit Interface
+ -------------------
+
+ The Isabelle theorem prover comes with an interface for jEdit.
+ Unlike many other theorem prover interfaces (e.g. ProofGeneral) where you
+ advance a 'checked' region in a proof script, this interface immediately
+ checks the whole buffer. The text you type is also immediately checked.
+ Malformed text or unfinished proofs are highlighted in red with a little
+ red 'stop' signal on the left-hand side. If you drag the 'red-box' cursor
+ over a line, the Output window gives further feedback.
+
+ Note: If a section is not parsed correctly, the work-around is to cut it
+ out and paste it back into the text (cut out: Ctrl + X; paste back in: Ctrl + V;
+ on the Mac use Cmd instead of Ctrl)
+
+ Nominal Isabelle and jEdit can be started by typing on the command line
+
+ isabelle jedit -l HOL-Nominal2
+ isabelle jedit -l HOL-Nominal2 A.thy B.thy ...
+
+
+ Symbols
+ -------
+
+ Symbols can considerably improve the readability of your statements and proofs.
+ They can be input by just typing 'name-of-symbol' where 'name-of-symbol' is the
+ usual LaTeX name of that symbol. A little window will then appear in which
+ you can select the symbol. With `Escape' you can ignore any suggestion.
+
+ There are some handy short-cuts for frequently used symbols.
+ For example
+
+ short-cut symbol
+
+ => \<Rightarrow>
+ ==> \<Longrightarrow>
+ --> \<longrightarrow>
+ ! \<forall>
+ ? \<exists>
+ /\ \<and>
+ \/ \<or>
+ ~ \<not>
+ ~= \<noteq>
+ : \<in>
+ ~: \<notin>
+
+ For nominal the following two symbols have a special meaning
+
+ \<sharp> sharp (freshness)
+ \<bullet> bullet (permutation application)
+*}
+
+theory Tutorial1s
+imports Lambda
+begin
+
+section {* Theories *}
+
+text {*
+ All formal developments in Isabelle are part of a theory. A theory
+ needs to have a name and must import some pre-existing theory. The
+ imported theory will normally be Nominal2 (which provides many useful
+ concepts like set-theory, lists, nominal things etc). For the purpose
+ of this tutorial we import the theory Lambda.thy, which already contains
+ some useful definitions for (alpha-equated) lambda terms.
+*}
+
+
+
+section {* Types *}
+
+text {*
+ Isabelle is based on simple types including some polymorphism. It also
+ includes overloading, which means that sometimes explicit type annotations
+ need to be given.
+
+ - Base types include: nat, bool, string, lam (defined in the Lambda theory)
+
+ - Type formers include: 'a list, ('a \<times> 'b), 'c set
+
+ - Type variables are written like in ML with an apostrophe: 'a, 'b, \<dots>
+
+ Types known to Isabelle can be queried using the command "typ".
+*}
+
+typ nat
+typ bool
+typ string
+typ lam -- {* alpha-equated lambda terms defined in Lambda.thy *}
+typ name -- {* type of (object) variables defined in Lambda.thy *}
+typ "('a \<times> 'b)" -- {* pair type *}
+typ "'c set" -- {* set type *}
+typ "'a list" -- {* list type *}
+typ "lam \<Rightarrow> nat" -- {* type of functions from lambda terms to natural numbers *}
+
+
+text {* Some malformed types - note the "stop" signal on the left margin *}
+
+(*
+typ boolean -- {* undeclared type *}
+typ set -- {* type argument missing *}
+*)
+
+
+section {* Terms *}
+
+text {*
+ Every term in Isabelle needs to be well-typed. However, terms can have a
+ polymorphic type. Whether a term is accepted can be queried using
+ the command "term".
+*}
+
+term c -- {* a variable of polymorphic type *}
+term "1::nat" -- {* the constant 1 in natural numbers *}
+term 1 -- {* the constant 1 with polymorphic type *}
+term "{1, 2, 3::nat}" -- {* the set containing natural numbers 1, 2 and 3 *}
+term "[1, 2, 3]" -- {* the list containing the polymorphic numbers 1, 2 and 3 *}
+term "(True, ''c'')" -- {* a pair consisting of the boolean true and the string "c" *}
+term "Suc 0" -- {* successor of 0, in other words 1::nat *}
+term "Lam [x].Var x" -- {* a lambda-term *}
+term "App t1 t2" -- {* another lambda-term *}
+term "x::name" -- {* an (object) variable of type name *}
+term "atom (x::name)" -- {* atom is an overloded function *}
+
+text {*
+ Lam [x]. Var x is the syntax we made up for lambda abstractions. If you
+ prefer, you can have your own syntax (but \<lambda> is already taken for
+ Isabelle's functions). We also came up with the type "name" for variables.
+ You can introduce your own types of object variables using the
+ command atom_decl:
+*}
+
+atom_decl ident
+atom_decl ty_var
+
+text {*
+ Isabelle provides some useful colour feedback about its constants (black),
+ free variables (blue) and bound variables (green).
+*}
+
+term "True" -- {* a constant defined somewhere...written in black *}
+term "true" -- {* not recognised as a constant, therefore it is interpreted
+ as a free variable, written in blue *}
+term "\<forall>x. P x" -- {* x is bound (green), P is free (blue) *}
+
+
+text {* Formulae in Isabelle/HOL are terms of type bool *}
+
+term "True"
+term "True \<and> False"
+term "True \<or> B"
+term "{1,2,3} = {3,2,1}"
+term "\<forall>x. P x"
+term "A \<longrightarrow> B"
+term "atom a \<sharp> t" -- {* freshness in Nominal *}
+
+text {*
+ When working with Isabelle, one deals with an object logic (that is HOL) and
+ Isabelle's rule framework (called Pure). Occasionally one has to pay attention
+ to this fact. But for the moment we ignore this completely.
+*}
+
+term "A \<longrightarrow> B" -- {* versus *}
+term "A \<Longrightarrow> B"
+
+term "\<forall>x. P x" -- {* versus *}
+term "\<And>x. P x"
+
+
+section {* Inductive Definitions: Evaluation Relation *}
+
+text {*
+ In this section we want to define inductively the evaluation
+ relation for lambda terms and the cbv-reduction relation.
+
+ Inductive definitions in Isabelle start with the keyword "inductive"
+ and a predicate name. One can optionally provide a type for the predicate.
+ Clauses of the inductive predicate are introduced by "where" and multiple
+ clauses need to be separated by "|". One can also give a name to each
+ clause and indicate that it should be added to the hints database ("[intro]").
+ A typical clause has some premises and a conclusion. This is written in
+ Isabelle as:
+
+ "premise \<Longrightarrow> conclusion"
+ "premise1 \<Longrightarrow> premise2 \<Longrightarrow> \<dots> premisen \<Longrightarrow> conclusion"
+
+ There is an alternative way of writing the latter clause as
+
+ "\<lbrakk>premise1; premise2; \<dots> premisen\<rbrakk> \<Longrightarrow> conclusion"
+
+ If no premise is present, then one just writes
+
+ "conclusion"
+
+ Below we give the definitions of the evaluation relation and of values.
+*}
+
+inductive
+ eval :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<Down> _" [60, 60] 60)
+where
+ e_Lam[intro]: "Lam [x].t \<Down> Lam [x].t"
+| e_App[intro]: "\<lbrakk>t1 \<Down> Lam [x].t; t2 \<Down> v'; t[x::=v'] \<Down> v\<rbrakk> \<Longrightarrow> App t1 t2 \<Down> v"
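+
+text {*
+  Informally (this gloss is not part of the original text): e_Lam states
+  that lambda-abstractions evaluate to themselves; e_App first evaluates
+  the function position to a lambda-abstraction, then the argument, and
+  finally the body with the argument substituted for the bound variable,
+  i.e. a call-by-value big-step semantics.
+*}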
+
+text {*
+ Values and cbv are also defined using inductive.
+*}
+
+inductive
+ val :: "lam \<Rightarrow> bool"
+where
+ v_Lam[intro]: "val (Lam [x].t)"
+
+section {* Theorems *}
+
+text {*
+ A central concept in Isabelle is that of theorems. Isabelle's theorem
+ database can be queried using
+*}
+
+thm e_App
+thm e_Lam
+thm conjI
+thm conjunct1
+
+text {*
+ Notice that theorems usually contain schematic variables (e.g. ?P, ?Q, \<dots>).
+ These schematic variables can be substituted with any term (of the right type
+ of course).
+
+ When defining predicates such as eval, Isabelle provides us automatically
+ with the following theorems that state how they can be introduced and what
+ constitutes an induction over them.
+*}
+
+thm eval.intros
+thm eval.induct
+
+
+section {* Lemmas / Theorems / Corollaries *}
+
+text {*
+ Whether to use lemma, theorem or corollary makes no semantic difference
+ in Isabelle.
+
+ A lemma starts with "lemma" and consists of a statement ("shows \<dots>") and
+ optionally a lemma name, some type-information for variables ("fixes \<dots>")
+ and some assumptions ("assumes \<dots>").
+
+ Lemmas also need to have a proof, but ignore this 'detail' for the moment.
+
+ Examples are
+*}
+
+lemma alpha_equ:
+ shows "Lam [x].Var x = Lam [y].Var y"
+ by (simp add: lam.eq_iff Abs1_eq_iff lam.fresh fresh_at_base)
+
+lemma Lam_freshness:
+ assumes a: "atom y \<sharp> Lam [x].t"
+ shows "(y = x) \<or> (y \<noteq> x \<and> atom y \<sharp> t)"
+ using a by (auto simp add: lam.fresh Abs_fresh_iff)
+
+lemma neutral_element:
+ fixes x::"nat"
+ shows "x + 0 = x"
+ by simp
+
+text {*
+ Note that in the last statement, the explicit type annotation is important
+ in order for Isabelle to be able to figure out what 0 stands for (e.g. a
+ natural number, a vector, etc) and which lemmas to apply.
+*}
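+
+text {*
+  To illustrate (this example is not part of the original text): without
+  the type annotation, the 0 and + in the statement below refer to
+  arbitrary overloaded constants, so the usual arithmetic simp rules no
+  longer apply and the proof would presumably not go through.
+*}
+
+(*
+lemma
+  shows "x + 0 = x"
+  by simp
+*)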
+
+
+section {* Isar Proofs *}
+
+text {*
+ Isar is a language for writing formal proofs that can be understood by humans
+ and by Isabelle. An Isar proof can be thought of as a sequence of 'stepping stones'
+ that start with some assumptions and lead to the goal to be established. Every such
+ stepping stone is introduced by "have" followed by the statement of the stepping
+ stone. An exception is the goal to be proved, which needs to be introduced with "show".
+
+ have "statement" \<dots>
+ show "goal_to_be_proved" \<dots>
+
+ Since proofs usually do not proceed in a linear fashion, labels can be given
+ to every stepping stone and they can be used later to explicitly refer to the
+ corresponding stepping stone ("using").
+
+ have label: "statement1" \<dots>
+ \<dots>
+ have "later_statement" using label \<dots>
+
+ Each stepping stone (or have-statement) needs to have a justification. The
+ simplest justification is "sorry" which admits any stepping stone, even false
+ ones (this is good during the development of proofs).
+
+ have "outrageous_false_statement" sorry
+
+ Assumptions can be 'justified' using "by fact".
+
+ have "assumption" by fact
+
+ Derived facts can be justified using
+
+ have "statement" by simp -- simplification
+ have "statement" by auto -- proof search and simplification
+ have "statement" by blast -- only proof search
+
+ If facts or lemmas are needed in order to justify a have-statement, then
+ one can feed these facts into the proof by using "using label \<dots>" or
+ "using theorem-name \<dots>". More than one label at a time is allowed. For
+ example
+
+ have "statement" using label1 label2 theorem_name by auto
+
+ Induction proofs in Isar are set up by indicating over which predicate(s)
+ the induction proceeds ("using a b") followed by the command "proof (induct)".
+ In this way, Isabelle uses defaults for which induction should be performed.
+ These defaults can be overridden by giving more information, like the variable
+ over which a structural induction should proceed, or a specific induction principle,
+ such as well-founded inductions.
+
+ After the induction is set up, the proof proceeds by cases. In Isar these
+ cases can be given in any order. Most commonly they are started with "case" and the
+ name of the case, and optionally some legible names for the variables used inside
+ the case.
+
+ In each "case", we need to establish a statement introduced by "show". Once
+ this has been done, the next case can be started using "next". When all cases
+ are completed, the proof can be finished using "qed".
+
+ This means a typical induction proof has the following pattern
+
+ proof (induct)
+ case \<dots>
+ \<dots>
+ show \<dots>
+ next
+ case \<dots>
+ \<dots>
+ show \<dots>
+ \<dots>
+ qed
+
+ Two very simple example proofs are as follows.
+*}
+
+subsection {* EXERCISE 1 *}
+
+lemma eval_val:
+ assumes a: "val t"
+ shows "t \<Down> t"
+using a
+proof (induct)
+ case (v_Lam x t)
+ show "Lam [x]. t \<Down> Lam [x]. t" by auto
+qed
+
+subsection {* EXERCISE 2 *}
+
+text {* Fill in the gaps in the proof below. *}
+
+lemma eval_to_val:
+ assumes a: "t \<Down> t'"
+ shows "val t'"
+using a
+proof (induct)
+ case (e_Lam x t)
+ show "val (Lam [x].t)" by auto
+next
+ case (e_App t1 x t t2 v v')
+ -- {* all assumptions in this case *}
+ have "t1 \<Down> Lam [x].t" by fact
+ have ih1: "val (Lam [x]. t)" by fact
+ have "t2 \<Down> v" by fact
+ have ih2: "val v" by fact
+ have "t [x ::= v] \<Down> v'" by fact
+ have ih3: "val v'" by fact
+
+ show "val v'" using ih3 by auto
+qed
+
+
+section {* Datatypes: Evaluation Contexts *}
+
+text {*
+ Datatypes can be defined in Isabelle as follows: we have to provide the name
+ of the datatype and a list of its type-constructors. Each type-constructor needs
+ to have the information about the types of its arguments, and optionally
+ can also contain some information about pretty syntax. For example, we like
+ to write "\<box>" for holes.
+*}
+
+datatype ctx =
+ Hole ("\<box>")
+ | CAppL "ctx" "lam"
+ | CAppR "lam" "ctx"
+
+text {* Now Isabelle knows about: *}
+
+typ ctx
+term "\<box>"
+term "CAppL"
+term "CAppL \<box> (Var x)"
+
+subsection {* MINI EXERCISE *}
+
+text {*
+ Try it and see what happens if you apply a Hole to a Hole.
+*}
+
+
+section {* Machines for Evaluation *}
+
+type_synonym ctxs = "ctx list"
+
+inductive
+ machine :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto> <_,_>" [60, 60, 60, 60] 60)
+where
+ m1: "<App t1 t2, Es> \<mapsto> <t1, (CAppL \<box> t2) # Es>"
+| m2: "val v \<Longrightarrow> <v, (CAppL \<box> t2) # Es> \<mapsto> <t2, (CAppR v \<box>) # Es>"
+| m3: "val v \<Longrightarrow> <v, (CAppR (Lam [x].t) \<box>) # Es> \<mapsto> <t[x ::= v], Es>"
+
+text {*
+ Since the machine defined above only performs a single reduction,
+ we need to define the reflexive-transitive closure of this machine. *}
+
+inductive
+ machines :: "lam \<Rightarrow> ctxs \<Rightarrow> lam \<Rightarrow> ctxs \<Rightarrow> bool" ("<_,_> \<mapsto>* <_,_>" [60, 60, 60, 60] 60)
+where
+ ms1: "<t,Es> \<mapsto>* <t,Es>"
+| ms2: "\<lbrakk><t1,Es1> \<mapsto> <t2,Es2>; <t2,Es2> \<mapsto>* <t3,Es3>\<rbrakk> \<Longrightarrow> <t1,Es1> \<mapsto>* <t3,Es3>"
+
+declare machine.intros[intro] machines.intros[intro]
+
+section {* EXERCISE 3 *}
+
+text {*
+ We need to show that the machines-relation is transitive.
+ Fill in the gaps below.
+*}
+
+lemma ms3[intro]:
+ assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
+ and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
+ shows "<e1, Es1> \<mapsto>* <e3, Es3>"
+using a b
+proof(induct)
+ case (ms1 e1 Es1)
+ have c: "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
+ then show "<e1, Es1> \<mapsto>* <e3, Es3>" by simp
+next
+ case (ms2 e1 Es1 e2 Es2 e2' Es2')
+ have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
+ have d1: "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
+ have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
+ have d3: "<e2, Es2> \<mapsto>* <e3, Es3>" using ih d1 by auto
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" using d2 d3 by auto
+qed
+
+text {*
+ Just like gotos in the Basic programming language, labels often reduce
+ the readability of proofs. Therefore one can use in Isar the notation
+ "then have" in order to feed a have-statement to the proof of
+ the next have-statement. This is used in the second case below.
+*}
+
+lemma
+ assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
+ and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
+ shows "<e1, Es1> \<mapsto>* <e3, Es3>"
+using a b
+proof(induct)
+ case (ms1 e1 Es1)
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
+next
+ case (ms2 e1 Es1 e2 Es2 e2' Es2')
+ have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
+ have "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
+ then have d3: "<e2, Es2> \<mapsto>* <e3, Es3>" using ih by simp
+ have d2: "<e1, Es1> \<mapsto> <e2, Es2>" by fact
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" using d2 d3 by auto
+qed
+
+text {*
+ The label ih2 cannot be got rid of in this way, because it is used
+ two lines below and we cannot rearrange them. We can still avoid the
+ label by feeding a sequence of facts into a proof using the
+ "moreover"-chaining mechanism:
+
+ have "statement_1" \<dots>
+ moreover
+ have "statement_2" \<dots>
+ \<dots>
+ moreover
+ have "statement_n" \<dots>
+ ultimately have "statement" \<dots>
+
+ In this chain, all "statement_i" can be used in the proof of the final
+ "statement". With this we can simplify our proof further to:
+*}
+
+lemma
+ assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
+ and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
+ shows "<e1, Es1> \<mapsto>* <e3, Es3>"
+using a b
+proof(induct)
+ case (ms1 e1 Es1)
+ show "<e1, Es1> \<mapsto>* <e3, Es3>" by fact
+next
+ case (ms2 e1 Es1 e2 Es2 e2' Es2')
+ have ih: "<e2', Es2'> \<mapsto>* <e3, Es3> \<Longrightarrow> <e2, Es2> \<mapsto>* <e3, Es3>" by fact
+ have "<e2', Es2'> \<mapsto>* <e3, Es3>" by fact
+ then have "<e2, Es2> \<mapsto>* <e3, Es3>" using ih by simp
+ moreover
+ have "<e1, Es1> \<mapsto> <e2, Es2>" by fact
+ ultimately show "<e1, Es1> \<mapsto>* <e3, Es3>" by auto
+qed
+
+
+text {*
+ While automatic proof procedures in Isabelle are not able to prove statements
+ like "P = NP" assuming usual definitions for P and NP, they can automatically
+ discharge the lemmas we just proved. For this we only have to set up the induction
+ and auto will take care of the rest. This means we can write:
+*}
+
+lemma
+ assumes a: "val t"
+ shows "t \<Down> t"
+using a by (induct) (auto)
+
+lemma
+ assumes a: "t \<Down> t'"
+ shows "val t'"
+using a by (induct) (auto)
+
+lemma
+ assumes a: "<e1, Es1> \<mapsto>* <e2, Es2>"
+ and b: "<e2, Es2> \<mapsto>* <e3, Es3>"
+ shows "<e1, Es1> \<mapsto>* <e3, Es3>"
+using a b by (induct) (auto)
+
+
+section {* EXERCISE 4 *}
+
+text {*
+ The point of the machine is that it simulates the evaluation
+ relation. Therefore we would like to prove the following:
+*}
+
+theorem
+ assumes a: "t \<Down> t'"
+ shows "<t, Es> \<mapsto>* <t', Es>"
+using a
+proof (induct arbitrary: Es)
+ case (e_Lam x t Es)
+ -- {* no assumptions *}
+ show "<Lam [x].t, Es> \<mapsto>* <Lam [x].t, Es>" by auto
+next
+ case (e_App t1 x t t2 v' v Es)
+ -- {* all assumptions in this case *}
+ have a1: "t1 \<Down> Lam [x].t" by fact
+ have ih1: "\<And>Es. <t1, Es> \<mapsto>* <Lam [x].t, Es>" by fact
+ have a2: "t2 \<Down> v'" by fact
+ have ih2: "\<And>Es. <t2, Es> \<mapsto>* <v', Es>" by fact
+ have a3: "t[x::=v'] \<Down> v" by fact
+ have ih3: "\<And>Es. <t[x::=v'], Es> \<mapsto>* <v, Es>" by fact
+ -- {* your reasoning *}
+ have "<App t1 t2, Es> \<mapsto>* <t1, CAppL \<box> t2 # Es>" by auto
+ moreover
+ have "<t1, CAppL \<box> t2 # Es> \<mapsto>* <Lam [x].t, CAppL \<box> t2 # Es>" using ih1 by auto
+ moreover
+ have "<Lam [x].t, CAppL \<box> t2 # Es> \<mapsto>* <t2, CAppR (Lam [x].t) \<box> # Es>" by auto
+ moreover
+ have "<t2, CAppR (Lam [x].t) \<box> # Es> \<mapsto>* <v', CAppR (Lam [x].t) \<box> # Es>"
+ using ih2 by auto
+ moreover
+ have "val v'" using a2 eval_to_val by auto
+ then have "<v', CAppR (Lam [x].t) \<box> # Es> \<mapsto>* <t[x::=v'], Es>" by auto
+ moreover
+ have "<t[x::=v'], Es> \<mapsto>* <v, Es>" using ih3 by auto
+ ultimately show "<App t1 t2, Es> \<mapsto>* <v, Es>" by blast
+qed
+
+
+text {*
+ Again, the automatic tools in Isabelle can automatically discharge
+ the routine work in these proofs. We can write:
+*}
+
+theorem eval_implies_machines_ctx:
+ assumes a: "t \<Down> t'"
+ shows "<t, Es> \<mapsto>* <t', Es>"
+using a
+by (induct arbitrary: Es)
+ (metis eval_to_val machine.intros ms1 ms2 ms3 v_Lam)+
+
+corollary eval_implies_machines:
+ assumes a: "t \<Down> t'"
+ shows "<t, []> \<mapsto>* <t', []>"
+using a eval_implies_machines_ctx by simp
+
+
+section {* Function Definitions: Filling a Lambda-Term into a Context *}
+
+text {*
+ Many functions over datatypes can be defined by recursion on the
+ structure. For this purpose, Isabelle provides "fun". To use it one needs
+ to give a name for the function, its type, optionally some pretty-syntax
+ and then some equations defining the function. Like in "inductive",
+ "fun" expects multiple equations to be separated by "|".
+*}
+
+fun
+ filling :: "ctx \<Rightarrow> lam \<Rightarrow> lam" ("_\<lbrakk>_\<rbrakk>" [100, 100] 100)
+where
+ "\<box>\<lbrakk>t\<rbrakk> = t"
+| "(CAppL E t')\<lbrakk>t\<rbrakk> = App (E\<lbrakk>t\<rbrakk>) t'"
+| "(CAppR t' E)\<lbrakk>t\<rbrakk> = App t' (E\<lbrakk>t\<rbrakk>)"
+
+
+text {*
+ After this definition Isabelle will be able to simplify
+ statements like:
+*}
+
+lemma
+ shows "(CAppL \<box> (Var x))\<lbrakk>Var y\<rbrakk> = App (Var y) (Var x)"
+ by simp
+
+fun
+ ctx_compose :: "ctx \<Rightarrow> ctx \<Rightarrow> ctx" (infixr "\<odot>" 99)
+where
+ "\<box> \<odot> E' = E'"
+| "(CAppL E t') \<odot> E' = CAppL (E \<odot> E') t'"
+| "(CAppR t' E) \<odot> E' = CAppR t' (E \<odot> E')"
+
+fun
+ ctx_composes :: "ctxs \<Rightarrow> ctx" ("_\<down>" [110] 110)
+where
+ "[]\<down> = \<box>"
+ | "(E # Es)\<down> = (Es\<down>) \<odot> E"
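+
+text {*
+  As a small check (not part of the original text): the equations of
+  both functions are simp rules, so a statement like the following
+  should be provable by simp alone.
+*}
+
+lemma
+  shows "[CAppL \<box> (Var x)]\<down> = CAppL \<box> (Var x)"
+  by simp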
+
+text {*
+ Notice that we have not just given pretty syntax for the functions, but
+ also some precedences. The numbers inside the [\<dots>] stand for the precedences
+ of the arguments; the number next to it is the precedence of the whole term.
+
+ This means we have to write (Es1 \<odot> Es2) \<odot> Es3 otherwise Es1 \<odot> Es2 \<odot> Es3 is
+ interpreted as Es1 \<odot> (Es2 \<odot> Es3).
+*}
+
+section {* Structural Inductions over Contexts *}
+
+text {*
+ So far we have had a look at an induction over an inductive predicate.
+ Another important induction principle is structural induction over
+ datatypes. To illustrate structural induction we prove some facts
+ about context composition, some of which we will need later on.
+*}
+
+subsection {* EXERCISE 5 *}
+
+text {* Complete the proof and remove the sorries. *}
+
+lemma
+ shows "(E1 \<odot> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>"
+proof (induct E1)
+ case Hole
+ show "(\<box> \<odot> E2)\<lbrakk>t\<rbrakk> = \<box>\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by simp
+next
+ case (CAppL E1 t')
+ have ih: "(E1 \<odot> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by fact
+ show "((CAppL E1 t') \<odot> E2)\<lbrakk>t\<rbrakk> = (CAppL E1 t')\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" using ih by simp
+next
+ case (CAppR t' E1)
+ have ih: "(E1 \<odot> E2)\<lbrakk>t\<rbrakk> = E1\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" by fact
+ show "((CAppR t' E1) \<odot> E2)\<lbrakk>t\<rbrakk> = (CAppR t' E1)\<lbrakk>E2\<lbrakk>t\<rbrakk>\<rbrakk>" using ih by simp
+qed
+
+subsection {* EXERCISE 6 *}
+
+text {*
+ Remove the sorries in the ctx_append proof below. You can make
+ use of the following two properties.
+*}
+
+lemma neut_hole:
+ shows "E \<odot> \<box> = E"
+by (induct E) (simp_all)
+
+lemma compose_assoc:
+ shows "(E1 \<odot> E2) \<odot> E3 = E1 \<odot> (E2 \<odot> E3)"
+by (induct E1) (simp_all)
+
+lemma
+ shows "(Es1 @ Es2)\<down> = (Es2\<down>) \<odot> (Es1\<down>)"
+proof (induct Es1)
+ case Nil
+ show "([] @ Es2)\<down> = Es2\<down> \<odot> []\<down>" using neut_hole by simp
+next
+ case (Cons E Es1)
+ have ih: "(Es1 @ Es2)\<down> = Es2\<down> \<odot> Es1\<down>" by fact
+ have eq1: "((E # Es1) @ Es2)\<down> = (E # (Es1 @ Es2))\<down>" by simp
+ have eq2: "(E # (Es1 @ Es2))\<down> = (Es1 @ Es2)\<down> \<odot> E" by simp
+ have eq3: "(Es1 @ Es2)\<down> \<odot> E = (Es2\<down> \<odot> Es1\<down>) \<odot> E" using ih by simp
+ have eq4: "(Es2\<down> \<odot> Es1\<down>) \<odot> E = Es2\<down> \<odot> (Es1\<down> \<odot> E)" using compose_assoc by simp
+ have eq5: "Es2\<down> \<odot> (Es1\<down> \<odot> E) = Es2\<down> \<odot> (E # Es1)\<down>" by simp
+ show "((E # Es1) @ Es2)\<down> = Es2\<down> \<odot> (E # Es1)\<down>" using eq1 eq2 eq3 eq4 eq5 by (simp only:)
+qed
+
+text {*
+ The last proof involves several steps of equational reasoning.
+ Isar provides some convenient means to express such equational
+ reasoning in a much cleaner fashion using the "also have"
+ construction.
+*}
+
+lemma
+ shows "(Es1 @ Es2)\<down> = (Es2\<down>) \<odot> (Es1\<down>)"
+proof (induct Es1)
+ case Nil
+ show "([] @ Es2)\<down> = Es2\<down> \<odot> []\<down>" using neut_hole by simp
+next
+ case (Cons E Es1)
+ have ih: "(Es1 @ Es2)\<down> = Es2\<down> \<odot> Es1\<down>" by fact
+ have "((E # Es1) @ Es2)\<down> = (E # (Es1 @ Es2))\<down>" by simp
+ also have "\<dots> = (Es1 @ Es2)\<down> \<odot> E" by simp
+ also have "\<dots> = (Es2\<down> \<odot> Es1\<down>) \<odot> E" using ih by simp
+ also have "\<dots> = Es2\<down> \<odot> (Es1\<down> \<odot> E)" using compose_assoc by simp
+ also have "\<dots> = Es2\<down> \<odot> (E # Es1)\<down>" by simp
+ finally show "((E # Es1) @ Es2)\<down> = Es2\<down> \<odot> (E # Es1)\<down>" by simp
+qed
+
+
+end
+
--- a/Tutorial/Tutorial2.thy Sun Jan 23 07:15:59 2011 +0900
+++ b/Tutorial/Tutorial2.thy Sun Jan 23 07:17:35 2011 +0900
@@ -1,15 +1,93 @@
+
theory Tutorial2
-imports Tutorial1
+imports Lambda
begin
-(* fixme: put height example here *)
+section {* Height of a Lambda-Term *}
+
+text {*
+ The theory Lambda defines the notions of capture-avoiding
+ substitution and the height of lambda terms.
+*}
+
+thm height.simps
+thm subst.simps
+
+subsection {* EXERCISE 7 *}
+
+text {*
+ Let's prove a property about the height of substitutions.
+
+ Assume that the height of a lambda-term is always greater
+ than or equal to 1.
+*}
+
+lemma height_ge_one:
+ shows "1 \<le> height t"
+by (induct t rule: lam.induct) (auto)
+
+text {* Remove the sorries *}
+
+theorem height_subst:
+ shows "height (t[x ::= t']) \<le> height t - 1 + height t'"
+proof (induct t rule: lam.induct)
+ case (Var y)
+ show "height (Var y[x ::= t']) \<le> height (Var y) - 1 + height t'" sorry
+next
+ case (App t1 t2)
+ have ih1: "height (t1[x::=t']) \<le> (height t1) - 1 + height t'" by fact
+ have ih2: "height (t2[x::=t']) \<le> (height t2) - 1 + height t'" by fact
+
+ show "height ((App t1 t2)[x ::= t']) \<le> height (App t1 t2) - 1 + height t'" sorry
+next
+ case (Lam y t1)
+ have ih: "height (t1[x::=t']) \<le> height t1 - 1 + height t'" by fact
+ -- {* the definition of capture-avoiding substitution *}
+ thm subst.simps
+
+ show "height ((Lam [y].t1)[x ::= t']) \<le> height (Lam [y].t1) - 1 + height t'" sorry
+qed
-section {* Types *}
+text {*
+ The point is that substitutions can only be moved under a binder
+ provided certain freshness conditions are satisfied. The structural
+ induction above does not say anything about such freshness conditions.
+
+ Fortunately, Nominal Isabelle automatically derives a stronger induction
+ principle for lambda terms which has the usual variable convention
+ built in.
+
+ In the proof below, we use this stronger induction principle. The
+ variable and application cases are as before.
+*}
+
+
+theorem
+ shows "height (t[x ::= t']) \<le> height t - 1 + height t'"
+proof (nominal_induct t avoiding: x t' rule: lam.strong_induct)
+ case (Var y)
+ have "1 \<le> height t'" using height_ge_one by simp
+ then show "height (Var y[x ::= t']) \<le> height (Var y) - 1 + height t'" by simp
+next
+ case (App t1 t2)
+ have ih1: "height (t1[x::=t']) \<le> (height t1) - 1 + height t'"
+ and ih2: "height (t2[x::=t']) \<le> (height t2) - 1 + height t'" by fact+
+ then show "height ((App t1 t2)[x ::= t']) \<le> height (App t1 t2) - 1 + height t'" by simp
+next
+ case (Lam y t1)
+ have ih: "height (t1[x::=t']) \<le> height t1 - 1 + height t'" by fact
+ have vc: "atom y \<sharp> x" "atom y \<sharp> t'" by fact+ -- {* usual variable convention *}
+
+ show "height ((Lam [y].t1)[x ::= t']) \<le> height (Lam [y].t1) - 1 + height t'" sorry
+qed
+
+
+section {* Types and the Weakening Lemma *}
nominal_datatype ty =
tVar "string"
-| tArr "ty" "ty" ("_ \<rightarrow> _" [100, 100] 100)
+| tArr "ty" "ty" (infixr "\<rightarrow>" 100)
text {*
@@ -19,7 +97,6 @@
We next define the type of typing contexts, a predicate that
defines valid contexts (i.e. lists that contain only unique
variables) and the typing judgement.
-
*}
type_synonym ty_ctx = "(name \<times> ty) list"
@@ -79,7 +156,7 @@
"\<Gamma>1 \<sqsubseteq> \<Gamma>2 \<equiv> \<forall>x. x \<in> set \<Gamma>1 \<longrightarrow> x \<in> set \<Gamma>2"
-subsection {* EXERCISE 4 *}
+subsection {* EXERCISE 8 *}
text {*
Fill in the details and give a proof of the weakening lemma.
@@ -107,7 +184,7 @@
have a2: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
-qed (auto) -- {* the application case *}
+qed (auto) -- {* the application case is automatic *}
text {*
@@ -170,10 +247,10 @@
text {*
- The remedy is to use a stronger induction principle that
+ The remedy is again to use a stronger induction principle that
 has the usual "variable convention" already built in. The
- following command derives this induction principle for us.
- (We shall explain what happens here later.)
+ following command derives this induction principle for the typing
+ relation. (We shall explain what happens here later.)
*}
nominal_inductive typing
@@ -184,7 +261,7 @@
text {* Compare the two induction principles *}
thm typing.induct
-thm typing.strong_induct -- {* has the additional assumption {atom x} \<sharp> c *}
+thm typing.strong_induct -- {* note the additional assumption {atom x} \<sharp> c *}
text {*
We can use the stronger induction principle by replacing
@@ -198,7 +275,8 @@
Try now the proof.
*}
-
+
+subsection {* EXERCISE 9 *}
lemma weakening:
assumes a: "\<Gamma>1 \<turnstile> t : T"
@@ -207,7 +285,7 @@
shows "\<Gamma>2 \<turnstile> t : T"
using a b c
proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
- case (t_Var \<Gamma>1 x T) -- {* variable case is as before *}
+ case (t_Var \<Gamma>1 x T) -- {* again the variable case is as before *}
have "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
moreover
have "valid \<Gamma>2" by fact
@@ -221,34 +299,56 @@
have a0: "atom x \<sharp> \<Gamma>1" by fact
have a1: "valid \<Gamma>2" by fact
have a2: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
- have "valid ((x, T1) # \<Gamma>2)" using a1 vc by auto
- moreover
- have "(x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # \<Gamma>2" using a2 by auto
- ultimately
- have "(x, T1) # \<Gamma>2 \<turnstile> t : T2" using ih by simp
- then show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" using vc by auto
+
+ show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
qed (auto) -- {* app case *}
-text {* unbind / inconsistency example *}
+
+section {* Unbind and an Inconsistency Example *}
+
+text {*
+ You might wonder why we had to discharge some proof
+ obligations in order to obtain the stronger induction
+ principle for the typing relation. (Remember that for
+ lambda terms this stronger induction principle is
+ derived automatically.)
+
+ These proof obligations are really needed, because if we
+ simply assume such a stronger induction principle, then
+ in some cases we can derive false. This is "shown" below.
+*}
+
inductive
- unbind :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<mapsto> _" [60, 60] 60)
+ unbind :: "lam \<Rightarrow> lam \<Rightarrow> bool" (infixr "\<mapsto>" 60)
where
u_Var[intro]: "Var x \<mapsto> Var x"
| u_App[intro]: "App t1 t2 \<mapsto> App t1 t2"
| u_Lam[intro]: "t \<mapsto> t' \<Longrightarrow> Lam [x].t \<mapsto> t'"
+text {* It is clear that the following judgement holds. *}
+
lemma unbind_simple:
shows "Lam [x].Var x \<mapsto> Var x"
by auto
+text {*
+ Now let's derive the strong induction principle for unbind.
+ The point is that we cannot establish the proof obligations,
+ therefore we force Isabelle to accept them by using "sorry".
+*}
+
equivariance unbind
-
nominal_inductive unbind
avoids u_Lam: "x"
sorry
+text {*
+ Using the stronger induction principle, we can establish
+ the following bogus property.
+*}
+
lemma unbind_fake:
fixes y::"name"
assumes a: "t \<mapsto> t'"
@@ -265,12 +365,20 @@
then show "atom y \<sharp> t'" using ih by simp
qed
+text {*
+ And from this follows the inconsistency:
+*}
+
lemma
shows "False"
proof -
have "atom x \<sharp> Lam [x]. Var x" by (simp add: lam.fresh)
- then have "atom x \<sharp> Var x" using unbind_fake unbind_simple by auto
- then show "False" by (simp add: lam.fresh fresh_at_base)
+ moreover
+ have "Lam [x]. Var x \<mapsto> Var x" using unbind_simple by auto
+ ultimately
+ have "atom x \<sharp> Var x" using unbind_fake by auto
+ then have "x \<noteq> x" by (simp add: lam.fresh fresh_at_base)
+ then show "False" by simp
qed
end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Tutorial/Tutorial2s.thy Sun Jan 23 07:17:35 2011 +0900
@@ -0,0 +1,354 @@
+
+theory Tutorial2s
+imports Lambda
+begin
+
+section {* Height of a Lambda-Term *}
+
+text {*
+ The theory Lambda defines the notions of capture-avoiding
+ substitution and the height of lambda terms.
+*}
+
+thm height.simps
+thm subst.simps
+
+subsection {* EXERCISE 7 *}
+
+text {*
+ Let's prove a property about the height of substitutions.
+
+ Assume that the height of a lambda-term is always greater
+ than or equal to 1.
+*}
+
+lemma height_ge_one:
+ shows "1 \<le> height t"
+by (induct t rule: lam.induct) (auto)
+
+text {* Remove the sorries *}
+
+theorem
+ shows "height (t[x ::= t']) \<le> height t - 1 + height t'"
+proof (nominal_induct t avoiding: x t' rule: lam.strong_induct)
+ case (Var y)
+ have "1 \<le> height t'" using height_ge_one by simp
+ then show "height (Var y[x ::= t']) \<le> height (Var y) - 1 + height t'" by simp
+next
+ case (App t1 t2)
+ have ih1: "height (t1[x::=t']) \<le> (height t1) - 1 + height t'"
+ and ih2: "height (t2[x::=t']) \<le> (height t2) - 1 + height t'" by fact+
+ then show "height ((App t1 t2)[x ::= t']) \<le> height (App t1 t2) - 1 + height t'" by simp
+next
+ case (Lam y t1)
+ have ih: "height (t1[x::=t']) \<le> height t1 - 1 + height t'" by fact
+ moreover
+ have vc: "atom y \<sharp> x" "atom y \<sharp> t'" by fact+ -- {* usual variable convention *}
+ ultimately
+ show "height ((Lam [y].t1)[x ::= t']) \<le> height (Lam [y].t1) - 1 + height t'" by simp
+qed
+
+
+section {* Types and the Weakening Lemma *}
+
+nominal_datatype ty =
+ tVar "string"
+| tArr "ty" "ty" (infixr "\<rightarrow>" 100)
+
+
+text {*
+ Having defined them as nominal datatypes gives us additional
+ definitions and theorems that come with types (see below).
+
+ We next define the type of typing contexts, a predicate that
+ defines valid contexts (i.e. lists that contain only unique
+ variables) and the typing judgement.
+*}
+
+type_synonym ty_ctx = "(name \<times> ty) list"
+
+inductive
+ valid :: "ty_ctx \<Rightarrow> bool"
+where
+ v1[intro]: "valid []"
+| v2[intro]: "\<lbrakk>valid \<Gamma>; atom x \<sharp> \<Gamma>\<rbrakk>\<Longrightarrow> valid ((x, T) # \<Gamma>)"
+
+
+inductive
+ typing :: "ty_ctx \<Rightarrow> lam \<Rightarrow> ty \<Rightarrow> bool" ("_ \<turnstile> _ : _" [60, 60, 60] 60)
+where
+ t_Var[intro]: "\<lbrakk>valid \<Gamma>; (x, T) \<in> set \<Gamma>\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Var x : T"
+| t_App[intro]: "\<lbrakk>\<Gamma> \<turnstile> t1 : T1 \<rightarrow> T2; \<Gamma> \<turnstile> t2 : T1\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> App t1 t2 : T2"
+| t_Lam[intro]: "\<lbrakk>atom x \<sharp> \<Gamma>; (x, T1) # \<Gamma> \<turnstile> t : T2\<rbrakk> \<Longrightarrow> \<Gamma> \<turnstile> Lam [x].t : T1 \<rightarrow> T2"
+
+
+text {*
+ The predicate atom x \<sharp> \<Gamma>, read as x fresh for \<Gamma>, is defined by
+ Nominal Isabelle. It is defined for lambda-terms, products, lists etc.
+ For example we have:
+*}
+
+lemma
+ fixes x::"name"
+ shows "atom x \<sharp> Lam [x].t"
+ and "atom x \<sharp> (t1, t2) \<Longrightarrow> atom x \<sharp> App t1 t2"
+ and "atom x \<sharp> Var y \<Longrightarrow> atom x \<sharp> y"
+ and "\<lbrakk>atom x \<sharp> t1; atom x \<sharp> t2\<rbrakk> \<Longrightarrow> atom x \<sharp> (t1, t2)"
+ and "\<lbrakk>atom x \<sharp> l1; atom x \<sharp> l2\<rbrakk> \<Longrightarrow> atom x \<sharp> (l1 @ l2)"
+ and "atom x \<sharp> y \<Longrightarrow> x \<noteq> y"
+ by (simp_all add: lam.fresh fresh_append fresh_at_base)
+
+text {*
+ We can also prove that every variable is fresh for a type.
+*}
+
+lemma ty_fresh:
+ fixes x::"name"
+ and T::"ty"
+ shows "atom x \<sharp> T"
+by (induct T rule: ty.induct)
+ (simp_all add: ty.fresh pure_fresh)
+
+text {*
+ In order to state the weakening lemma in a convenient form, we
+ use the following abbreviation. Abbreviations behave like
+ definitions, except that they are always automatically folded
+ and unfolded.
+*}
+
+abbreviation
+ "sub_ty_ctx" :: "ty_ctx \<Rightarrow> ty_ctx \<Rightarrow> bool" ("_ \<sqsubseteq> _" [60, 60] 60)
+where
+ "\<Gamma>1 \<sqsubseteq> \<Gamma>2 \<equiv> \<forall>x. x \<in> set \<Gamma>1 \<longrightarrow> x \<in> set \<Gamma>2"
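+
+text {*
+  A small illustration (not part of the original text): because the
+  abbreviation is unfolded automatically, the following trivial
+  inclusion should be provable directly by auto.
+*}
+
+lemma
+  shows "\<Gamma> \<sqsubseteq> \<Gamma>"
+  by auto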
+
+
+subsection {* EXERCISE 8 *}
+
+text {*
+ Fill in the details and give a proof of the weakening lemma.
+*}
+
+lemma
+ assumes a: "\<Gamma>1 \<turnstile> t : T"
+ and b: "valid \<Gamma>2"
+ and c: "\<Gamma>1 \<sqsubseteq> \<Gamma>2"
+ shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (induct arbitrary: \<Gamma>2)
+ case (t_Var \<Gamma>1 x T)
+ have a1: "valid \<Gamma>1" by fact
+ have a2: "(x, T) \<in> set \<Gamma>1" by fact
+ have a3: "valid \<Gamma>2" by fact
+ have a4: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
+
+ show "\<Gamma>2 \<turnstile> Var x : T" sorry
+next
+ case (t_Lam x \<Gamma>1 T1 t T2)
+ have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x, T1) # \<Gamma>1 \<sqsubseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
+ have a0: "atom x \<sharp> \<Gamma>1" by fact
+ have a1: "valid \<Gamma>2" by fact
+ have a2: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
+
+ show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" sorry
+qed (auto) -- {* the application case is automatic *}
+
+
+text {*
+ Despite the frequent claim that the weakening lemma is trivial,
+ routine or obvious, the proof in the lambda-case does not go
+ through smoothly. Painful variable renamings seem to be necessary.
+ But the proof using renamings is horribly complicated (see below).
+*}
+
+equivariance valid
+equivariance typing
+
+lemma weakening_NOT_TO_BE_TRIED_AT_HOME:
+ assumes a: "\<Gamma>1 \<turnstile> t : T"
+ and b: "valid \<Gamma>2"
+ and c: "\<Gamma>1 \<sqsubseteq> \<Gamma>2"
+ shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (induct arbitrary: \<Gamma>2)
+ -- {* lambda case *}
+ case (t_Lam x \<Gamma>1 T1 t T2)
+ have fc0: "atom x \<sharp> \<Gamma>1" by fact
+ have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x, T1) # \<Gamma>1 \<sqsubseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
+ -- {* we choose a fresh variable *}
+ obtain c::"name" where fc1: "atom c \<sharp> (x, t, \<Gamma>1, \<Gamma>2)" by (rule obtain_fresh)
+ -- {* we alpha-rename the abstraction *}
+ have "Lam [c].((c \<leftrightarrow> x) \<bullet> t) = Lam [x].t" using fc1
+ by (auto simp add: lam.eq_iff Abs1_eq_iff flip_def)
+ moreover
+ -- {* we can then alpha rename the goal *}
+ have "\<Gamma>2 \<turnstile> Lam [c].((c \<leftrightarrow> x) \<bullet> t) : T1 \<rightarrow> T2"
+ proof -
+ -- {* we need to establish *}
+ -- {* (1) (x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2) *}
+ -- {* (2) valid ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2)) *}
+ have "(1)": "(x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2)"
+ proof -
+ have "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
+ then have "(c \<leftrightarrow> x) \<bullet> ((x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2))" using fc0 fc1
+ by (perm_simp) (simp add: flip_fresh_fresh)
+ then show "(x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2)" by (rule permute_boolE)
+ qed
+ moreover
+ have "(2)": "valid ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2))"
+ proof -
+ have "valid \<Gamma>2" by fact
+ then show "valid ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2))" using fc1
+ by (auto simp add: fresh_permute_left atom_eqvt valid.eqvt)
+ qed
+ -- {* these two facts give us by induction hypothesis the following *}
+ ultimately have "(x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2) \<turnstile> t : T2" using ih by simp
+ -- {* we now apply renamings to get to our goal *}
+ then have "(c \<leftrightarrow> x) \<bullet> ((x, T1) # ((c \<leftrightarrow> x) \<bullet> \<Gamma>2) \<turnstile> t : T2)" by (rule permute_boolI)
+ then have "(c, T1) # \<Gamma>2 \<turnstile> ((c \<leftrightarrow> x) \<bullet> t) : T2" using fc1
+ by (perm_simp) (simp add: flip_fresh_fresh ty_fresh)
+ then show "\<Gamma>2 \<turnstile> Lam [c].((c \<leftrightarrow> x) \<bullet> t) : T1 \<rightarrow> T2" using fc1 by auto
+ qed
+ ultimately show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" by simp
+qed (auto) -- {* var and app cases, luckily, are automatic *}
+
+
+text {*
+ The remedy is again to use a stronger induction principle that
+ has the usual "variable convention" already built in. The
+ following command derives this induction principle for the typing
+ relation. (We shall explain what happens here later.)
+*}
+
+nominal_inductive typing
+ avoids t_Lam: "x"
+ unfolding fresh_star_def
+ by (simp_all add: fresh_Pair lam.fresh ty_fresh)
+
+text {* Compare the two induction principles *}
+
+thm typing.induct
+thm typing.strong_induct -- {* note the additional assumption {atom x} \<sharp> c *}
+
+text {*
+ We can use the stronger induction principle by replacing
+ the line
+
+ proof (induct arbitrary: \<Gamma>2)
+
+ with
+
+ proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
+
+ Try now the proof.
+*}
+
+subsection {* EXERCISE 9 *}
+
+lemma weakening:
+ assumes a: "\<Gamma>1 \<turnstile> t : T"
+ and b: "valid \<Gamma>2"
+ and c: "\<Gamma>1 \<sqsubseteq> \<Gamma>2"
+ shows "\<Gamma>2 \<turnstile> t : T"
+using a b c
+proof (nominal_induct avoiding: \<Gamma>2 rule: typing.strong_induct)
+ case (t_Var \<Gamma>1 x T) -- {* variable case is as before *}
+ have "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
+ moreover
+ have "valid \<Gamma>2" by fact
+ moreover
+ have "(x, T)\<in> set \<Gamma>1" by fact
+ ultimately show "\<Gamma>2 \<turnstile> Var x : T" by auto
+next
+ case (t_Lam x \<Gamma>1 T1 t T2)
+ have vc: "atom x \<sharp> \<Gamma>2" by fact -- {* additional fact afforded by the strong induction *}
+ have ih: "\<And>\<Gamma>3. \<lbrakk>valid \<Gamma>3; (x, T1) # \<Gamma>1 \<sqsubseteq> \<Gamma>3\<rbrakk> \<Longrightarrow> \<Gamma>3 \<turnstile> t : T2" by fact
+ have a0: "atom x \<sharp> \<Gamma>1" by fact
+ have a1: "valid \<Gamma>2" by fact
+ have a2: "\<Gamma>1 \<sqsubseteq> \<Gamma>2" by fact
+ have "valid ((x, T1) # \<Gamma>2)" using a1 vc by auto
+ moreover
+ have "(x, T1) # \<Gamma>1 \<sqsubseteq> (x, T1) # \<Gamma>2" using a2 by auto
+ ultimately
+ have "(x, T1) # \<Gamma>2 \<turnstile> t : T2" using ih by simp
+ then show "\<Gamma>2 \<turnstile> Lam [x].t : T1 \<rightarrow> T2" using vc by auto
+qed (auto) -- {* app case *}
+
+
+
+section {* Unbind and an Inconsistency Example *}
+
+text {*
+ You might wonder why we had to discharge some proof
+ obligations in order to obtain the stronger induction
+ principle for the typing relation. (Remember that for
+ lambda terms this stronger induction principle is
+ derived automatically.)
+
+ These proof obligations are really needed, because if we
+ simply assume such a stronger induction principle, then
+ in some cases we can derive false. This is "shown" below.
+*}
+
+
+inductive
+ unbind :: "lam \<Rightarrow> lam \<Rightarrow> bool" (infixr "\<mapsto>" 60)
+where
+ u_Var[intro]: "Var x \<mapsto> Var x"
+| u_App[intro]: "App t1 t2 \<mapsto> App t1 t2"
+| u_Lam[intro]: "t \<mapsto> t' \<Longrightarrow> Lam [x].t \<mapsto> t'"
+
+text {* It is clear that the following judgement holds. *}
+
+lemma unbind_simple:
+ shows "Lam [x].Var x \<mapsto> Var x"
+ by auto
+
+text {*
+ Now let's derive the strong induction principle for unbind.
+ The point is that we cannot establish the proof obligations,
+ therefore we force Isabelle to accept them by using "sorry".
+*}
+
+equivariance unbind
+nominal_inductive unbind
+ avoids u_Lam: "x"
+ sorry
+
+text {*
+ Using the stronger induction principle, we can establish
+ the following bogus property.
+*}
+
+lemma unbind_fake:
+ fixes y::"name"
+ assumes a: "t \<mapsto> t'"
+ and b: "atom y \<sharp> t"
+ shows "atom y \<sharp> t'"
+using a b
+proof (nominal_induct avoiding: y rule: unbind.strong_induct)
+ case (u_Lam t t' x y)
+ have ih: "atom y \<sharp> t \<Longrightarrow> atom y \<sharp> t'" by fact
+ have asm: "atom y \<sharp> Lam [x]. t" by fact
+ have fc: "atom x \<sharp> y" by fact
+ then have in_eq: "x \<noteq> y" by (simp add: fresh_at_base)
+ then have "atom y \<sharp> t" using asm by (simp add: lam.fresh)
+ then show "atom y \<sharp> t'" using ih by simp
+qed
+
+text {*
+ And from this follows the inconsistency:
+*}
+
+lemma
+ shows "False"
+proof -
+ have "atom x \<sharp> Lam [x]. Var x" by (simp add: lam.fresh)
+ moreover
+ have "Lam [x]. Var x \<mapsto> Var x" using unbind_simple by auto
+ ultimately
+ have "atom x \<sharp> Var x" using unbind_fake by auto
+ then have "x \<noteq> x" by (simp add: lam.fresh fresh_at_base)
+ then show "False" by simp
+qed
+
+end
--- a/Tutorial/Tutorial4.thy Sun Jan 23 07:15:59 2011 +0900
+++ b/Tutorial/Tutorial4.thy Sun Jan 23 07:17:35 2011 +0900
@@ -5,18 +5,62 @@
section {* The CBV Reduction Relation (Small-Step Semantics) *}
+
+inductive
+ cbv :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<longrightarrow>cbv _" [60, 60] 60)
+where
+ cbv1: "\<lbrakk>val v; atom x \<sharp> v\<rbrakk> \<Longrightarrow> App (Lam [x].t) v \<longrightarrow>cbv t[x ::= v]"
+| cbv2: "t \<longrightarrow>cbv t' \<Longrightarrow> App t t2 \<longrightarrow>cbv App t' t2"
+| cbv3: "t \<longrightarrow>cbv t' \<Longrightarrow> App t2 t \<longrightarrow>cbv App t2 t'"
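+
+text {*
+  Informally (this summary is not part of the original text): cbv1
+  beta-reduces an application whose argument is a value (the freshness
+  condition on x is lifted later by the lemma better_cbv1); cbv2 and
+  cbv3 reduce inside the function and the argument position of an
+  application, respectively.
+*}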
+
+inductive
+ "cbv_star" :: "lam \<Rightarrow> lam \<Rightarrow> bool" (" _ \<longrightarrow>cbv* _" [60, 60] 60)
+where
+ cbvs1: "e \<longrightarrow>cbv* e"
+| cbvs2: "\<lbrakk>e1\<longrightarrow>cbv e2; e2 \<longrightarrow>cbv* e3\<rbrakk> \<Longrightarrow> e1 \<longrightarrow>cbv* e3"
+
+declare cbv.intros[intro] cbv_star.intros[intro]
+
+subsection {* EXERCISE 3 *}
+
+text {*
+ Show that cbv* is transitive by filling in the gaps in the
+ proof below.
+*}
+
+lemma
+ assumes a: "e1 \<longrightarrow>cbv* e2" "e2 \<longrightarrow>cbv* e3"
+ shows "e1 \<longrightarrow>cbv* e3"
+using a
+proof (induct)
+ case (cbvs1 e1)
+ have asm: "e1 \<longrightarrow>cbv* e3" by fact
+ show "e1 \<longrightarrow>cbv* e3" sorry
+next
+ case (cbvs2 e1 e2 e3')
+ have "e1 \<longrightarrow>cbv e2" by fact
+ have "e2 \<longrightarrow>cbv* e3'" by fact
+ have "e3' \<longrightarrow>cbv* e3 \<Longrightarrow> e2 \<longrightarrow>cbv* e3" by fact
+ have "e3' \<longrightarrow>cbv* e3" by fact
+
+ show "e1 \<longrightarrow>cbv* e3" sorry
+qed
+
+lemma cbvs3 [intro]:
+ assumes a: "e1 \<longrightarrow>cbv* e2" "e2 \<longrightarrow>cbv* e3"
+ shows "e1 \<longrightarrow>cbv* e3"
+using a by (induct) (auto)
+
+
+
+
+
text {*
 In order to help establish the property that the CK Machine
 calculates a normal form that corresponds to the evaluation
relation, we introduce the call-by-value small-step semantics.
*}
-inductive
- cbv :: "lam \<Rightarrow> lam \<Rightarrow> bool" ("_ \<longrightarrow>cbv _" [60, 60] 60)
-where
- cbv1: "\<lbrakk>val v; atom x \<sharp> v\<rbrakk> \<Longrightarrow> App (Lam [x].t) v \<longrightarrow>cbv t[x ::= v]"
-| cbv2[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> App t t2 \<longrightarrow>cbv App t' t2"
-| cbv3[intro]: "t \<longrightarrow>cbv t' \<Longrightarrow> App t2 t \<longrightarrow>cbv App t2 t'"
equivariance val
equivariance cbv
@@ -44,7 +88,7 @@
lemma better_cbv1 [intro]:
assumes a: "val v"
- shows "App (Lam [x].t) v \<longrightarrow>cbv t[x::=v]"
+ shows "App (Lam [x].t) v \<longrightarrow>cbv t[x ::= v]"
proof -
obtain y::"name" where fs: "atom y \<sharp> (x, t, v)" by (rule obtain_fresh)
have "App (Lam [x].t) v = App (Lam [y].((y \<leftrightarrow> x) \<bullet> t)) v" using fs
@@ -58,16 +102,8 @@
The transitive closure of the cbv-reduction relation:
*}
-inductive
- "cbvs" :: "lam \<Rightarrow> lam \<Rightarrow> bool" (" _ \<longrightarrow>cbv* _" [60, 60] 60)
-where
- cbvs1[intro]: "e \<longrightarrow>cbv* e"
-| cbvs2[intro]: "\<lbrakk>e1\<longrightarrow>cbv e2; e2 \<longrightarrow>cbv* e3\<rbrakk> \<Longrightarrow> e1 \<longrightarrow>cbv* e3"
-lemma cbvs3 [intro]:
- assumes a: "e1 \<longrightarrow>cbv* e2" "e2 \<longrightarrow>cbv* e3"
- shows "e1 \<longrightarrow>cbv* e3"
-using a by (induct) (auto)
+
subsection {* EXERCISE 8 *}