updated to new Isabelle
author: Christian Urban <urbanc@in.tum.de>
date: Thu, 27 May 2010 10:39:07 +0200
changeset: 426 d94755882e36
parent: 425 ce43c04d227d
child: 427 94538ddcac9b
ProgTutorial/Base.thy
ProgTutorial/Helper/Command/Command.thy
ProgTutorial/Package/Ind_Interface.thy
ProgTutorial/Package/simple_inductive_package.ML
ProgTutorial/Parsing.thy
ProgTutorial/Recipes/Antiquotes.thy
ProgTutorial/Solutions.thy
ProgTutorial/antiquote_setup.ML
progtutorial.pdf
--- a/ProgTutorial/Base.thy	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Base.thy	Thu May 27 10:39:07 2010 +0200
@@ -98,9 +98,9 @@
 
 ML {*
 val _ =
-  OuterSyntax.command "ML" "eval ML text within theory"
-    (OuterKeyword.tag "TutorialML" OuterKeyword.thy_decl)
-    (OuterParse.position OuterParse.text >> 
+  Outer_Syntax.command "ML" "eval ML text within theory"
+    (Keyword.tag "TutorialML" Keyword.thy_decl)
+    (Parse.position Parse.text >> 
       (fn (txt, pos) =>
         Toplevel.generic_theory
          (ML_Context.exec 
@@ -108,35 +108,31 @@
               #> propagate_env #> Context.map_theory (write_file_ml_blk txt))))
 
 val _ =
-  OuterSyntax.command "ML_prf" "ML text within proof" 
-    (OuterKeyword.tag "TutorialML" OuterKeyword.prf_decl)
-    (OuterParse.ML_source >> (fn (txt, pos) =>
+  Outer_Syntax.command "ML_prf" "ML text within proof" 
+    (Keyword.tag "TutorialML" Keyword.prf_decl)
+    (Parse.ML_source >> (fn (txt, pos) =>
       Toplevel.proof (Proof.map_context (Context.proof_map
         (ML_Context.exec (fn () => ML_Context.eval true pos txt))) #> propagate_env_prf)))
 
 val _ =
-  OuterSyntax.command "ML_val" "diagnostic ML text" 
-  (OuterKeyword.tag "TutorialML" OuterKeyword.diag)
-    (OuterParse.ML_source >> IsarCmd.ml_diag true)
+  Outer_Syntax.command "ML_val" "diagnostic ML text" 
+  (Keyword.tag "TutorialML" Keyword.diag)
+    (Parse.ML_source >> IsarCmd.ml_diag true)
 *}
 
 ML {*
 val _ =
-  OuterSyntax.command "setup" "ML theory setup" 
-    (OuterKeyword.tag_ml OuterKeyword.thy_decl)
-      (OuterParse.ML_source >> (fn (txt, pos) => 
+  Outer_Syntax.command "setup" "ML theory setup" 
+    (Keyword.tag_ml Keyword.thy_decl)
+      (Parse.ML_source >> (fn (txt, pos) => 
         (Toplevel.theory (IsarCmd.global_setup (txt, pos) #> write_file_setup_blk txt))))
 *}
 
-ML {* Context.proof_map *}
-ML {* IsarCmd.local_setup ; Context.map_proof (write_file_lsetup_blk "") *}
-
-
 ML {*
 val _ =
-  OuterSyntax.local_theory "local_setup" "ML local theory setup" 
-    (OuterKeyword.tag_ml OuterKeyword.thy_decl)
-      (OuterParse.ML_source >> (fn (txt, pos) => 
+  Outer_Syntax.local_theory "local_setup" "ML local theory setup" 
+    (Keyword.tag_ml Keyword.thy_decl)
+      (Parse.ML_source >> (fn (txt, pos) => 
          IsarCmd.local_setup (txt, pos) #> write_file_lsetup_blk txt));
 *}
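
This renaming pattern recurs through the whole changeset: OuterSyntax becomes
Outer_Syntax, OuterKeyword becomes Keyword, and OuterParse becomes Parse, while
the combinator structure of the commands stays the same. A minimal sketch of a
command against the updated interface (the command name "say_hello" is invented
for illustration and is not part of the changeset):

ML {*
(* sketch: a no-op diagnostic command in the new Outer_Syntax API *)
val _ =
  Outer_Syntax.command "say_hello" "print a greeting"
    Keyword.diag
    (Scan.succeed (Toplevel.keep (fn _ => tracing "hello")))
*}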
 
--- a/ProgTutorial/Helper/Command/Command.thy	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Helper/Command/Command.thy	Thu May 27 10:39:07 2010 +0200
@@ -5,9 +5,9 @@
 ML {*
 let
    val do_nothing = Scan.succeed (Local_Theory.theory I)
-   val kind = OuterKeyword.thy_decl
+   val kind = Keyword.thy_decl
 in
-   OuterSyntax.local_theory "foobar" "description of foobar" kind do_nothing
+   Outer_Syntax.local_theory "foobar" "description of foobar" kind do_nothing
 end
 *}
 
@@ -15,10 +15,10 @@
 let
   fun trace_prop str =
      Local_Theory.theory (fn lthy => (tracing str; lthy))
-  val trace_prop_parser = OuterParse.prop >> trace_prop
-  val kind = OuterKeyword.thy_decl
+  val trace_prop_parser = Parse.prop >> trace_prop
+  val kind = Keyword.thy_decl
 in
-  OuterSyntax.local_theory "foobar_trace" "traces a proposition"
+  Outer_Syntax.local_theory "foobar_trace" "traces a proposition"
     kind trace_prop_parser
 end
 *}
@@ -31,10 +31,10 @@
      in
        Proof.theorem NONE (K I) [[(prop,[])]] lthy
      end;
-   val prove_prop_parser = OuterParse.prop >> prove_prop
-   val kind = OuterKeyword.thy_goal
+   val prove_prop_parser = Parse.prop >> prove_prop
+   val kind = Keyword.thy_goal
 in
-   OuterSyntax.local_theory_to_proof "foobar_goal" "proving a proposition"
+   Outer_Syntax.local_theory_to_proof "foobar_goal" "proving a proposition"
      kind prove_prop_parser
 end
 *}
@@ -54,22 +54,13 @@
      Proof.theorem NONE (after_qed thm_name) [[(trm,[])]] lthy
    end
 
-   val parser = SpecParse.opt_thm_name ":" -- OuterParse.ML_source
+   val parser = Parse_Spec.opt_thm_name ":" -- Parse.ML_source
  
 in
-   OuterSyntax.local_theory_to_proof "foobar_prove" "proving a proposition" 
-     OuterKeyword.thy_goal (parser >> setup_proof)
+   Outer_Syntax.local_theory_to_proof "foobar_prove" "proving a proposition" 
+     Keyword.thy_goal (parser >> setup_proof)
 end*}
 
 
-(*
-ML {*
-let
-   val do_nothing = Scan.succeed (Local_Theory.theory I)
-   val kind = OuterKeyword.thy_decl
-in
-   OuterSyntax.local_theory "simple_inductive" "description of foobar" kind do_nothing
-end
-*}*)
 
 end
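
Taken together, the updated commands could be exercised with a theory snippet
along the following lines (a hypothetical test, assuming the keyword file has
been regenerated so that the new commands are known):

theory Test_Commands
imports Command
begin

foobar
foobar_trace "True \<and> False"

foobar_goal "True"
  apply(rule TrueI)
  done

end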
--- a/ProgTutorial/Package/Ind_Interface.thy	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Package/Ind_Interface.thy	Thu May 27 10:39:07 2010 +0200
@@ -61,12 +61,12 @@
 *}
 
 ML{*val spec_parser = 
-   OuterParse.fixes -- 
+   Parse.fixes -- 
    Scan.optional 
-     (OuterParse.$$$ "where" |--
-        OuterParse.!!! 
-          (OuterParse.enum1 "|" 
-             (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
+     (Parse.$$$ "where" |--
+        Parse.!!! 
+          (Parse.enum1 "|" 
+             (Parse_Spec.opt_thm_name ":" -- Parse.prop))) []*}
 
 text {*
   which we explained in Section~\ref{sec:parsingspecs}.  There is no code included 
@@ -95,8 +95,8 @@
 where
   accpartI: "(\<And>y. R y x \<Longrightarrow> accpart' y) \<Longrightarrow> accpart' x"
 (*<*)ML %no{*fun filtered_input str = 
-  filter OuterLex.is_proper (OuterSyntax.scan Position.none str) 
-fun parse p input = Scan.finite OuterLex.stopper (Scan.error p) input*}(*>*)
+  filter Token.is_proper (Outer_Syntax.scan Position.none str) 
+fun parse p input = Scan.finite Token.stopper (Scan.error p) input*}(*>*)
 text {*
   Note that in these definitions the parameter @{text R}, standing for the
   relation, is left implicit. For the moment we will ignore this kind
@@ -133,13 +133,13 @@
   spec_parser >>
     (fn (pred_specs, rule_specs) => add_inductive_cmd pred_specs rule_specs)
 
-val _ = OuterSyntax.local_theory "simple_inductive2" 
+val _ = Outer_Syntax.local_theory "simple_inductive2" 
           "definition of simple inductive predicates"
-             OuterKeyword.thy_decl specification*}
+             Keyword.thy_decl specification*}
 
 text {*
-  We call @{ML_ind  local_theory in OuterSyntax} with the kind-indicator 
-  @{ML_ind  thy_decl in OuterKeyword} since the package does not need to open 
+  We call @{ML_ind  local_theory in Outer_Syntax} with the kind-indicator 
+  @{ML_ind  thy_decl in Keyword} since the package does not need to open 
   up any proof (see Section~\ref{sec:newcommand}). 
   The auxiliary function @{text specification} in Lines 1 to 3
   gathers the information from the parser to be processed further
--- a/ProgTutorial/Package/simple_inductive_package.ML	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Package/simple_inductive_package.ML	Thu May 27 10:39:07 2010 +0200
@@ -220,14 +220,14 @@
 
 (* @chunk parser *)
 val spec_parser = 
-   OuterParse.opt_target --
-   OuterParse.fixes -- 
-   OuterParse.for_fixes --
+   Parse.opt_target --
+   Parse.fixes -- 
+   Parse.for_fixes --
    Scan.optional 
-     (OuterParse.$$$ "where" |--
-        OuterParse.!!! 
-          (OuterParse.enum1 "|" 
-             (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []
+     (Parse.$$$ "where" |--
+        Parse.!!! 
+          (Parse.enum1 "|" 
+             (Parse_Spec.opt_thm_name ":" -- Parse.prop))) []
 (* @end *)
 
 (* @chunk syntax *)
@@ -236,8 +236,8 @@
     (fn (((loc, preds), params), specs) =>
       Toplevel.local_theory loc (add_inductive preds params specs))
 
-val _ = OuterSyntax.command "simple_inductive" "define inductive predicates"
-          OuterKeyword.thy_decl specification
+val _ = Outer_Syntax.command "simple_inductive" "define inductive predicates"
+          Keyword.thy_decl specification
 (* @end *)
 
 end;
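
With the parser and command renamed as above, a hypothetical invocation of the
package, reusing the even/odd example familiar from the tutorial, would be:

simple_inductive
  even and odd
where
  even0: "even 0"
| evenS: "odd n \<Longrightarrow> even (Suc n)"
| oddS: "even n \<Longrightarrow> odd (Suc n)"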
--- a/ProgTutorial/Parsing.thy	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Parsing.thy	Thu May 27 10:39:07 2010 +0200
@@ -40,7 +40,7 @@
   defined in the structure @{ML_struct Scan} in the file @{ML_file
   "Pure/General/scan.ML"}. The second part of the library consists of
   combinators for dealing with specific token types, which are defined in the
-  structure @{ML_struct OuterParse} in the file @{ML_file
+  structure @{ML_struct Parse} in the file @{ML_file
   "Pure/Isar/parse.ML"}. In addition specific parsers for packages are 
   defined in @{ML_file "Pure/Isar/parse_spec.ML"}. Parsers for method arguments 
   are defined in @{ML_file "Pure/Isar/args.ML"}.
@@ -244,7 +244,7 @@
   "Scan.error (!! (fn _ => \"foo\") ($$ \"h\"))"
   "Exception Error \"foo\" raised"}
 
-  This ``prefixing'' is usually done by wrappers such as @{ML_ind local_theory in OuterSyntax} 
+  This ``prefixing'' is usually done by wrappers such as @{ML_ind local_theory in Outer_Syntax} 
  (see Section~\ref{sec:newcommand}, which explains this function in more detail). 
   
   Let us now return to our example of parsing @{ML "(p -- q) || r" for p q
@@ -471,31 +471,31 @@
   tokens, not strings.  These token parsers have the type:
 *}
   
-ML{*type 'a parser = OuterLex.token list -> 'a * OuterLex.token list*}
+ML{*type 'a parser = Token.T list -> 'a * Token.T list*}
 
 text {*
   The reason for using token parsers is that theory syntax, as well as the
  parsers for the arguments of proof methods, uses the type @{ML_type
-  OuterLex.token}.
+  Token.T}.
 
   \begin{readmore}
   The parser functions for the theory syntax are contained in the structure
-  @{ML_struct OuterParse} defined in the file @{ML_file  "Pure/Isar/parse.ML"}.
-  The definition for tokens is in the file @{ML_file "Pure/Isar/outer_lex.ML"}.
+  @{ML_struct Parse} defined in the file @{ML_file  "Pure/Isar/parse.ML"}.
+  The definition for tokens is in the file @{ML_file "Pure/Isar/token.ML"}.
   \end{readmore}
 
-  The structure @{ML_struct  OuterLex} defines several kinds of tokens (for
-  example @{ML_ind Ident in OuterLex} for identifiers, @{ML Keyword in
-  OuterLex} for keywords and @{ML_ind Command in OuterLex} for commands). Some
+  The structure @{ML_struct  Token} defines several kinds of tokens (for
+  example @{ML_ind Ident in Token} for identifiers, @{ML Keyword in
+  Token} for keywords and @{ML_ind Command in Token} for commands). Some
   token parsers take into account the kind of tokens. The first example shows
   how to generate a token list out of a string using the function 
-  @{ML_ind scan in OuterSyntax}. It is given the argument 
+  @{ML_ind scan in Outer_Syntax}. It is given the argument 
   @{ML "Position.none"} since,
   at the moment, we are not interested in generating precise error
   messages. The following code
 
 
-@{ML_response_fake [display,gray] "OuterSyntax.scan Position.none \"hello world\"" 
+@{ML_response_fake [display,gray] "Outer_Syntax.scan Position.none \"hello world\"" 
 "[Token (\<dots>,(Ident, \"hello\"),\<dots>), 
  Token (\<dots>,(Space, \" \"),\<dots>), 
  Token (\<dots>,(Ident, \"world\"),\<dots>)]"}
@@ -505,29 +505,29 @@
   other syntactic category. The second indicates a space.
 
   We can easily change what is recognised as a keyword with the function
-  @{ML_ind keyword in OuterKeyword}. For example calling it with 
+  @{ML_ind keyword in Keyword}. For example calling it with 
 *}
 
-ML{*val _ = OuterKeyword.keyword "hello"*}
+ML{*val _ = Keyword.keyword "hello"*}
 
 text {*
   then lexing @{text [quotes] "hello world"} will produce
 
-  @{ML_response_fake [display,gray] "OuterSyntax.scan Position.none \"hello world\"" 
+  @{ML_response_fake [display,gray] "Outer_Syntax.scan Position.none \"hello world\"" 
 "[Token (\<dots>,(Keyword, \"hello\"),\<dots>), 
  Token (\<dots>,(Space, \" \"),\<dots>), 
  Token (\<dots>,(Ident, \"world\"),\<dots>)]"}
 
   Many parsing functions later on will require white space, comments and the like
   to have already been filtered out.  So from now on we are going to use the 
-  functions @{ML filter} and @{ML_ind is_proper in OuterLex} to do this. 
+  functions @{ML filter} and @{ML_ind is_proper in Token} to do this. 
   For example:
 
 @{ML_response_fake [display,gray]
 "let
-   val input = OuterSyntax.scan Position.none \"hello world\"
+   val input = Outer_Syntax.scan Position.none \"hello world\"
 in
-   filter OuterLex.is_proper input
+   filter Token.is_proper input
 end" 
 "[Token (\<dots>,(Ident, \"hello\"), \<dots>), Token (\<dots>,(Ident, \"world\"), \<dots>)]"}
 
@@ -535,7 +535,7 @@
 *}
 
 ML{*fun filtered_input str = 
-  filter OuterLex.is_proper (OuterSyntax.scan Position.none str) *}
+  filter Token.is_proper (Outer_Syntax.scan Position.none str) *}
 
 text {* 
   If you now parse
@@ -552,7 +552,7 @@
 
 @{ML_response_fake [display,gray] 
 "let 
-  val (keywords, commands) = OuterKeyword.get_lexicons ()
+  val (keywords, commands) = Keyword.get_lexicons ()
 in 
   (Scan.dest_lexicon commands, Scan.dest_lexicon keywords)
 end" 
@@ -561,23 +561,23 @@
   You might have to adjust the @{ML_ind print_depth} in order to
   see the complete list.
 
-  The parser @{ML_ind "$$$" in OuterParse} parses a single keyword. For example:
+  The parser @{ML_ind "$$$" in Parse} parses a single keyword. For example:
 
 @{ML_response [display,gray]
 "let 
   val input1 = filtered_input \"where for\"
   val input2 = filtered_input \"| in\"
 in 
-  (OuterParse.$$$ \"where\" input1, OuterParse.$$$ \"|\" input2)
+  (Parse.$$$ \"where\" input1, Parse.$$$ \"|\" input2)
 end"
 "((\"where\",\<dots>), (\"|\",\<dots>))"}
 
-  Any non-keyword string can be parsed with the function @{ML_ind reserved in OuterParse}.
+  Any non-keyword string can be parsed with the function @{ML_ind reserved in Parse}.
   For example:
 
   @{ML_response [display,gray]
 "let 
-  val p = OuterParse.reserved \"bar\"
+  val p = Parse.reserved \"bar\"
   val input = filtered_input \"bar\"
 in
   p input
@@ -590,11 +590,11 @@
 "let 
   val input = filtered_input \"| in\"
 in 
-  (OuterParse.$$$ \"|\" -- OuterParse.$$$ \"in\") input
+  (Parse.$$$ \"|\" -- Parse.$$$ \"in\") input
 end"
 "((\"|\", \"in\"), [])"}
 
-  The parser @{ML "OuterParse.enum s p" for s p} parses a possibly empty 
+  The parser @{ML "Parse.enum s p" for s p} parses a possibly empty 
   list of items recognised by the parser @{text p}, where the items being parsed
   are separated by the string @{text s}. For example:
 
@@ -602,45 +602,45 @@
 "let 
   val input = filtered_input \"in | in | in foo\"
 in 
-  (OuterParse.enum \"|\" (OuterParse.$$$ \"in\")) input
+  (Parse.enum \"|\" (Parse.$$$ \"in\")) input
 end" 
 "([\"in\", \"in\", \"in\"], [\<dots>])"}
 
-  The function @{ML_ind enum1 in OuterParse} works similarly, except that the
+  The function @{ML_ind enum1 in Parse} works similarly, except that the
   parsed list must be non-empty. Note that we had to add a string @{text
   [quotes] "foo"} at the end of the parsed string, otherwise the parser would
   have consumed all tokens and then failed with the exception @{text
   "MORE"}. Like in the previous section, we can avoid this exception using the
   wrapper @{ML Scan.finite}. This time, however, we have to use the
-  ``stopper-token'' @{ML OuterLex.stopper}. We can write:
+  ``stopper-token'' @{ML Token.stopper}. We can write:
 
 @{ML_response [display,gray]
 "let 
   val input = filtered_input \"in | in | in\"
-  val p = OuterParse.enum \"|\" (OuterParse.$$$ \"in\")
+  val p = Parse.enum \"|\" (Parse.$$$ \"in\")
 in 
-  Scan.finite OuterLex.stopper p input
+  Scan.finite Token.stopper p input
 end" 
 "([\"in\", \"in\", \"in\"], [])"}
 
   The following function will help to run examples.
 *}
 
-ML{*fun parse p input = Scan.finite OuterLex.stopper (Scan.error p) input *}
+ML{*fun parse p input = Scan.finite Token.stopper (Scan.error p) input *}
 
 text {*
-  The function @{ML_ind "!!!" in OuterParse} can be used to force termination
+  The function @{ML_ind "!!!" in Parse} can be used to force termination
   of the parser in case of a dead end, just like @{ML "Scan.!!"} (see previous
   section). A difference, however, is that the error message of @{ML
-  "OuterParse.!!!"} is fixed to be @{text [quotes] "Outer syntax error"}
+  "Parse.!!!"} is fixed to be @{text [quotes] "Outer syntax error"}
   together with a relatively precise description of the failure. For example:
 
 @{ML_response_fake [display,gray]
 "let 
   val input = filtered_input \"in |\"
-  val parse_bar_then_in = OuterParse.$$$ \"|\" -- OuterParse.$$$ \"in\"
+  val parse_bar_then_in = Parse.$$$ \"|\" -- Parse.$$$ \"in\"
 in 
-  parse (OuterParse.!!! parse_bar_then_in) input 
+  parse (Parse.!!! parse_bar_then_in) input 
 end"
 "Exception ERROR \"Outer syntax error: keyword \"|\" expected, 
 but keyword in was found\" raised"
@@ -648,8 +648,8 @@
 
   \begin{exercise} (FIXME)
   A type-identifier, for example @{typ "'a"}, is a token of 
-  kind @{ML_ind Keyword in OuterLex}. It can be parsed using 
-  the function @{ML type_ident in OuterParse}.
+  kind @{ML_ind Keyword in Token}. It can be parsed using 
+  the function @{ML type_ident in Parse}.
   \end{exercise}
 
   (FIXME: or give parser for numbers)
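
  Pending that FIXME, here is a minimal number-parser sketch, reusing the
  @{ML filtered_input} and @{ML parse} helpers defined in this section (the
  result in the comment is an expectation, not checked output):

ML {*
(* sketch: Parse.nat reads a number token and converts it to an int *)
let
  val input = filtered_input "42"
in
  parse Parse.nat input
end
(* expected: (42, []) *)
*}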
@@ -662,7 +662,7 @@
 *}
 
 ML{*fun filtered_input' str = 
-       filter OuterLex.is_proper (OuterSyntax.scan (Position.line 7) str) *}
+       filter Token.is_proper (Outer_Syntax.scan (Position.line 7) str) *}
 
 text {*
   where we pretend the parsed string starts on line 7. An example is
@@ -675,14 +675,14 @@
   in which the @{text [quotes] "\\n"} causes the second token to be in 
   line 8.
 
-  By using the parser @{ML position in OuterParse} you can access the token 
+  By using the parser @{ML position in Parse} you can access the token 
   position and return it as part of the parser result. For example
 
 @{ML_response_fake [display,gray]
 "let
   val input = filtered_input' \"where\"
 in 
-  parse (OuterParse.position (OuterParse.$$$ \"where\")) input
+  parse (Parse.position (Parse.$$$ \"where\")) input
 end"
 "((\"where\", {line=7, end_line=7}), [])"}
 
@@ -712,7 +712,7 @@
 section {* Parsers for ML-Code (TBD) *}
 
 text {*
-  @{ML_ind ML_source in OuterParse}
+  @{ML_ind ML_source in Parse}
 *}
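
  Until this section is filled in, a sketch of the typical use of
  @{ML ML_source in Parse}, mirroring the diagnostic commands updated in
  Base.thy above (the command name "ML_echo" is invented for illustration):

ML {*
(* sketch: a diagnostic command that echoes its ML source text *)
val _ =
  Outer_Syntax.command "ML_echo" "echo ML source text"
    (Keyword.tag_ml Keyword.diag)
    (Parse.ML_source >> (fn (txt, _) =>
      Toplevel.keep (fn _ => tracing txt)))
*}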
 
 section {* Context Parser (TBD) *}
@@ -747,17 +747,17 @@
 text {*
   There is usually no need to write your own parser for parsing inner syntax, that is 
   for terms and  types: you can just call the predefined parsers. Terms can 
-  be parsed using the function @{ML_ind term in OuterParse}. For example:
+  be parsed using the function @{ML_ind term in Parse}. For example:
 
 @{ML_response [display,gray]
 "let 
-  val input = OuterSyntax.scan Position.none \"foo\"
+  val input = Outer_Syntax.scan Position.none \"foo\"
 in 
-  OuterParse.term input
+  Parse.term input
 end"
 "(\"\\^E\\^Ftoken\\^Efoo\\^E\\^F\\^E\", [])"}
 
-  The function @{ML_ind prop in OuterParse} is similar, except that it gives a different
+  The function @{ML_ind prop in Parse} is similar, except that it gives a different
  error message when parsing fails. As you can see, the parser returns not just 
  the parsed string, but also some encoded information. You can decode the
   information with the function @{ML_ind parse in YXML} in @{ML_struct YXML}. For example
@@ -771,9 +771,9 @@
 
 @{ML_response [display,gray]
 "let 
-  val input = OuterSyntax.scan (Position.line 42) \"foo\"
+  val input = Outer_Syntax.scan (Position.line 42) \"foo\"
 in 
-  YXML.parse (fst (OuterParse.term input))
+  YXML.parse (fst (Parse.term input))
 end"
 "XML.Elem (\"token\", [(\"line\", \"42\"), (\"end_line\", \"42\")], [XML.Text \"foo\"])"}
   
@@ -815,12 +815,12 @@
 *}
 
 ML %linenosgray{*val spec_parser = 
-     OuterParse.fixes -- 
+     Parse.fixes -- 
      Scan.optional 
-       (OuterParse.$$$ "where" |--
-          OuterParse.!!! 
-            (OuterParse.enum1 "|" 
-               (SpecParse.opt_thm_name ":" -- OuterParse.prop))) []*}
+       (Parse.$$$ "where" |--
+          Parse.!!! 
+            (Parse.enum1 "|" 
+               (Parse_Spec.opt_thm_name ":" -- Parse.prop))) []*}
 
 text {*
   Note that the parser must not parse the keyword \simpleinductive, even if it is
@@ -851,7 +851,7 @@
   variables with optional type-annotation and syntax-annotation, and a list of
   rules where every rule has optionally a name and an attribute.
 
-  The function @{ML_ind "fixes" in OuterParse} in Line 2 of the parser reads an 
+  The function @{ML_ind "fixes" in Parse} in Line 2 of the parser reads an 
   \isacommand{and}-separated 
   list of variables that can include optional type annotations and syntax translations. 
   For example:\footnote{Note that in the code we need to write 
@@ -863,7 +863,7 @@
   val input = filtered_input 
         \"foo::\\\"int \<Rightarrow> bool\\\" and bar::nat (\\\"BAR\\\" 100) and blonk\"
 in
-  parse OuterParse.fixes input
+  parse Parse.fixes input
 end"
 "([(foo, SOME \"\\^E\\^Ftoken\\^Eint \<Rightarrow> bool\\^E\\^F\\^E\", NoSyn), 
   (bar, SOME \"\\^E\\^Ftoken\\^Enat\\^E\\^F\\^E\", Mixfix (\"BAR\", [], 100)), 
@@ -886,20 +886,20 @@
   Lines 3 to 7 in the function @{ML spec_parser} implement the parser for a
   list of introduction rules, that is propositions with theorem annotations
   such as rule names and attributes. The introduction rules are propositions
-  parsed by @{ML_ind prop in OuterParse}. However, they can include an optional
+  parsed by @{ML_ind prop in Parse}. However, they can include an optional
   theorem name plus some attributes. For example
 
 @{ML_response [display,gray] "let 
   val input = filtered_input \"foo_lemma[intro,dest!]:\"
-  val ((name, attrib), _) = parse (SpecParse.thm_name \":\") input 
+  val ((name, attrib), _) = parse (Parse_Spec.thm_name \":\") input 
 in 
   (name, map Args.dest_src attrib)
 end" "(foo_lemma, [((\"intro\", []), \<dots>), ((\"dest\", [\<dots>]), \<dots>)])"}
  
-  The function @{ML_ind opt_thm_name in SpecParse} is the ``optional'' variant of
-  @{ML_ind thm_name in SpecParse}. Theorem names can contain attributes. The name 
+  The function @{ML_ind opt_thm_name in Parse_Spec} is the ``optional'' variant of
+  @{ML_ind thm_name in Parse_Spec}. Theorem names can contain attributes. The name 
   has to end with @{text [quotes] ":"}---see the argument of 
-  the function @{ML SpecParse.opt_thm_name} in Line 7.
+  the function @{ML Parse_Spec.opt_thm_name} in Line 7.
 
   \begin{readmore}
   Attributes and arguments are implemented in the files @{ML_file "Pure/Isar/attrib.ML"} 
@@ -909,23 +909,23 @@
 
 text_raw {*
   \begin{exercise}
-  Have a look at how the parser @{ML SpecParse.where_alt_specs} is implemented
+  Have a look at how the parser @{ML Parse_Spec.where_alt_specs} is implemented
   in file @{ML_file "Pure/Isar/parse_spec.ML"}. This parser corresponds
   to the ``where-part'' of the introduction rules given above. Below
-  we paraphrase the code of @{ML_ind where_alt_specs in SpecParse} adapted to our
+  we paraphrase the code of @{ML_ind where_alt_specs in Parse_Spec} adapted to our
   purposes. 
   \begin{isabelle}
 *}
 ML %linenosgray{*val spec_parser' = 
-     OuterParse.fixes -- 
+     Parse.fixes -- 
      Scan.optional
-     (OuterParse.$$$ "where" |-- 
-        OuterParse.!!! 
-          (OuterParse.enum1 "|" 
-             ((SpecParse.opt_thm_name ":" -- OuterParse.prop) --| 
-                  Scan.option (Scan.ahead (OuterParse.name || 
-                  OuterParse.$$$ "[") -- 
-                  OuterParse.!!! (OuterParse.$$$ "|"))))) [] *}
+     (Parse.$$$ "where" |-- 
+        Parse.!!! 
+          (Parse.enum1 "|" 
+             ((Parse_Spec.opt_thm_name ":" -- Parse.prop) --| 
+                  Scan.option (Scan.ahead (Parse.name || 
+                  Parse.$$$ "[") -- 
+                  Parse.!!! (Parse.$$$ "|"))))) [] *}
 text_raw {*
   \end{isabelle}
   Both parsers accept the same input% that's not true:
@@ -937,7 +937,7 @@
 *}
 
 text {*
-  (FIXME: @{ML OuterParse.type_args}, @{ML OuterParse.typ}, @{ML OuterParse.opt_mixfix})
+  (FIXME: @{ML Parse.type_args}, @{ML Parse.typ}, @{ML Parse.opt_mixfix})
 *}
 
 
@@ -957,13 +957,13 @@
 
 ML{*let
   val do_nothing = Scan.succeed (Local_Theory.theory I)
-  val kind = OuterKeyword.thy_decl
+  val kind = Keyword.thy_decl
 in
-  OuterSyntax.local_theory "foobar" "description of foobar" kind do_nothing
+  Outer_Syntax.local_theory "foobar" "description of foobar" kind do_nothing
 end *}
 
 text {*
-  The crucial function @{ML_ind local_theory in OuterSyntax} expects a name for the command, a
+  The crucial function @{ML_ind local_theory in Outer_Syntax} expects a name for the command, a
  short description, a kind indicator (which we will explain more thoroughly later) and a
  parser producing a local theory transition (its purpose will also be explained
  later). 
@@ -994,9 +994,9 @@
   @{ML
 "let
   val do_nothing = Scan.succeed (Local_Theory.theory I)
-  val kind = OuterKeyword.thy_decl
+  val kind = Keyword.thy_decl
 in
-  OuterSyntax.local_theory \"foobar\" \"description of foobar\" kind do_nothing
+  Outer_Syntax.local_theory \"foobar\" \"description of foobar\" kind do_nothing
 end"}\\
   @{text "\<verbclose>"}\\
   \isacommand{end}
@@ -1097,10 +1097,10 @@
  nothing. The point of this command is only to show how to
  interact with ProofGeneral. The same steps are needed for any 
  other new command, and also for any new keyword that is introduced with 
-  the function @{ML_ind keyword in OuterKeyword}. For example:
+  the function @{ML_ind keyword in Keyword}. For example:
 *}
 
-ML{*val _ = OuterKeyword.keyword "blink" *}
+ML{*val _ = Keyword.keyword "blink" *}
 
 text {*
   At the moment the command \isacommand{foobar} is not very useful. Let us
@@ -1112,7 +1112,7 @@
  because @{ML_ind succeed in Scan} does not parse any argument, but immediately
   returns the simple function @{ML "Local_Theory.theory I"}. We can
   replace this code by a function that first parses a proposition (using the
-  parser @{ML OuterParse.prop}), then prints out the tracing
+  parser @{ML Parse.prop}), then prints out the tracing
   information (using a new function @{text trace_prop}) and 
   finally does nothing. For this you can write:
 *}
@@ -1121,10 +1121,10 @@
   fun trace_prop str = 
      Local_Theory.theory (fn ctxt => (tracing str; ctxt))
 
-  val kind = OuterKeyword.thy_decl
+  val kind = Keyword.thy_decl
 in
-  OuterSyntax.local_theory "foobar_trace" "traces a proposition" 
-    kind (OuterParse.prop >> trace_prop)
+  Outer_Syntax.local_theory "foobar_trace" "traces a proposition" 
+    kind (Parse.prop >> trace_prop)
 end *}
 
 text {*
@@ -1135,7 +1135,7 @@
 foobar_trace "True \<and> False"
 
 text {*
-  Note that so far we used @{ML_ind thy_decl in OuterKeyword} as the kind
+  Note that so far we used @{ML_ind thy_decl in Keyword} as the kind
   indicator for the command.  This means that the command finishes as soon as
   the arguments are processed. Examples of this kind of commands are
   \isacommand{definition} and \isacommand{declare}.  In other cases, commands
@@ -1143,16 +1143,16 @@
   ``open up'' a proof in order to prove the proposition (for example
   \isacommand{lemma}) or prove some other properties (for example
   \isacommand{function}). To achieve this kind of behaviour, you have to use
-  the kind indicator @{ML_ind thy_goal in OuterKeyword} and the function @{ML
-  "local_theory_to_proof" in OuterSyntax} to set up the command.  Note,
+  the kind indicator @{ML_ind thy_goal in Keyword} and the function @{ML
+  "local_theory_to_proof" in Outer_Syntax} to set up the command.  Note,
  however, that once you change the ``kind'' of a command from @{ML thy_decl in
-  OuterKeyword} to @{ML thy_goal in OuterKeyword} then the keyword file needs
+  Keyword} to @{ML thy_goal in Keyword}, the keyword file needs
  to be re-created!
 
   Below we show the command \isacommand{foobar\_goal} which takes a
   proposition as argument and then starts a proof in order to prove
   it. Therefore in Line 9, we set the kind indicator to @{ML thy_goal in
-  OuterKeyword}.
+  Keyword}.
 *}
 
 ML%linenosgray{*let
@@ -1163,10 +1163,10 @@
       Proof.theorem NONE (K I) [[(prop, [])]] lthy
     end
   
-  val kind = OuterKeyword.thy_goal
+  val kind = Keyword.thy_goal
 in
-  OuterSyntax.local_theory_to_proof "foobar_goal" "proves a proposition" 
-    kind (OuterParse.prop >> goal_prop)
+  Outer_Syntax.local_theory_to_proof "foobar_goal" "proves a proposition" 
+    kind (Parse.prop >> goal_prop)
 end *}
 
 text {*
@@ -1214,10 +1214,10 @@
      Proof.theorem NONE (after_qed thm_name) [[(trm,[])]] lthy
    end
 
-   val parser = SpecParse.opt_thm_name ":" -- OuterParse.ML_source
+   val parser = Parse_Spec.opt_thm_name ":" -- Parse.ML_source
 in
-   OuterSyntax.local_theory_to_proof "foobar_prove" "proving a proposition" 
-     OuterKeyword.thy_goal (parser >> setup_proof)
+   Outer_Syntax.local_theory_to_proof "foobar_prove" "proving a proposition" 
+     Keyword.thy_goal (parser >> setup_proof)
 end*}
 
 foobar_prove test: {* @{prop "True"} *}
@@ -1284,7 +1284,7 @@
 *}
 
 setup {*
-Method.setup (Binding.name "tactic3") (Scan.lift (OuterParse.position Args.name) 
+Method.setup (Binding.name "tactic3") (Scan.lift (Parse.position Args.name) 
   >> tactic3)
     "ML tactic as proof method"
 *}
@@ -1456,673 +1456,4 @@
   (FIXME: explain a version of rule-tac)
 *}
 
-(*<*)
-(* THIS IS AN OLD VERSION OF THE PARSING CHAPTER BY JEREMY DAWSON *)
-chapter {* Parsing *}
-
-text {*
-
-  Lots of Standard ML code is given in this document, for various reasons,
-  including:
-  \begin{itemize}
-  \item direct quotation of code found in the Isabelle source files,
-  or simplified versions of such code
-  \item identifiers found in the Isabelle source code, with their types 
-  (or specialisations of their types)
-  \item code examples, which can be run by the reader, to help illustrate the
-  behaviour of functions found in the Isabelle source code
-  \item ancillary functions, not from the Isabelle source code, 
-  which enable the reader to run relevant code examples
-  \item type abbreviations, which help explain the uses of certain functions
-  \end{itemize}
-
-*}
-
-section {* Parsing Isar input *}
-
-text {*
-
-  The typical parsing function has the type
-  \texttt{'src -> 'res * 'src}, with input  
-  of type \texttt{'src}, returning a result 
-  of type \texttt{'res}, which is (or is derived from) the first part of the
-  input, and also returning the remainder of the input.
-  (In the common case, when it is clear what the ``remainder of the input''
-  means, we will just say that the functions ``returns'' the
-  value of type \texttt{'res}). 
-  An exception is raised if an appropriate value 
-  cannot be produced from the input.
-  A range of exceptions can be used to identify different reasons 
-  for the failure of a parse.
-  
-  This contrasts the standard parsing function in Standard ML,
-  which is of type 
-  \texttt{type ('res, 'src) reader = 'src -> ('res * 'src) option};
-  (for example, \texttt{List.getItem} and \texttt{Substring.getc}).
-  However, much of the discussion at 
-  FIX file:/home/jeremy/html/ml/SMLBasis/string-cvt.html
-  is relevant.
-
-  Naturally one may convert between the two different sorts of parsing functions
-  as follows:
-  \begin{verbatim}
-  open StringCvt ;
-  type ('res, 'src) ex_reader = 'src -> 'res * 'src
-  ex_reader : ('res, 'src) reader -> ('res, 'src) ex_reader 
-  fun ex_reader rdr src = Option.valOf (rdr src) ;
-  reader : ('res, 'src) ex_reader -> ('res, 'src) reader 
-  fun reader exrdr src = SOME (exrdr src) handle _ => NONE ;
-  \end{verbatim}
-  
-*}
-
-section{* The \texttt{Scan} structure *}
-
-text {* 
-  The source file is \texttt{src/General/scan.ML}.
-  This structure provides functions for using and combining parsing functions
-  of the type \texttt{'src -> 'res * 'src}.
-  Three exceptions are used:
-  \begin{verbatim}
-  exception MORE of string option;  (*need more input (prompt)*)
-  exception FAIL of string option;  (*try alternatives (reason of failure)*)
-  exception ABORT of string;        (*dead end*)
-  \end{verbatim}
-  Many functions in this structure (generally those with names composed of
-  symbols) are declared as infix.
-
-  Some functions from that structure are
-  \begin{verbatim}
-  |-- : ('src -> 'res1 * 'src') * ('src' -> 'res2 * 'src'') ->
-  'src -> 'res2 * 'src''
-  --| : ('src -> 'res1 * 'src') * ('src' -> 'res2 * 'src'') ->
-  'src -> 'res1 * 'src''
-  -- : ('src -> 'res1 * 'src') * ('src' -> 'res2 * 'src'') ->
-  'src -> ('res1 * 'res2) * 'src''
-  ^^ : ('src -> string * 'src') * ('src' -> string * 'src'') ->
-  'src -> string * 'src''
-  \end{verbatim}
-  These functions parse a result off the input source twice.
-
-  \texttt{|--} and \texttt{--|} 
-  return the first result and the second result, respectively.
-
-  \texttt{--} returns both.
-
-  \verb|^^| returns the result of concatenating the two results
-  (which must be strings).
-
-  Note how, although the types 
-  \texttt{'src}, \texttt{'src'} and \texttt{'src''} will normally be the same,
-  the types as shown help suggest the behaviour of the functions.
-  \begin{verbatim}
-  :-- : ('src -> 'res1 * 'src') * ('res1 -> 'src' -> 'res2 * 'src'') ->
-  'src -> ('res1 * 'res2) * 'src''
-  :|-- : ('src -> 'res1 * 'src') * ('res1 -> 'src' -> 'res2 * 'src'') ->
-  'src -> 'res2 * 'src''
-  \end{verbatim}
-  These are similar to \texttt{|--} and \texttt{--|},
-  except that the second parsing function can depend on the result of the first.
-  \begin{verbatim}
-  >> : ('src -> 'res1 * 'src') * ('res1 -> 'res2) -> 'src -> 'res2 * 'src'
-  || : ('src -> 'res_src) * ('src -> 'res_src) -> 'src -> 'res_src
-  \end{verbatim}
-  \texttt{p >> f} applies a function \texttt{f} to the result of a parse.
-  
-  \texttt{||} tries a second parsing function if the first one
-  fails by raising an exception of the form \texttt{FAIL \_}.
-  
-  \begin{verbatim}
-  succeed : 'res -> ('src -> 'res * 'src) ;
-  fail : ('src -> 'res_src) ;
-  !! : ('src * string option -> string) -> 
-  ('src -> 'res_src) -> ('src -> 'res_src) ;
-  \end{verbatim}
-  \texttt{succeed r} returns \texttt{r}, with the input unchanged.
-  \texttt{fail} always fails, raising exception \texttt{FAIL NONE}.
-  \texttt{!! f} only affects the failure mode, turning a failure that
-  raises \texttt{FAIL \_} into a failure that raises \texttt{ABORT ...}.
-  This is used to prevent recovery from the failure ---
-  thus, in \texttt{!! parse1 || parse2}, if \texttt{parse1} fails, 
-  it won't recover by trying \texttt{parse2}.
-
-  \begin{verbatim}
-  one : ('si -> bool) -> ('si list -> 'si * 'si list) ;
-  some : ('si -> 'res option) -> ('si list -> 'res * 'si list) ;
-  \end{verbatim}
-  These require the input to be a list of items:
-  they fail, raising \texttt{MORE NONE} if the list is empty.
-  On other failures they raise \texttt{FAIL NONE} 
-
-  \texttt{one p} takes the first
-  item from the list if it satisfies \texttt{p}, otherwise fails.
-
-  \texttt{some f} takes the first
-  item from the list and applies \texttt{f} to it, failing if this returns
-  \texttt{NONE}.  
-
-  \begin{verbatim}
-  many : ('si -> bool) -> 'si list -> 'si list * 'si list ; 
-  \end{verbatim}
-  \texttt{many p} takes items from the input until it encounters one 
-  which does not satisfy \texttt{p}.  If it reaches the end of the input
-  it fails, raising \texttt{MORE NONE}.
-
-  \texttt{many1} (with the same type) fails if the first item 
-  does not satisfy \texttt{p}.  
-
-  \begin{verbatim}
-  option : ('src -> 'res * 'src) -> ('src -> 'res option * 'src)
-  optional : ('src -> 'res * 'src) -> 'res -> ('src -> 'res * 'src)
-  \end{verbatim}
-  \texttt{option}: 
-  where the parser \texttt{f} succeeds with result \texttt{r} 
-  or raises \texttt{FAIL \_},
-  \texttt{option f} gives the result \texttt{SOME r} or \texttt{NONE}.
-  
-  \texttt{optional}: if parser \texttt{f} fails by raising \texttt{FAIL \_},
-  \texttt{optional f default} provides the result \texttt{default}.
-
-  \begin{verbatim}
-  repeat : ('src -> 'res * 'src) -> 'src -> 'res list * 'src
-  repeat1 : ('src -> 'res * 'src) -> 'src -> 'res list * 'src
-  bulk : ('src -> 'res * 'src) -> 'src -> 'res list * 'src 
-  \end{verbatim}
-  \texttt{repeat f} repeatedly parses an item off the remaining input until 
-  \texttt{f} fails with \texttt{FAIL \_}
-
-  \texttt{repeat1} is as for \texttt{repeat}, but requires at least one
-  successful parse.
-
-  \begin{verbatim}
-  lift : ('src -> 'res * 'src) -> ('ex * 'src -> 'res * ('ex * 'src))
-  \end{verbatim}
-  \texttt{lift} changes the source type of a parser by putting in an extra
-  component \texttt{'ex}, which is ignored in the parsing.
-
-  The \texttt{Scan} structure also provides the type \texttt{lexicon}, 
-  HOW DO THEY WORK ?? TO BE COMPLETED
-  \begin{verbatim}
-  dest_lexicon: lexicon -> string list ;
-  make_lexicon: string list list -> lexicon ;
-  empty_lexicon: lexicon ;
-  extend_lexicon: string list list -> lexicon -> lexicon ;
-  merge_lexicons: lexicon -> lexicon -> lexicon ;
-  is_literal: lexicon -> string list -> bool ;
-  literal: lexicon -> string list -> string list * string list ;
-  \end{verbatim}
-  Two lexicons, for the commands and keywords, are stored and can be retrieved
-  by:
-  \begin{verbatim}
-  val (command_lexicon, keyword_lexicon) = OuterSyntax.get_lexicons () ;
-  val commands = Scan.dest_lexicon command_lexicon ;
-  val keywords = Scan.dest_lexicon keyword_lexicon ;
-  \end{verbatim}
-*}
-
-section{* The \texttt{OuterLex} structure *}
-
-text {*
-  The source file is @{text "src/Pure/Isar/outer_lex.ML"}.
-  In some other source files its name is abbreviated:
-  \begin{verbatim}
-  structure T = OuterLex;
-  \end{verbatim}
-  This structure defines the type \texttt{token}.
-  (The types
-  \texttt{OuterLex.token},
-  \texttt{OuterParse.token} and
-  \texttt{SpecParse.token} are all the same).
-  
-  Input text is split up into tokens, and the input source type for many parsing
-  functions is \texttt{token list}.
-
-  The datatype definition (which is not published in the signature) is
-  \begin{verbatim}
-  datatype token = Token of Position.T * (token_kind * string);
-  \end{verbatim}
-  but here are some runnable examples for viewing tokens: 
-
-*}
-
-
-
-
-ML{*
-  val toks = OuterSyntax.scan Position.none
-   "theory,imports;begin x.y.z apply ?v1 ?'a 'a -- || 44 simp (* xx *) { * fff * }" ;
-*}
-
-ML{*
-  print_depth 20 ;
-*}
-
-ML{*
-  map OuterLex.text_of toks ;
-*}
-
-ML{*
-  val proper_toks = filter OuterLex.is_proper toks ;
-*}  
-
-ML{*
-  map OuterLex.kind_of proper_toks 
-*}
-
-ML{*
-  map OuterLex.unparse proper_toks ;
-*}
-
-ML{*
-  OuterLex.stopper
-*}
-
-text {*
-
-  The function \texttt{is\_proper : token -> bool} identifies tokens which are
-  not white space or comments: many parsing functions assume require spaces or
-  comments to have been filtered out.
-  
-  There is a special end-of-file token:
-  \begin{verbatim}
-  val (tok_eof : token, is_eof : token -> bool) = T.stopper ; 
-  (* end of file token *)
-  \end{verbatim}
-
-*}
-
-section {* The \texttt{OuterParse} structure *}
-
-text {*
-  The source file is \texttt{src/Pure/Isar/outer\_parse.ML}.
-  In some other source files its name is abbreviated:
-  \begin{verbatim}
-  structure P = OuterParse;
-  \end{verbatim}
-  Here the parsers use \texttt{token list} as the input source type. 
-  
-  Some of the parsers simply select the first token, provided that it is of the
-  right kind (as returned by \texttt{T.kind\_of}): these are 
-  \texttt{ command, keyword, short\_ident, long\_ident, sym\_ident, term\_var,
-  type\_ident, type\_var, number, string, alt\_string, verbatim, sync, eof}
-  Others select the first token, provided that it is one of several kinds,
-  (eg, \texttt{name, xname, text, typ}).
-
-  \begin{verbatim}
-  type 'a tlp = token list -> 'a * token list ; (* token list parser *)
-  $$$ : string -> string tlp
-  nat : int tlp ;
-  maybe : 'a tlp -> 'a option tlp ;
-  \end{verbatim}
-
-  \texttt{\$\$\$ s} returns the first token,
-  if it equals \texttt{s} \emph{and} \texttt{s} is a keyword.
-
-  \texttt{nat} returns the first token, if it is a number, and evaluates it.
-
-  \texttt{maybe}: if \texttt{p} returns \texttt{r}, 
-  then \texttt{maybe p} returns \texttt{SOME r} ;
-  if the first token is an underscore, it returns \texttt{NONE}.
-
-  A few examples:
-  \begin{verbatim}
-  P.list : 'a tlp -> 'a list tlp ; (* likewise P.list1 *)
-  P.and_list : 'a tlp -> 'a list tlp ; (* likewise P.and_list1 *)
-  val toks : token list = OuterSyntax.scan "44 ,_, 66,77" ;
-  val proper_toks = List.filter T.is_proper toks ;
-  P.list P.nat toks ; (* OK, doesn't recognize white space *)
-  P.list P.nat proper_toks ; (* fails, doesn't recognize what follows ',' *)
-  P.list (P.maybe P.nat) proper_toks ; (* fails, end of input *)
-  P.list (P.maybe P.nat) (proper_toks @ [tok_eof]) ; (* OK *)
-  val toks : token list = OuterSyntax.scan "44 and 55 and 66 and 77" ;
-  P.and_list P.nat (List.filter T.is_proper toks @ [tok_eof]) ; (* ??? *)
-  \end{verbatim}
-
-  The following code helps run examples:
-  \begin{verbatim}
-  fun parse_str tlp str = 
-  let val toks : token list = OuterSyntax.scan str ;
-  val proper_toks = List.filter T.is_proper toks @ [tok_eof] ;
-  val (res, rem_toks) = tlp proper_toks ;
-  val rem_str = String.concat
-  (Library.separate " " (List.map T.unparse rem_toks)) ;
-  in (res, rem_str) end ;
-  \end{verbatim}
-
-  Some examples from \texttt{src/Pure/Isar/outer\_parse.ML}
-  \begin{verbatim}
-  val type_args =
-  type_ident >> Library.single ||
-  $$$ "(" |-- !!! (list1 type_ident --| $$$ ")") ||
-  Scan.succeed [];
-  \end{verbatim}
-  There are three ways parsing a list of type arguments can succeed.
-  The first line reads a single type argument, and turns it into a singleton
-  list.
-  The second line reads "(", and then the remainder, ignoring the "(" ;
-  the remainder consists of a list of type identifiers (at least one),
-  and then a ")" which is also ignored.
-  The \texttt{!!!} ensures that if the parsing proceeds this far and then fails,
-  it won't try the third line (see the description of \texttt{Scan.!!}).
-  The third line consumes no input and returns the empty list.
-
-  \begin{verbatim}
-  fun triple2 (x, (y, z)) = (x, y, z);
-  val arity = xname -- ($$$ "::" |-- !!! (
-  Scan.optional ($$$ "(" |-- !!! (list1 sort --| $$$ ")")) []
-  -- sort)) >> triple2;
-  \end{verbatim}
-  The parser \texttt{arity} reads a typename $t$, then ``\texttt{::}'' (which is
-  ignored), then optionally a list $ss$ of sorts and then another sort $s$.
-  The result $(t, (ss, s))$ is transformed by \texttt{triple2} to $(t, ss, s)$.
-  The second line reads the optional list of sorts:
-  it reads first ``\texttt{(}'' and last ``\texttt{)}'', which are both ignored,
-  and between them a comma-separated list of sorts.
-  If this list is absent, the default \texttt{[]} provides the list of sorts.
-
-  \begin{verbatim}
-  parse_str P.type_args "('a, 'b) ntyp" ;
-  parse_str P.type_args "'a ntyp" ;
-  parse_str P.type_args "ntyp" ;
-  parse_str P.arity "ty :: tycl" ;
-  parse_str P.arity "ty :: (tycl1, tycl2) tycl" ;
-  \end{verbatim}
-
-*}
-
-section {* The \texttt{SpecParse} structure *}
-
-text {*
-  The source file is \texttt{src/Pure/Isar/spec\_parse.ML}.
-  This structure contains token list parsers for more complicated values.
-  For example, 
-  \begin{verbatim}
-  open SpecParse ;
-  attrib : Attrib.src tok_rdr ; 
-  attribs : Attrib.src list tok_rdr ;
-  opt_attribs : Attrib.src list tok_rdr ;
-  xthm : (thmref * Attrib.src list) tok_rdr ;
-  xthms1 : (thmref * Attrib.src list) list tok_rdr ;
-  
-  parse_str attrib "simp" ;
-  parse_str opt_attribs "hello" ;
-  val (ass, "") = parse_str attribs "[standard, xxxx, simp, intro, OF sym]" ;
-  map Args.dest_src ass ;
-  val (asrc, "") = parse_str attrib "THEN trans [THEN sym]" ;
-  
-  parse_str xthm "mythm [attr]" ;
-  parse_str xthms1 "thm1 [attr] thms2" ;
-  \end{verbatim}
-  
-  As you can see, attributes are described using types of the \texttt{Args}
-  structure, described below.
-*}
-
-section{* The \texttt{Args} structure *}
-
-text {*
-  The source file is \texttt{src/Pure/Isar/args.ML}.
-  The primary type of this structure is the \texttt{src} datatype;
-  the single constructors not published in the signature, but 
-  \texttt{Args.src} and \texttt{Args.dest\_src}
-  are in fact the constructor and destructor functions.
-  Note that the types \texttt{Attrib.src} and \texttt{Method.src}
-  are in fact \texttt{Args.src}.
-
-  \begin{verbatim}
-  src : (string * Args.T list) * Position.T -> Args.src ;
-  dest_src : Args.src -> (string * Args.T list) * Position.T ;
-  Args.pretty_src : Proof.context -> Args.src -> Pretty.T ;
-  fun pr_src ctxt src = Pretty.string_of (Args.pretty_src ctxt src) ;
-
-  val thy = ML_Context.the_context () ;
-  val ctxt = ProofContext.init thy ;
-  map (pr_src ctxt) ass ;
-  \end{verbatim}
-
-  So an \texttt{Args.src} consists of the first word, then a list of further 
-  ``arguments'', of type \texttt{Args.T}, with information about position in the
-  input.
-  \begin{verbatim}
-  (* how an Args.src is parsed *)
-  P.position : 'a tlp -> ('a * Position.T) tlp ;
-  P.arguments : Args.T list tlp ;
-
-  val parse_src : Args.src tlp =
-  P.position (P.xname -- P.arguments) >> Args.src ;
-  \end{verbatim}
-
-  \begin{verbatim}
-  val ((first_word, args), pos) = Args.dest_src asrc ;
-  map Args.string_of args ;
-  \end{verbatim}
-
-  The \texttt{Args} structure contains more parsers and parser transformers 
-  for which the input source type is \texttt{Args.T list}.  For example,
-  \begin{verbatim}
-  type 'a atlp = Args.T list -> 'a * Args.T list ;
-  open Args ;
-  nat : int atlp ; (* also Args.int *)
-  thm_sel : PureThy.interval list atlp ;
-  list : 'a atlp -> 'a list atlp ;
-  attribs : (string -> string) -> Args.src list atlp ;
-  opt_attribs : (string -> string) -> Args.src list atlp ;
-  
-  (* parse_atl_str : 'a atlp -> (string -> 'a * string) ;
-  given an Args.T list parser, to get a string parser *)
-  fun parse_atl_str atlp str = 
-  let val (ats, rem_str) = parse_str P.arguments str ;
-  val (res, rem_ats) = atlp ats ;
-  in (res, String.concat (Library.separate " "
-  (List.map Args.string_of rem_ats @ [rem_str]))) end ;
-
-  parse_atl_str Args.int "-1-," ;
-  parse_atl_str (Scan.option Args.int) "x1-," ;
-  parse_atl_str Args.thm_sel "(1-,4,13-22)" ;
-
-  val (ats as atsrc :: _, "") = parse_atl_str (Args.attribs I)
-  "[THEN trans [THEN sym], simp, OF sym]" ;
-  \end{verbatim}
-
-  From here, an attribute is interpreted using \texttt{Attrib.attribute}.
-
-  \texttt{Args} has a large number of functions which parse an \texttt{Args.src}
-  and also refer to a generic context.  
-  Note the use of \texttt{Scan.lift} for this.
-  (as does \texttt{Attrib} - RETHINK THIS)
-  
-  (\texttt{Args.syntax} shown below has type specialised)
-
-  \begin{verbatim}
-  type ('res, 'src) parse_fn = 'src -> 'res * 'src ;
-  type 'a cgatlp = ('a, Context.generic * Args.T list) parse_fn ;
-  Scan.lift : 'a atlp -> 'a cgatlp ;
-  term : term cgatlp ;
-  typ : typ cgatlp ;
-  
-  Args.syntax : string -> 'res cgatlp -> src -> ('res, Context.generic) parse_fn ;
-  Attrib.thm : thm cgatlp ;
-  Attrib.thms : thm list cgatlp ;
-  Attrib.multi_thm : thm list cgatlp ;
-  
-  (* parse_cgatl_str : 'a cgatlp -> (string -> 'a * string) ;
-  given a (Context.generic * Args.T list) parser, to get a string parser *)
-  fun parse_cgatl_str cgatlp str = 
-  let 
-    (* use the current generic context *)
-    val generic = Context.Theory thy ;
-    val (ats, rem_str) = parse_str P.arguments str ;
-    (* ignore any change to the generic context *)
-    val (res, (_, rem_ats)) = cgatlp (generic, ats) ;
-  in (res, String.concat (Library.separate " "
-      (List.map Args.string_of rem_ats @ [rem_str]))) end ;
-  \end{verbatim}
-*}
-
-section{* Attributes, and the \texttt{Attrib} structure *}
-
-text {*
-  The type \texttt{attribute} is declared in \texttt{src/Pure/thm.ML}.
-  The source file for the \texttt{Attrib} structure is
-  \texttt{src/Pure/Isar/attrib.ML}.
-  Most attributes use a theorem to change a generic context (for example, 
-  by declaring that the theorem should be used, by default, in simplification),
-  or change a theorem (which most often involves referring to the current
-  theory). 
-  The functions \texttt{Thm.rule\_attribute} and
-  \texttt{Thm.declaration\_attribute} create attributes of these kinds.
-
-  \begin{verbatim}
-  type attribute = Context.generic * thm -> Context.generic * thm;
-  type 'a trf = 'a -> 'a ; (* transformer of a given type *)
-  Thm.rule_attribute  : (Context.generic -> thm -> thm) -> attribute ;
-  Thm.declaration_attribute : (thm -> Context.generic trf) -> attribute ;
-
-  Attrib.print_attributes : theory -> unit ;
-  Attrib.pretty_attribs : Proof.context -> src list -> Pretty.T list ;
-
-  List.app Pretty.writeln (Attrib.pretty_attribs ctxt ass) ;
-  \end{verbatim}
-
-  An attribute is stored in a theory as indicated by:
-  \begin{verbatim}
-  Attrib.add_attributes : 
-  (bstring * (src -> attribute) * string) list -> theory trf ; 
-  (*
-  Attrib.add_attributes [("THEN", THEN_att, "resolution with rule")] ;
-  *)
-  \end{verbatim}
-  where the first and third arguments are name and description of the attribute,
-  and the second is a function which parses the attribute input text 
-  (including the attribute name, which has necessarily already been parsed).
-  Here, \texttt{THEN\_att} is a function declared in the code for the
-  structure \texttt{Attrib}, but not published in its signature.
-  The source file \texttt{src/Pure/Isar/attrib.ML} shows the use of 
-  \texttt{Attrib.add\_attributes} to add a number of attributes.
-
-  \begin{verbatim}
-  FullAttrib.THEN_att : src -> attribute ;
-  FullAttrib.THEN_att atsrc (generic, ML_Context.thm "sym") ;
-  FullAttrib.THEN_att atsrc (generic, ML_Context.thm "all_comm") ;
-  \end{verbatim}
-
-  \begin{verbatim}
-  Attrib.syntax : attribute cgatlp -> src -> attribute ;
-  Attrib.no_args : attribute -> src -> attribute ;
-  \end{verbatim}
-  When this is called as \texttt{syntax scan src (gc, th)}
-  the generic context \texttt{gc} is used 
-  (and potentially changed to \texttt{gc'})
-  by \texttt{scan} in parsing to obtain an attribute \texttt{attr} which would
-  then be applied to \texttt{(gc', th)}.
-  The source for parsing the attribute is the arguments part of \texttt{src},
-  which must all be consumed by the parse.
-
-  For example, for \texttt{Attrib.no\_args attr src}, the attribute parser 
-  simply returns \texttt{attr}, requiring that the arguments part of
-  \texttt{src} must be empty.
-
-  Some examples from \texttt{src/Pure/Isar/attrib.ML}, modified:
-  \begin{verbatim}
-  fun rot_att_n n (gc, th) = (gc, rotate_prems n th) ;
-  rot_att_n : int -> attribute ;
-  val rot_arg = Scan.lift (Scan.optional Args.int 1 : int atlp) : int cgatlp ;
-  val rotated_att : src -> attribute =
-  Attrib.syntax (rot_arg >> rot_att_n : attribute cgatlp) ;
-  
-  val THEN_arg : int cgatlp = Scan.lift 
-  (Scan.optional (Args.bracks Args.nat : int atlp) 1 : int atlp) ;
-
-  Attrib.thm : thm cgatlp ;
-
-  THEN_arg -- Attrib.thm : (int * thm) cgatlp ;
-
-  fun THEN_att_n (n, tht) (gc, th) = (gc, th RSN (n, tht)) ;
-  THEN_att_n : int * thm -> attribute ;
-
-  val THEN_att : src -> attribute = Attrib.syntax
-  (THEN_arg -- Attrib.thm >> THEN_att_n : attribute cgatlp);
-  \end{verbatim}
-  The functions I've called \texttt{rot\_arg} and \texttt{THEN\_arg}
-  read an optional argument, which for \texttt{rotated} is an integer, 
-  and for \texttt{THEN} is a natural enclosed in square brackets;
-  the default, if the argument is absent, is 1 in each case.
-  Functions \texttt{rot\_att\_n} and \texttt{THEN\_att\_n} turn these into
-  attributes, where \texttt{THEN\_att\_n} also requires a theorem, which is
-  parsed by \texttt{Attrib.thm}.  
-  Infix operators \texttt{--} and \texttt{>>} are in the structure \texttt{Scan}.
-
-*}
-
-section{* Methods, and the \texttt{Method} structure *}
-
-text {*
-  The source file is \texttt{src/Pure/Isar/method.ML}.
-  The type \texttt{method} is defined by the datatype declaration
-  \begin{verbatim}
-  (* datatype method = Meth of thm list -> cases_tactic; *)
-  RuleCases.NO_CASES : tactic -> cases_tactic ;
-  \end{verbatim}
-  In fact \texttt{RAW\_METHOD\_CASES} (below) is exactly the constructor
-  \texttt{Meth}.
-  A \texttt{cases\_tactic} is an elaborated version of a tactic.
-  \texttt{NO\_CASES tac} is a \texttt{cases\_tactic} which consists of a
-  \texttt{cases\_tactic} without any further case information.
-  For further details see the description of structure \texttt{RuleCases} below.
-  The list of theorems to be passed to a method consists of the current
-  \emph{facts} in the proof.
-  
-  \begin{verbatim}
-  RAW_METHOD : (thm list -> tactic) -> method ;
-  METHOD : (thm list -> tactic) -> method ;
-  
-  SIMPLE_METHOD : tactic -> method ;
-  SIMPLE_METHOD' : (int -> tactic) -> method ;
-  SIMPLE_METHOD'' : ((int -> tactic) -> tactic) -> (int -> tactic) -> method ;
-
-  RAW_METHOD_CASES : (thm list -> cases_tactic) -> method ;
-  METHOD_CASES : (thm list -> cases_tactic) -> method ;
-  \end{verbatim}
-  A method is, in its simplest form, a tactic; applying the method is to apply
-  the tactic to the current goal state.
-
-  Applying \texttt{RAW\_METHOD tacf} creates a tactic by applying 
-  \texttt{tacf} to the current {facts}, and applying that tactic to the
-  goal state.
-
-  \texttt{METHOD} is similar but also first applies
-  \texttt{Goal.conjunction\_tac} to all subgoals.
-
-  \texttt{SIMPLE\_METHOD tac} inserts the facts into all subgoals and then
-  applies \texttt{tacf}.
-
-  \texttt{SIMPLE\_METHOD' tacf} inserts the facts and then
-  applies \texttt{tacf} to subgoal 1.
-
-  \texttt{SIMPLE\_METHOD'' quant tacf} does this for subgoal(s) selected by
-  \texttt{quant}, which may be, for example,
-  \texttt{ALLGOALS} (all subgoals),
-  \texttt{TRYALL} (try all subgoals, failure is OK),
-  \texttt{FIRSTGOAL} (try subgoals until it succeeds once), 
-  \texttt{(fn tacf => tacf 4)} (subgoal 4), etc
-  (see the \texttt{Tactical} structure, FIXME) %%\cite[Chapter 4]{ref}).
-
-  A method is stored in a theory as indicated by:
-  \begin{verbatim}
-  Method.add_method : 
-  (bstring * (src -> Proof.context -> method) * string) -> theory trf ; 
-  ( *
-  * )
-  \end{verbatim}
-  where the first and third arguments are name and description of the method,
-  and the second is a function which parses the method input text 
-  (including the method name, which has necessarily already been parsed).
-
-  Here, \texttt{xxx} is a function declared in the code for the
-  structure \texttt{Method}, but not published in its signature.
-  The source file \texttt{src/Pure/Isar/method.ML} shows the use of 
-  \texttt{Method.add\_method} to add a number of methods.
-*}
-
-(*>*)
 end
--- a/ProgTutorial/Recipes/Antiquotes.thy	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Recipes/Antiquotes.thy	Thu May 27 10:39:07 2010 +0200
@@ -78,11 +78,11 @@
    ThyOutput.output (map Pretty.str (space_explode "\n" code_txt)))
 
 val _ = ThyOutput.antiquotation "ML_checked"
-         (Scan.lift (OuterParse.position Args.name)) output_ml *}
+         (Scan.lift (Parse.position Args.name)) output_ml *}
 
 text {*
   where in Lines 1 and 2 the positional information is properly treated. The
-  parser @{ML OuterParse.position} encodes the positional information in the 
+  parser @{ML Parse.position} encodes the positional information in the 
   result.
 
   We can now write @{text "@{ML_checked \"2 + 3\"}"} in a document in order to
@@ -137,7 +137,7 @@
    end)
 
 val _ = ThyOutput.antiquotation "ML_resp" 
-          (Scan.lift (OuterParse.position (Args.name -- Args.name))) 
+          (Scan.lift (Parse.position (Args.name -- Args.name))) 
              output_ml_resp*}
 
 text {*
--- a/ProgTutorial/Solutions.thy	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/Solutions.thy	Thu May 27 10:39:07 2010 +0200
@@ -90,13 +90,13 @@
  | Add of expr * expr
 
 fun parse_basic xs =
-  (OuterParse.nat >> Number 
-   || OuterParse.$$$ "(" |-- parse_expr --| OuterParse.$$$ ")") xs
+  (Parse.nat >> Number 
+   || Parse.$$$ "(" |-- parse_expr --| Parse.$$$ ")") xs
 and parse_factor xs =
-  (parse_basic --| OuterParse.$$$ "*" -- parse_factor >> Mult 
+  (parse_basic --| Parse.$$$ "*" -- parse_factor >> Mult 
    || parse_basic) xs
 and parse_expr xs =
-  (parse_factor --| OuterParse.$$$ "+" -- parse_expr >> Add 
+  (parse_factor --| Parse.$$$ "+" -- parse_expr >> Add 
    || parse_factor) xs*}
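
As a quick sanity check of the renamed parser, the following hypothetical test
uses the @{ML parse} and @{ML filtered_input} helpers from the Parsing chapter
and assumes that "+" and "*" have been registered as keywords:

ML {*
(* sketch: "4 + 2 * 3" should yield Add (Number 4, Mult (Number 2, Number 3)) *)
let
  val input = filtered_input "4 + 2 * 3"
in
  parse parse_expr input
end
*}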
 
 
--- a/ProgTutorial/antiquote_setup.ML	Mon May 24 20:02:11 2010 +0100
+++ b/ProgTutorial/antiquote_setup.ML	Thu May 27 10:39:07 2010 +0200
@@ -41,8 +41,8 @@
    output (split_lines txt))
 
 val parser_ml = Scan.lift (Args.name --
-  (Scan.optional (Args.$$$ "for" |-- OuterParse.!!! (Scan.repeat1 Args.name)) [] --
-   Scan.option (Args.$$$ "in"  |-- OuterParse.!!! Args.name))) 
+  (Scan.optional (Args.$$$ "for" |-- Parse.!!! (Scan.repeat1 Args.name)) [] --
+   Scan.option (Args.$$$ "in"  |-- Parse.!!! Args.name))) 
 
 (* checks and prints a single ML-item and produces an index entry *)
 fun output_ml_ind {context = ctxt, ...} (txt, stru) =
@@ -53,7 +53,7 @@
    | (SOME st, _, _) => output_indexed {main = Code txt, minor = Struct st} (split_lines txt))
 
 val parser_ml_ind = Scan.lift (Args.name --
-  Scan.option (Args.$$$ "in"  |-- OuterParse.!!! Args.name))
+  Scan.option (Args.$$$ "in"  |-- Parse.!!! Args.name))
 
 (* checks and prints structures *)
 fun gen_output_struct outfn {context = ctxt, ...} txt = 
Binary file progtutorial.pdf has changed