--- a/progs/crawler1.scala Tue Sep 20 12:13:11 2016 +0100
+++ b/progs/crawler1.scala Tue Sep 20 12:17:01 2016 +0100
@@ -32,8 +32,8 @@
}
// some starting URLs for the crawler
-//val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc"""
-val startURL = """http://www.inf.kcl.ac.uk/staff/mcburney"""
+val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc"""
+//val startURL = """http://www.inf.kcl.ac.uk/staff/mcburney"""
crawl(startURL, 2)
--- a/progs/crawler2.scala Tue Sep 20 12:13:11 2016 +0100
+++ b/progs/crawler2.scala Tue Sep 20 12:17:01 2016 +0100
@@ -13,7 +13,7 @@
// regexes for URLs and "my" domain
val http_pattern = """"https?://[^"]*"""".r
-val my_urls = """urbanc""".r (*@\label{myurlline}@*)
+val my_urls = """urbanc""".r (*@\label{myurlline}@*)
def unquote(s: String) = s.drop(1).dropRight(1)
@@ -21,11 +21,11 @@
http_pattern.findAllIn(page).map(unquote).toSet
def crawl(url: String, n: Int) : Unit = {
- if (n == 0) () (*@\label{changestartline}@*)
+ if (n == 0) () (*@\label{changestartline}@*)
else if (my_urls.findFirstIn(url) == None) {
println(s"Visiting: $n $url")
get_page(url); ()
- } (*@\label{changeendline}@*)
+ } (*@\label{changeendline}@*)
else {
println(s"Visiting: $n $url")
for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
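For reference, the crawler2.scala hunks above can be assembled into a runnable Scala 2 script; a minimal sketch follows. It is not part of the patch: the imports, the get_page helper, the get_all_URLs signature, the closing brace of crawl and the final call are assumptions filled in around the lines shown in the diff, and the (*@\label{...}@*) annotations (LaTeX listing markers) are omitted.

// Runnable sketch assembled from the crawler2.scala hunks above.
// Lines not shown in the diff (imports, get_page, the closing brace,
// the example call) are assumptions, not part of the patch.

import io.Source
import scala.util._

// regexes for URLs and "my" domain
val http_pattern = """"https?://[^"]*"""".r
val my_urls = """urbanc""".r

// assumed helper: fetch the first 10K characters of a page,
// falling back to "" if the URL cannot be read
def get_page(url: String) : String =
  Try(Source.fromURL(url)("ISO-8859-1").take(10000).mkString).
    getOrElse { println(s"  Problem with: $url"); "" }

// strips the surrounding quotes that http_pattern captures
def unquote(s: String) = s.drop(1).dropRight(1)

def get_all_URLs(page: String) : Set[String] =
  http_pattern.findAllIn(page).map(unquote).toSet

def crawl(url: String, n: Int) : Unit = {
  if (n == 0) ()                       // depth exhausted: stop
  else if (my_urls.findFirstIn(url) == None) {
    // page is outside the "urbanc" domain: visit it, but do not follow links
    println(s"Visiting: $n $url")
    get_page(url); ()
  }
  else {
    // page is inside the domain: visit it and recurse on every URL found
    println(s"Visiting: $n $url")
    for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
  }
}

// example invocation (URL taken from the crawler1.scala hunk above)
crawl("""http://www.inf.kcl.ac.uk/staff/urbanc""", 2)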