--- a/crawler2.scala Sun Dec 23 00:38:56 2012 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-import scala.io.Source
-import scala.util.matching.Regex
-
-// fetches the first ~10,000 characters of a page
-def get_page(url: String) : String = {
- try {
- Source.fromURL(url).take(10000).mkString
- }
- catch {
- case e: Throwable => {
- println(" Problem with: " + url)
- ""
- }
- }
-}
-
-// starting URL for the crawler
-val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc/"""
-
-// regex for quoted http(s) URLs
-val http_pattern = """\"https?://[^\"]*\"""".r
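-// regex restricting the crawl to URLs containing "urbanc"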
-val my_urls = """urbanc""".r
-
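-// strips the surrounding double quotes from a matched URL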
-def unquote(s: String) = s.drop(1).dropRight(1)
-
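-// collects all quoted URLs on a page into a set (no duplicates)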
-def get_all_URLs(page: String) : Set[String] = {
- (http_pattern.findAllIn(page)).map { unquote(_) }.toSet
-}
-
-// naive version - searches until a given depth;
-// may visit the same page more than once
-def crawl(url: String, n: Int) : Unit = {
- if (n == 0) ()
- else if (my_urls.findFirstIn(url).isEmpty) ()
- else {
- println("Visiting: " + n + " " + url)
- for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
- }
-}
-
-// start the crawl from the command line,
-// here with a depth bound of 4
-crawl(startURL, 4)
-
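
The crawl above bounds the search by depth but, as its own comment notes,
may visit the same page many times. A minimal sketch of a de-duplicating
variant follows (the name crawl2 is hypothetical, and it assumes the
get_page, get_all_URLs, my_urls and startURL definitions from the deleted
file are in scope): it threads a set of already-visited URLs through the
recursion and returns it, so each page is fetched at most once.

  // sketch, not the original code: returns the set of URLs visited so far
  def crawl2(url: String, n: Int, visited: Set[String] = Set()): Set[String] = {
    if (n == 0 || visited.contains(url) || my_urls.findFirstIn(url).isEmpty) visited
    else {
      println("Visiting: " + n + " " + url)
      // record this page, then fold the recursive calls over its links,
      // threading the accumulated visited set from sibling to sibling
      get_all_URLs(get_page(url)).foldLeft(visited + url) {
        (seen, u) => crawl2(u, n - 1, seen)
      }
    }
  }

  crawl2(startURL, 4)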