scala/crawler3.scala
changeset 92 e85600529ca5
parent 7 73cf4406b773
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/scala/crawler3.scala	Sat Jun 15 09:11:11 2013 -0400
@@ -0,0 +1,75 @@
+import scala.io.Source
+import scala.util.matching.Regex
+
+// gets the first ~10K of a page
+def get_page(url: String) : String = { 
+  try {
+    Source.fromURL(url).take(10000).mkString  
+  }
+  catch {
+    case _: Exception => {
+      println("  Problem with: " + url)
+      ""
+    }
+  }
+}
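+
+// an alternative sketch of get_page using scala.util.Try; get_page_try is
+// just an illustrative name (the crawler below still uses get_page)
+def get_page_try(url: String) : String =
+  scala.util.Try(Source.fromURL(url).take(10000).mkString).getOrElse {
+    println("  Problem with: " + url)
+    ""
+  }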
+
+// starting URL for the crawler
+val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc/"""
+
+// regexes for URLs and email addresses
+val http_pattern = """\"https?://[^\"]*\"""".r
+val my_urls = """urbanc""".r   // identifies 'our' URLs (used by the commented-out filter in crawl)
+val email_pattern = """([a-z0-9_\.-]+)@([\da-z\.-]+)\.([a-z\.]{2,6})""".r
+
+// email regex based on: http://net.tutsplus.com/tutorials/other/8-regular-expressions-you-should-know/
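+
+// an illustrative check of the email regex on a made-up address:
+assert(email_pattern.findFirstIn("contact jane_doe@example.co.uk here") == Some("jane_doe@example.co.uk"))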
+
+def unquote(s: String) = s.drop(1).dropRight(1)   // strips the enclosing double quotes
+
+def get_all_URLs(page: String) : Set[String] = {
+  http_pattern.findAllIn(page).map(unquote).toSet
+}
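+
+// a quick sanity check on a made-up snippet of HTML (illustrative input):
+assert(get_all_URLs("<a href=\"http://foo.com/\">foo</a>") == Set("http://foo.com/"))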
+
+// naive version - searches up to a given depth
+// visits pages potentially more than once
+def crawl(url: String, n: Int) : Unit = {
+  if (n == 0) ()
+  //else if (my_urls.findFirstIn(url) == None) ()
+  else {
+    println("Visiting: " + n + " " + url)
+    val page = get_page(url)
+    println(email_pattern.findAllIn(page).mkString("\n"))
+    for (u <- get_all_URLs(page)) crawl(u, n - 1)
+  }
+}
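+
+// a sketch of a variant that remembers visited pages, so each URL is
+// fetched at most once (crawl_once and visited are illustrative names)
+def crawl_once(url: String, n: Int, visited: Set[String] = Set()) : Set[String] = {
+  if (n == 0 || visited.contains(url)) visited
+  else {
+    println("Visiting: " + n + " " + url)
+    val page = get_page(url)
+    println(email_pattern.findAllIn(page).mkString("\n"))
+    get_all_URLs(page).foldLeft(visited + url) { (vs, u) => crawl_once(u, n - 1, vs) }
+  }
+}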
+
+// can now deal with depth 3
+// run the script from the command line with: scala crawler3.scala
+crawl(startURL, 3)
+