--- a/progs/crawler2.scala Thu Apr 16 19:15:46 2020 +0100
+++ b/progs/crawler2.scala Wed May 06 15:37:31 2020 +0100
@@ -1,44 +1,38 @@
-// This version of the crawler only
-// checks links in the "domain" urbanc
+// This version of the crawler also
+// "harvests" email addresses from webpages
import io.Source
import scala.util.matching.Regex
import scala.util._
-// gets the first 10K of a web-page
def get_page(url: String) : String = {
- Try(Source.fromURL(url)("ISO-8859-1").take(10000).mkString).
+ Try(Source.fromURL(url)("ISO-8859-1").take(10000).mkString).
getOrElse { println(s" Problem with: $url"); ""}
}
-// regexes for URLs and "my" domain
+// regexes for URLs and for email addresses
val http_pattern = """"https?://[^"]*"""".r
-val my_urls = """urban""".r /*@\label{myurlline}@*/
-//val my_urls = """kcl.ac.uk""".r
+val email_pattern = """([a-z0-9_\.-]+)@([\da-z\.-]+)\.([a-z\.]{2,6})""".r /*@\label{emailline}@*/
def unquote(s: String) = s.drop(1).dropRight(1)
def get_all_URLs(page: String) : Set[String] =
http_pattern.findAllIn(page).map(unquote).toSet
+def print_str(s: String) =
+ if (s == "") () else println(s)
+
def crawl(url: String, n: Int) : Unit = {
- if (n == 0) () /*@\label{changestartline}@*/
- else if (my_urls.findFirstIn(url) == None) {
- println(s"Visiting: $n $url")
- get_page(url); ()
- } /*@\label{changeendline}@*/
+ if (n == 0) ()
else {
- println(s"Visiting: $n $url")
- for (u <- get_all_URLs(get_page(url)).par) crawl(u, n - 1)
+ println(s" Visiting: $n $url")
+ val page = get_page(url)
+ print_str(email_pattern.findAllIn(page).mkString("\n")) /*@\label{mainline}@*/
+ for (u <- get_all_URLs(page).par) crawl(u, n - 1)
}
}
-// starting URL for the crawler
+// starting URL for the crawler
val startURL = """https://nms.kcl.ac.uk/christian.urban/"""
-//val startURL = """https://nms.kcl.ac.uk/christian.urban/bsc-projects-17.html"""
-
-// can now deal with depth 3 and beyond
crawl(startURL, 3)
-
-
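As a quick illustration of the harvesting step this patch adds (a sketch, not
part of the patch itself; the page fragment and the addresses in it are
invented for illustration), the email regex can be exercised on its own:

// self-contained check of the email regex used by the crawler
import scala.util.matching.Regex

val email_pattern : Regex =
  """([a-z0-9_\.-]+)@([\da-z\.-]+)\.([a-z\.]{2,6})""".r

// a made-up page fragment containing two addresses
val sample_page =
  """<p>Contact jane_doe@example.org or webmaster@mail.example.co.uk</p>"""

// findAllIn yields every match; mkString("\n") mirrors the crawler's output
println(email_pattern.findAllIn(sample_page).mkString("\n"))
// prints:
//   jane_doe@example.org
//   webmaster@mail.example.co.uk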