// This version of the crawler only
// checks links in the "domain" urban

import io.Source
import scala.util.matching.Regex
import scala.util._

// gets the first 10K of a web-page
def get_page(url: String) : String = {
  Try(Source.fromURL(url)("ISO-8859-1").take(10000).mkString).
    getOrElse { println(s" Problem with: $url"); "" }
}

// regexes for URLs and "my" domain
val http_pattern = """"https?://[^"]*"""".r
val my_urls = """urban""".r        /*@\label{myurlline}@*/
//val my_urls = """kcl.ac.uk""".r

// drops the enclosing double-quotes from a matched URL
def unquote(s: String) = s.drop(1).dropRight(1)

def get_all_URLs(page: String) : Set[String] =
  http_pattern.findAllIn(page).map(unquote).toSet

// stops at depth 0; fetches but does not recurse into pages outside
// "my" domain; otherwise crawls all URLs on the page in parallel
def crawl(url: String, n: Int) : Unit = {
  if (n == 0) ()                   /*@\label{changestartline}@*/
  else if (my_urls.findFirstIn(url) == None) {
    println(s"Visiting: $n $url")
    get_page(url); ()
  }                                /*@\label{changeendline}@*/
  else {
    println(s"Visiting: $n $url")
    for (u <- get_all_URLs(get_page(url)).par) crawl(u, n - 1)
  }
}

// starting URL for the crawler
val startURL = """https://nms.kcl.ac.uk/christian.urban/"""
//val startURL = """https://nms.kcl.ac.uk/christian.urban/bsc-projects-17.html"""

// can now deal with depth 3 and beyond
crawl(startURL, 3)
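
// Example of the URL extraction above (an illustration, not part of
// the original script): http_pattern matches a double-quoted URL and
// unquote strips the surrounding quotes.
//
//   get_all_URLs("""<a href="http://foo.com/x">link</a>""")
//   // => Set("http://foo.com/x")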
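
// Note on .par (an assumption about the Scala version, not stated in
// the original): on Scala 2.13 and later the .par method lives in the
// separate scala-parallel-collections module, so running this script
// there would additionally need something like:
//
//   //> using dep "org.scala-lang.modules::scala-parallel-collections::1.0.4"
//   import scala.collection.parallel.CollectionConverters._
//
// On Scala 2.12, where .par is part of the standard library, the
// script runs as-is.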