// progs/crawler2.scala
// author: Christian Urban <christian dot urban at kcl dot ac dot uk>

// This version of the crawler follows links
// recursively only if they are in "my" domain, that
// is, contain the string "urbanc"; all other pages
// are visited once, but their links are not followed

import scala.io.Source
import scala.util.matching.Regex
import scala.util._

// gets the first 10K characters of a web-page;
// prints a warning and returns the empty string
// if the page cannot be fetched
def get_page(url: String) : String = {
  Try(Source.fromURL(url)("ISO-8859-1").take(10000).mkString) getOrElse 
    { println(s"  Problem with: $url"); ""}
}
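
// A minimal sketch of the error handling above (the URL is made up
// for illustration): a failing fetch prints a warning and evaluates
// to "", so the crawl continues instead of aborting.
//
//   get_page("http://www.inf.kcl.ac.uk/no-such-page")
//     // prints "  Problem with: ..." and returns ""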

// regexes for URLs and "my" domain
val http_pattern = """"https?://[^"]*"""".r
val my_urls = """urbanc""".r       /*@\label{myurlline}@*/
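
// A small illustration (with an assumed HTML snippet) of what
// http_pattern matches: the entire quoted URL, quotes included,
// which is why unquote below strips one character from each end.
//
//   http_pattern.findFirstIn("""<a href="http://www.inf.kcl.ac.uk/">""")
//     // matches the substring "http://www.inf.kcl.ac.uk/" (quotes included)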

// strips the enclosing double quotes from a matched URL
def unquote(s: String) = s.drop(1).dropRight(1)

// extracts the set of all URLs occurring on a page
def get_all_URLs(page: String) : Set[String] = 
  http_pattern.findAllIn(page).map(unquote).toSet
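
// For example, on the assumed snippet below the duplicate link
// appears only once in the result, because toSet removes duplicates:
//
//   get_all_URLs("""<a href="http://a.org"> <a href="http://a.org">""")
//     // => Set("http://a.org")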

// the main crawler function: url is the page to visit,
// n bounds the recursion depth
def crawl(url: String, n: Int) : Unit = {
  if (n == 0) ()                   /*@\label{changestartline}@*/
  // pages outside "my" domain are visited once,
  // but their links are not followed
  else if (my_urls.findFirstIn(url) == None) { 
    println(s"Visiting: $n $url")
    get_page(url); () 
  }                                /*@\label{changeendline}@*/
  else {
    println(s"Visiting: $n $url")
    for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
  }
}
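
// The "domain" restriction is just a regex match on the URL, so the
// crawler can be re-targeted by swapping my_urls; for instance, the
// hypothetical setting
//
//   val my_urls = """kcl\.ac\.uk""".r
//
// would make the crawler recurse only into pages whose URL
// mentions kcl.ac.uk.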

// starting URL for the crawler
val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc"""


// in contrast to the previous version, this crawler can
// also cope with depths of 3 and beyond; here it is
// started with depth 2
crawl(startURL, 2)