import io.Source
import scala.util.matching.Regex

// gets the first ~10K (10000 characters) of a page;
// returns "" if the page cannot be fetched
def get_page(url: String) : String = {
  try {
    Source.fromURL(url).take(10000).mkString
  }
  catch {
    // catch only Exception so that fatal JVM errors are not swallowed
    case e: Exception => {
      println(" Problem with: " + url)
      ""
    }
  }
}
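
// A quick check of the truncation behaviour (a minimal sketch; no
// network access needed): Source.fromString yields the same kind of
// character iterator as Source.fromURL.
assert(Source.fromString("a" * 20000).take(10000).mkString.length == 10000)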

// starting URL for the crawler
val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc/"""

// regex for URLs: a double-quoted http:// or https:// link
val http_pattern = """\"https?://[^\"]*\"""".r

// strips the surrounding double quotes from a matched URL
def unquote(s: String) = s.drop(1).dropRight(1)

// collects all URLs on a page into a set
def get_all_URLs(page: String) : Set[String] = {
  (http_pattern.findAllIn(page)).map { unquote(_) }.toSet
}
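
// A quick sanity check (a minimal sketch; the HTML snippet is made up
// for illustration): the pattern matches quoted absolute URLs and
// unquote strips the quotes.
assert(get_all_URLs("""<a href="http://example.com/a">a</a>""") ==
       Set("http://example.com/a"))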

// naive version - searches until a given depth;
// potentially visits pages more than once
def crawl(url: String, n: Int) : Unit = {
  if (n == 0) ()
  else {
    println("Visiting: " + n + " " + url)
    for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
  }
}

crawl(startURL, 2)
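
// A possible refinement (a sketch, not part of the script above; the
// name crawl2 is made up): thread the set of already-visited URLs
// through the recursion so that no page is fetched twice.
def crawl2(url: String, n: Int, visited: Set[String] = Set()) : Set[String] = {
  if (n == 0 || visited.contains(url)) visited
  else {
    println("Visiting: " + n + " " + url)
    get_all_URLs(get_page(url)).foldLeft(visited + url) {
      (seen, u) => crawl2(u, n - 1, seen)
    }
  }
}

// example invocation, mirroring the call above:
// crawl2(startURL, 2)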