// A simple web crawler in Scala
// Christian Urban <christian dot urban at kcl dot ac dot uk>
// 24 Nov 2012

import scala.io.Source
import scala.util.matching.Regex

// gets the first ~10K of a page
def get_page(url: String) : String = {
  try {
    Source.fromURL(url).take(10000).mkString
  }
  catch {
    case _: Exception => {
      println(" Problem with: " + url)
      ""
    }
  }
}
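
// a quick check of the failure case (added example; the URL is made up):
// a malformed URL lands in the catch-branch, which prints a warning and
// yields the empty string
assert(get_page("not-a-url") == "")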

// regex for URLs: matches a double-quoted http(s)-URL, as it appears
// inside an href-attribute, including the surrounding quotes
val http_pattern = """\"https?://[^\"]*\"""".r

// strips the first and last character (the quotes) off a match
def unquote(s: String) = s.drop(1).dropRight(1)
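
// a small illustration (added example; the URL is made up): the pattern
// keeps the surrounding quotes, which unquote then strips off again
assert(unquote("\"http://a.b/c\"") == "http://a.b/c")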

// extracts all URLs on a page
def get_all_URLs(page: String) : Set[String] = {
  http_pattern.findAllIn(page).map(unquote).toSet
}
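
// a sanity check (added example; the snippet is made up): duplicate
// links collapse into one, since the result is a Set
assert(get_all_URLs("""<a href="http://a.b/x">1</a> <a href="http://a.b/x">2</a>""") ==
       Set("http://a.b/x"))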

// naive version - searches until a given depth;
// visits pages potentially more than once
def crawl(url: String, n: Int) : Unit = {
  if (n == 0) ()
  else {
    println("Visiting: " + n + " " + url)
    for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
  }
}
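
// a sketch of a possible variant (added, not in the original): it avoids
// revisiting pages by threading the set of already-visited URLs through
// the recursion, so each page is fetched at most once
def crawl_once(url: String, n: Int, seen: Set[String] = Set()) : Set[String] = {
  if (n == 0 || seen.contains(url)) seen
  else {
    println("Visiting: " + n + " " + url)
    get_all_URLs(get_page(url)).foldLeft(seen + url) {
      (acc, u) => crawl_once(u, n - 1, acc)
    }
  }
}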

// starting URL for the crawler
val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc/"""
//val startURL = """http://www.inf.kcl.ac.uk/staff/mml/"""

// calls executed when the script is run on the command line
crawl(startURL, 2)

crawl("""http://www.dcs.kcl.ac.uk/staff/urbanc/msc-projects-12.html""", 2)