# HG changeset patch
# User Christian Urban
# Date 1348624244 -3600
# Node ID 6e7da958ba8c4889e5ef76666e3a0aacd327522b
# Parent  b606c9439fa6bfd8d388d2b763c850f20a1efca7
updated

diff -r b606c9439fa6 -r 6e7da958ba8c app0.scala
--- a/app0.scala	Wed Sep 26 02:08:55 2012 +0100
+++ b/app0.scala	Wed Sep 26 02:50:44 2012 +0100
@@ -3,5 +3,6 @@
 // gets the first ~10K of a page
 def get_page(url: String) : String = {
   Source.fromURL(url).take(10000).mkString
 }
 
+get_page("""http://www.inf.kcl.ac.uk/staff/urbanc/""")

diff -r b606c9439fa6 -r 6e7da958ba8c app1.scala
--- a/app1.scala	Wed Sep 26 02:08:55 2012 +0100
+++ b/app1.scala	Wed Sep 26 02:50:44 2012 +0100
@@ -1,7 +1,3 @@
-import io.Source
-import scala.util.matching.Regex
-
-// gets the first ~10K of a page
 def get_page(url: String) : String = {
   try {
     Source.fromURL(url).take(10000).mkString
@@ -14,27 +10,3 @@
   }
 }
 
-// staring URL for the crawler
-val startURL = """http://www.inf.kcl.ac.uk/staff/urbanc/"""
-
-// regex for URLs
-val http_pattern = """\"https?://[^\"]*\"""".r
-
-def unquote(s: String) = s.drop(1).dropRight(1)
-
-def get_all_URLs(page: String) : Set[String] = {
-  (http_pattern.findAllIn(page)).map { unquote(_) }.toSet
-}
-
-// naive version - seraches until a given depth
-// visits pages potentially more than once
-def crawl(url: String, n: Int) : Unit = {
-  if (n == 0) ()
-  else {
-    println("Visiting: " + n + " " + url)
-    for (u <- get_all_URLs(get_page(url))) crawl(u, n - 1)
-  }
-}
-
-crawl(startURL, 2)
-

diff -r b606c9439fa6 -r 6e7da958ba8c slides01.pdf
Binary file slides01.pdf has changed

diff -r b606c9439fa6 -r 6e7da958ba8c slides01.tex
--- a/slides01.tex	Wed Sep 26 02:08:55 2012 +0100
+++ b/slides01.tex	Wed Sep 26 02:50:44 2012 +0100
@@ -228,16 +228,50 @@
 \end{frame}}
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\mode<presentation>{
+\begin{frame}[c]
+\frametitle{\begin{tabular}{c}A Web Crawler\end{tabular}}
+
+\begin{enumerate}
+\item given a URL, read the corresponding webpage
+\item extract all links from it
+\item call the web crawler again for all these links
+\end{enumerate}
+
+\end{frame}}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\mode<presentation>{
+\begin{frame}[c]
+\frametitle{\begin{tabular}{c}A Web Crawler\end{tabular}}
+
+\begin{enumerate}
+\item given a URL, read the corresponding webpage
+\item if not possible, print out a problem
+\item if possible, extract all links from it
+\item call the web crawler again for all these links
+\end{enumerate}\bigskip\pause
+
+\small (we need to bound the number of recursive calls)
+
+\small (the purpose is to check all links on my own webpage)
+\end{frame}}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 \mode<presentation>{
 \begin{frame}[c]
 \frametitle{\begin{tabular}{c}Scala\end{tabular}}
 
-\footnotesize a simple function for reading webpages
+\footnotesize a simple Scala function for reading webpages\\[-3mm]
 
 {\lstset{language=Scala}\fontsize{8}{10}\selectfont
 \texttt{\lstinputlisting{app0.scala}}}\pause\bigskip
 
+\footnotesize slightly more complicated for handling errors:\\[-3mm]
+
 \footnotesize
 {\lstset{language=Scala}\fontsize{8}{10}\selectfont
 \texttt{\lstinputlisting{app1.scala}}}
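
For reference, after this patch app1.scala keeps only the error-handling
version of get_page; the imports and the crawler code are removed,
presumably because the scripts are loaded in sequence after app0.scala.
The catch branch sits in the unshown lines 4-13 of the file, so the
handler body in this standalone sketch is an assumption, not text taken
from the patch:

    import io.Source   // removed from app1.scala by the patch; restored so the sketch runs on its own

    // gets the first ~10K of a page; yields "" when the URL cannot be read
    def get_page(url: String) : String = {
      try {
        Source.fromURL(url).take(10000).mkString
      }
      catch {
        // assumed handler: the actual catch body is outside the hunk context
        case _ : Exception => {
          println("Problem with: " + url)
          ""
        }
      }
    }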
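
The removed helpers find quoted http(s) URLs in a page and strip the
surrounding quotes again. A small usage example; the page fragment below
is invented for illustration and is not part of the patch:

    val http_pattern = """\"https?://[^\"]*\"""".r

    def unquote(s: String) = s.drop(1).dropRight(1)

    def get_all_URLs(page: String) : Set[String] =
      http_pattern.findAllIn(page).map(unquote).toSet

    // a made-up page fragment
    val page = """<a href="http://www.inf.kcl.ac.uk/staff/urbanc/">me</a>
                  <a href="https://example.com/x">link</a>"""

    get_all_URLs(page)
    // => Set(http://www.inf.kcl.ac.uk/staff/urbanc/, https://example.com/x)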
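
The removed comment warns that the naive crawl "visits pages potentially
more than once". A minimal sketch of the standard remedy, threading a set
of already-visited URLs through the recursion; the name crawl_visited and
its return type are mine, not part of the patch (get_page and
get_all_URLs as above):

    // hypothetical variant: never fetches the same URL twice,
    // still bounded by the recursion depth n
    def crawl_visited(url: String, n: Int, visited: Set[String]) : Set[String] = {
      if (n == 0 || visited.contains(url)) visited
      else {
        println("Visiting: " + n + " " + url)
        get_all_URLs(get_page(url)).foldLeft(visited + url) {
          (seen, u) => crawl_visited(u, n - 1, seen)
        }
      }
    }

    crawl_visited("""http://www.inf.kcl.ac.uk/staff/urbanc/""", 2, Set())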