#!/usr/bin/env python
"""Web Crawler/Spider

This module implements a web crawler. It is very basic and needs to be
extended to do anything useful with the traversed pages (see the usage
sketch after main() below for one starting point).
"""
import re
import sys
import time
import math
import urllib2
import urlparse
import optparse
from cgi import escape
from traceback import format_exc
from Queue import Queue, Empty as QueueEmpty
from BeautifulSoup import BeautifulSoup

__version__ = "0.2"
__copyright__ = "CopyRight (C) 2008-2011 by James Mills"
__license__ = "MIT"
__author__ = "James Mills"
__author_email__ = "James Mills, James dot Mills st dotred dot com dot au"

USAGE = "%prog [options] <url>"
VERSION = "%prog v" + __version__

AGENT = "%s/%s" % (__name__, __version__)

class Crawler(object):
    """Crawls pages starting from a root URL, collecting the links it finds."""

    def __init__(self, root, depth, locked=True):
        self.root = root
        self.depth = depth
        self.locked = locked  # if True, only follow links on the root's host
        self.host = urlparse.urlparse(root)[1]  # netloc of the root URL
        self.urls = []      # every unique URL discovered
        self.links = 0      # number of links found
        self.followed = 0   # number of pages actually fetched

    def crawl(self):
        """Breadth-first crawl from the root, bounded by self.depth pages."""
        page = Fetcher(self.root)
        page.fetch()
        q = Queue()
        for url in page.urls:
            q.put(url)
        followed = [self.root]

        n = 0
        while True:
            try:
                # Non-blocking get: a plain q.get() would block forever once
                # the queue drains instead of raising QueueEmpty.
                url = q.get(block=False)
            except QueueEmpty:
                break

            n += 1
            if url not in followed:
                try:
                    host = urlparse.urlparse(url)[1]
                    # Follow the link unless we are locked to the root host
                    # and this URL lives on a different host.
                    if not self.locked or re.match(".*%s" % re.escape(self.host), host):
                        followed.append(url)
                        self.followed += 1
                        page = Fetcher(url)
                        page.fetch()
                        for link in page:
                            if link not in self.urls:
                                self.links += 1
                                q.put(link)
                                self.urls.append(link)
                        if n > self.depth and self.depth > 0:
                            # "depth" is effectively a cap on followed pages.
                            break
                except Exception, e:
                    print "ERROR: Can't process url '%s' (%s)" % (url, e)
                    print format_exc()

class Fetcher(object):
    """Fetches a single page and collects the URLs of its <a> links."""

    def __init__(self, url):
        self.url = url
        self.urls = []

    def __getitem__(self, x):
        return self.urls[x]

    def _addHeaders(self, request):
        request.add_header("User-Agent", AGENT)

    def open(self):
        url = self.url
        try:
            request = urllib2.Request(url)
            handle = urllib2.build_opener()
        except IOError:
            return None
        return (request, handle)

    def fetch(self):
        # open() returns None when the request could not be constructed.
        opened = self.open()
        if not opened:
            return
        request, handle = opened
        self._addHeaders(request)
        try:
            content = unicode(handle.open(request).read(), "utf-8",
                              errors="replace")
            soup = BeautifulSoup(content)
            tags = soup('a')
        except urllib2.HTTPError, error:
            if error.code == 404:
                print >> sys.stderr, "ERROR: %s -> %s" % (error, error.url)
            else:
                print >> sys.stderr, "ERROR: %s" % error
            tags = []
        except urllib2.URLError, error:
            print >> sys.stderr, "ERROR: %s" % error
            tags = []
        for tag in tags:
            href = tag.get("href")
            if href is not None:
                # Note: cgi.escape HTML-escapes the href, so a literal "&"
                # in a query string is stored as "&amp;".
                url = urlparse.urljoin(self.url, escape(href))
                if url not in self:
                    self.urls.append(url)

def getLinks(url):
    page = Fetcher(url)
    page.fetch()
    for i, url in enumerate(page):
        print "%d. %s" % (i, url)

def parse_options():
    """parse_options() -> opts, args

    Parse any command-line options given returning both
    the parsed options and arguments.
    """
    parser = optparse.OptionParser(usage=USAGE, version=VERSION)

    parser.add_option("-q", "--quiet",
                      action="store_true", default=False, dest="quiet",
                      help="Enable quiet mode")

    parser.add_option("-l", "--links",
                      action="store_true", default=False, dest="links",
                      help="Get links for specified url only")

    parser.add_option("-d", "--depth",
                      action="store", type="int", default=30, dest="depth",
                      help="Maximum depth to traverse")

    opts, args = parser.parse_args()

    if len(args) < 1:
        parser.print_help()
        raise SystemExit, 1

    return opts, args
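
# Example command lines (the file name "crawler.py" and the host are
# illustrative only, not part of the original recipe):
#
#   python crawler.py -d 5 http://example.com/      # crawl, capped by --depth
#   python crawler.py --links http://example.com/   # only list links on that page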

def main():
    opts, args = parse_options()

    url = args[0]

    if opts.links:
        getLinks(url)
        raise SystemExit, 0

    depth = opts.depth

    sTime = time.time()

    print "Crawling %s (Max Depth: %d)" % (url, depth)
    crawler = Crawler(url, depth)
    crawler.crawl()
    print "\n".join(crawler.urls)

    eTime = time.time()
    tTime = eTime - sTime

    print "Found: %d" % crawler.links
    print "Followed: %d" % crawler.followed
    print "Stats: (%d/s after %0.2fs)" % (
        int(math.ceil(float(crawler.links) / tTime)), tTime)


if __name__ == "__main__":
    main()
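
The script can also be driven from other code rather than from the command
line. A minimal sketch, assuming the file above is importable as "crawler"
(the module name is an assumption, not part of the original recipe); only
crawler.urls, crawler.links and crawler.followed are populated by crawl():

    from crawler import Crawler

    crawler = Crawler("http://example.com/", depth=10)
    crawler.crawl()
    for url in crawler.urls:
        print url    # every unique link discovered
    print "followed %d pages, found %d links" % (crawler.followed, crawler.links)
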
Diff to Previous Revision
--- revision 1 2008-11-02 17:51:18
+++ revision 2 2011-01-31 21:57:58
@@ -14,11 +14,15 @@
import urllib2
import urlparse
import optparse
+from cgi import escape
+from traceback import format_exc
+from Queue import Queue, Empty as QueueEmpty
+
from BeautifulSoup import BeautifulSoup
-__version__ = "0.1"
-__copyright__ = "CopyRight (C) 2008 by James Mills"
-__license__ = "GPL"
+__version__ = "0.2"
+__copyright__ = "CopyRight (C) 2008-2011 by James Mills"
+__license__ = "MIT"
__author__ = "James Mills"
__author_email__ = "James Mills, James dot Mills st dotred dot com dot au"
@@ -27,72 +31,6 @@
AGENT = "%s/%s" % (__name__, __version__)
-def encodeHTML(s=""):
- """encodeHTML(s) -> str
-
- Encode HTML special characters from their ASCII form to
- HTML entities.
- """
-
- return s.replace("&", "&amp;") \
- .replace("<", "&lt;") \
- .replace(">", "&gt;") \
- .replace("\"", "&quot;") \
- .replace("'", "&#39;") \
- .replace("--", "&mdash")
-
-class Queue(object):
-
- def __init__(self, size=None):
- super(Queue, self).__init__()
-
- self._queue = []
- self._size = size
-
- def __len__(self):
- return len(self._queue)
-
- def __getitem__(self, n):
- if (not self.empty()) and (0 <= (n + 1) <= len(self._queue)):
- return self._queue[(len(self._queue) - (n + 1))]
- else:
- raise StopIteration
-
- def push(self, item):
- self._queue.insert(0, item)
- if self._size is not None:
- self._queue = self._queue[:self._size]
-
- def get(self, n=0, remove=False):
- if (not self.empty()) and (0 <= (n + 1) <= len(self._queue)):
- r = self._queue[(len(self._queue) - (n + 1))]
- if remove:
- del self._queue[(len(self._queue) - (n + 1))]
- return r
- else:
- return None
-
- def pop(self, n=0):
- return self.get(n, True)
-
- def peek(self, n=0):
- return self.get(n)
-
- def top(self):
- return self.peek()
-
- def bottom(self):
- return self.peek(len(self) - 1)
-
- def empty(self):
- return self._queue == []
-
- def size(self):
- return self._size
-
- def full(self):
- return len(self) == self.size()
-
class Crawler(object):
def __init__(self, root, depth, locked=True):
@@ -100,22 +38,28 @@
self.depth = depth
self.locked = locked
self.host = urlparse.urlparse(root)[1]
+ self.urls = []
self.links = 0
self.followed = 0
def crawl(self):
page = Fetcher(self.root)
page.fetch()
- urls = Queue()
+ q = Queue()
for url in page.urls:
- urls.push(url)
+ q.put(url)
followed = [self.root]
n = 0
- while not urls.empty():
+ while True:
+ try:
+ url = q.get()
+ except QueueEmpty:
+ break
+
n += 1
- url = urls.pop()
+
if url not in followed:
try:
host = urlparse.urlparse(url)[1]
@@ -125,22 +69,21 @@
page = Fetcher(url)
page.fetch()
for i, url in enumerate(page):
- if url not in urls:
+ if url not in self.urls:
self.links += 1
- urls.push(url)
+ q.put(url)
+ self.urls.append(url)
if n > self.depth and self.depth > 0:
break
- except Exception, error:
- print "Warning: Can't process url '%s'" % url
+ except Exception, e:
+ print "ERROR: Can't process url '%s' (%s)" % (url, e)
+ print format_exc()
class Fetcher(object):
def __init__(self, url):
self.url = url
self.urls = []
-
- def __contains__(self, x):
- return x in self.urls
def __getitem__(self, x):
return self.urls[x]
@@ -150,7 +93,6 @@
def open(self):
url = self.url
- print "Following %s" % url
try:
request = urllib2.Request(url)
handle = urllib2.build_opener()
@@ -162,10 +104,10 @@
request, handle = self.open()
self._addHeaders(request)
if handle:
- soup = BeautifulSoup()
try:
- content = unicode(handle.open(request).read(), errors="ignore")
- soup.feed(content)
+ content = unicode(handle.open(request).read(), "utf-8",
+ errors="replace")
+ soup = BeautifulSoup(content)
tags = soup('a')
except urllib2.HTTPError, error:
if error.code == 404:
@@ -177,15 +119,11 @@
print >> sys.stderr, "ERROR: %s" % error
tags = []
for tag in tags:
- try:
- href = tag["href"]
- if href is not None:
- url = urlparse.urljoin(self.url, encodeHTML(href))
- if url not in self:
- print " Found: %s" % url
- self.urls.append(url)
- except KeyError:
- pass
+ href = tag.get("href")
+ if href is not None:
+ url = urlparse.urljoin(self.url, escape(href))
+ if url not in self:
+ self.urls.append(url)
def getLinks(url):
page = Fetcher(url)
@@ -238,6 +176,7 @@
print "Crawling %s (Max Depth: %d)" % (url, depth)
crawler = Crawler(url, depth)
crawler.crawl()
+ print "\n".join(crawler.urls)
eTime = time.time()
tTime = eTime - sTime