Build a Simple Web Crawler in Python

A web crawler is an automated script/software which can gather all links of a specific web domain, or even the entire web! These gathered links can be used for a variety of purposes like:

  1. Checking your SEO health.
  2. Tracking the behavior of a particular html code.
  3. Feeding links to a search engine.
  4. Tracking Code or Content changes.
  5. Putting all my webpages in cache after a new release.
  6. Creating a site map.
  7. Web / Mobile Testing (w/ an advanced crawler with Phantom.js capabilities).

I will show you how to create a web crawler in python. First create a new project in python and create 5 python files in it, namely general.py, link_finder.py, domain.py, spider.py and main.py. Here are the source codes of each of these files:

import os

# Each website is a separate project (folder)
def create_project_dir(directory):
    """Create the project folder for a crawl if it does not already exist.

    Bug fix: the original printed the message but never actually created
    the directory, so every later file write in the project would fail.
    """
    if not os.path.exists(directory):
        print('Creating directory ' + directory)
        os.makedirs(directory)

# Create queue and crawled files (if not created)
def create_data_files(project_name, base_url):
    """Seed the project's queue file with the base URL and create an
    empty crawled file, skipping any file that already exists."""
    seeds = [
        (os.path.join(project_name, 'queue.txt'), base_url),
        (os.path.join(project_name, "crawled.txt"), ''),
    ]
    for path, initial_data in seeds:
        if not os.path.isfile(path):
            write_file(path, initial_data)

# Create a new file
def write_file(path, data):
    """Create (or overwrite) the file at *path* with *data* as its contents.

    Bug fix: the original `with` block had no body (a SyntaxError); the
    intended write call was lost, so queue/crawled files were never seeded.
    """
    with open(path, 'w') as f:
        f.write(data)

# Add data onto an existing file
def append_to_file(path, data):
    """Append a single line containing *data* to the file at *path*."""
    with open(path, 'a') as out:
        out.write(data + '\n')

# Delete the contents of a file
def delete_file_contents(path):
    """Truncate the file at *path* to zero length (creates it if absent)."""
    with open(path, 'w'):
        pass

# Read a file and convert each line to set items
def file_to_set(file_name):
    """Read *file_name* and return its lines as a set, newlines stripped.

    Duplicated lines collapse into a single set entry.
    """
    with open(file_name, 'rt') as handle:
        return {line.replace('\n', '') for line in handle}

# Iterate through a set, each item will be a line in a file
def set_to_file(links, file_name):
    """Write every item of *links* to *file_name*, one per line, sorted."""
    with open(file_name, "w") as out:
        out.writelines(item + "\n" for item in sorted(links))

from html.parser import HTMLParser
from urllib import parse

class LinkFinder(HTMLParser):
    """HTML parser that collects every hyperlink (<a href=...>) on a page.

    Bug fixes vs. the original:
    - __init__ must call super().__init__() or HTMLParser.feed() fails;
    - the resolved url was computed but never added to self.links;
    - error() had an empty body (a SyntaxError).
    """

    def __init__(self, base_url, page_url):
        super().__init__()
        self.base_url = base_url    # root used to resolve relative links
        self.page_url = page_url    # page being parsed
        self.links = set()          # all hyperlinks found so far

    # When we call HTMLParser feed() this function is called when it
    # encounters an opening tag <a>
    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for (attribute, value) in attrs:
                if attribute == 'href':
                    # Resolve relative hrefs against the base URL.
                    url = parse.urljoin(self.base_url, value)
                    self.links.add(url)

    def page_links(self):
        """Return the set of links gathered so far."""
        return self.links

    def error(self, message):
        # Ignore malformed-HTML parse errors; crawling is best-effort.
        pass

from urllib.request import urlopen
from link_finder import LinkFinder
from domain import *
from general import *

class Spider:
    """Crawls pages within one domain, keeping shared state in class
    variables so all worker threads see the same queue/crawled sets.

    Reconstruction notes: the scraped original lost several statements —
    boot() was never invoked, crawl_page never recorded progress,
    gather_links was missing its try:/read()/feed() lines, and
    add_links_to_queue was missing its continue/add statements.
    """

    # Shared crawl state (class-level on purpose: one crawl per process).
    project_name = ''
    base_url = ''
    domain_name = ''
    queue_file = ''
    crawled_file = ''
    queue = set()      # URLs waiting to be crawled
    crawled = set()    # URLs already crawled

    def __init__(self, project_name, base_url, domain_name):
        Spider.project_name = project_name
        Spider.base_url = base_url
        Spider.domain_name = domain_name
        Spider.queue_file = Spider.project_name + '/queue.txt'
        Spider.crawled_file = Spider.project_name + '/crawled.txt'
        self.boot()
        self.crawl_page('First spider', Spider.base_url)

    # Creates directory and files for project on first run and starts the spider
    @staticmethod
    def boot():
        create_project_dir(Spider.project_name)
        create_data_files(Spider.project_name, Spider.base_url)
        Spider.queue = file_to_set(Spider.queue_file)
        Spider.crawled = file_to_set(Spider.crawled_file)

    # Updates user display, fills queue and updates files
    @staticmethod
    def crawl_page(thread_name, page_url):
        if page_url not in Spider.crawled:
            print(thread_name + ' now crawling ' + page_url)
            print('Queue ' + str(len(Spider.queue)) + ' | Crawled  ' + str(len(Spider.crawled)))
            Spider.add_links_to_queue(Spider.gather_links(page_url))
            # Move the page from the pending queue to the crawled set.
            Spider.queue.discard(page_url)
            Spider.crawled.add(page_url)
            Spider.update_files()

    # Converts raw response data into readable information and checks for proper html formatting
    @staticmethod
    def gather_links(page_url):
        html_string = ''
        try:
            response = urlopen(page_url)
            # Only parse HTML responses; skip images, PDFs, etc.
            if 'text/html' in response.getheader('Content-Type'):
                html_bytes = response.read()
                html_string = html_bytes.decode("utf-8")
            finder = LinkFinder(Spider.base_url, page_url)
            finder.feed(html_string)
        except Exception as e:
            # Best-effort crawl: a failed fetch just contributes no links.
            print(str(e))
            return set()
        return finder.page_links()

    # Saves queue data to project files
    @staticmethod
    def add_links_to_queue(links):
        for url in links:
            if (url in Spider.queue) or (url in Spider.crawled):
                continue  # already known
            if Spider.domain_name != get_domain_name(url):
                continue  # stay within the target domain
            Spider.queue.add(url)

    @staticmethod
    def update_files():
        set_to_file(Spider.queue, Spider.queue_file)
        set_to_file(Spider.crawled, Spider.crawled_file)

from urllib.parse import urlparse

# Get domain name (e.g. 'www.example.com') from any URL of the site
def get_domain_name(url):
    """Return the domain portion of *url*, or '' if it cannot be parsed.

    Reconstruction note: the scraped original lost the try/except wrapper,
    leaving an unreachable `return ''`; restored here so malformed URLs are
    skipped instead of crashing the crawler thread.
    """
    try:
        results = get_sub_domain_name(url).split('.')
        # use this if your website is of the form example.com:
        # return results[-2] + '.' + results[-1]
        # use this if your website is of the form www.example.com:
        return results[-3] + '.' + results[-2] + '.' + results[-1]
    except Exception:
        return ''

# Get sub domain name (
def get_sub_domain_name(url):
        return urlparse(url).netloc
main.py (run this to run the project):

import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *

# edit the below two lines according to your site.
PROJECT_NAME = 'thecoding'
HOMEPAGE = 'https://example.com/'  # TODO: set to the homepage of the site to crawl
# Fix: HOMEPAGE and NUMBER_OF_THREADS were referenced below but the
# scraped original lost their definitions; restored here.
DOMAIN_NAME = get_domain_name(HOMEPAGE)
QUEUE_FILE = PROJECT_NAME + '/queue.txt'
CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
NUMBER_OF_THREADS = 8  # more threads = faster crawl, more CPU/network load
queue = Queue()        # thread-safe job queue feeding the workers

def create_workers():
    """Spawn NUMBER_OF_THREADS daemon threads running work().

    Bug fix: the original never called t.start(), so no worker ever ran
    and the crawl stalled after the first page.
    """
    for _ in range(NUMBER_OF_THREADS):
        t = threading.Thread(target=work)
        # Daemon threads die with the main thread instead of hanging exit.
        t.daemon = True
        t.start()

def work():
    """Worker loop: take a URL off the job queue and crawl it, forever.

    Bug fix: the original never called queue.task_done(), so
    queue.join() in create_jobs() would block forever.
    """
    while True:
        url = queue.get()
        Spider.crawl_page(threading.current_thread().name, url)
        queue.task_done()

def create_jobs():
    """Push every queued link onto the thread-safe job queue and, once the
    workers have drained it, re-check the file for newly discovered links.

    Reconstruction note: the loop body and the queue.join()/crawl()
    continuation were lost in the scraped original.
    """
    for link in file_to_set(QUEUE_FILE):
        queue.put(link)
    queue.join()   # wait until workers mark every job done
    crawl()        # newly found links may have been appended meanwhile

def crawl():
    """If any links are still queued on disk, report and dispatch them.

    Bug fix: the original never called create_jobs(), so queued links were
    counted but never handed to the workers. The script bootstrap calls
    (lost in the scraped original) are restored under a __main__ guard.
    """
    queued_links = file_to_set(QUEUE_FILE)
    if len(queued_links) > 0:
        print(str(len(queued_links)) + " links in the queue.")
        create_jobs()


if __name__ == '__main__':
    Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
    create_workers()
    crawl()
In the above file, edit PROJECT_NAME and HOMEPAGE according to the website you want to crawl. Now run this file and you will see a folder created with 2 files: queue.txt and crawled.txt, which are constantly updated as the crawler crawls the site. It might take some time to crawl sites. After the crawling is complete, you can find all the associated URLs of your site in the file crawled.txt. You can also change the NUMBER_OF_THREADS variable to increase the number of threads in this multi-threaded program and increase the crawling speed. Of course, more computing power will be required to support a larger number of threads. You can use the crawled URLs for many purposes, like feeding them to a search engine to improve your site's SEO, creating a site map, etc. Thank you!

Leave a Reply

Your email address will not be published. Required fields are marked *