Commits

Tim Tomes  committed f589c30

multiple minor code changes in preparation for public release.

  • Participants
  • Parent commits e1ff642

Comments (0)

Files changed (4)

-# OS generated files #
-######################
-.DS_Store
-.DS_Store?
-._*
-.Spotlight-V100
-.Trashes
-Icon?
-ehthumbs.db
-Thumbs.db
-
-# Project specific items #
-##########################
 phantomjs
 
 Changelog
 =========
-v1.1:
+v1.2: (11.26.12)
+ * cleaned up the code for release
+
+v1.1: (7.15.12)
  * no longer freezes on redirects to 401 authentication.
  * stores each run in a unique directory.
  * shows headers for final destination rather than redirect.
  * denotes redirect next to the status header.
 
-NOTE: Keep in mind that there is no good way to follow a JavaScript redirect in an automated fashion. Pages using JavaScript to redirect the browser will show up as a blank screen shot.
+Notes
+=====
+ * Keep in mind that there is no good way to follow a JavaScript redirect in an automated fashion. Pages using JavaScript to redirect the browser will show up as a blank screenshot.
+ * Increased verbosity will show a lot of errors from PhantomJS and PyQt4. Most of these are debugging errors and will not impact the fidelity of the report.
 This tries to do more or less the same thing as CutyCapt, but as a
 Python module.
 
-Modified by Tim Tomes (@LaNMaSteR53) July 2012 to support PeepingTom:
-http://code.google.com/p/ptscripts/source/browse/trunk/peepingtom
+Modified by Tim Tomes (@LaNMaSteR53) July 2012 to support PeepingTom.
 
 This is a derived work from CutyCapt: http://cutycapt.sourceforge.net/
 

File peepingtom.py

-import sys, threading, Queue, urllib2, subprocess, httplib, re, time, os
+import sys
+import socket
+import urllib2
+import subprocess
+import re
+import time
+import os
 from urlparse import urlparse
 
 #=================================================
 def main():
     import optparse
     usage = "%prog [options]\n\n%prog - Tim Tomes (@LaNMaSteR53) (www.lanmaster53.com)"
-    parser = optparse.OptionParser(usage=usage, version="%prog 1.1")
+    parser = optparse.OptionParser(usage=usage, version="%prog 1.2")
     parser.add_option('-v', help='Enable verbose mode.', dest='verbose', default=False, action='store_true')
     parser.add_option('-i', help='File input mode. Name of input file. [IP:PORT]', dest='infile', type='string', action='store')
     parser.add_option('-u', help='Single URL input mode. URL as a string.', dest='url', type='string', action='store')
         targets.append(opts.url)
 
     dir = time.strftime('%y%m%d_%H%M%S', time.localtime())
+    print '[*] Storing data in \'%s/\'' % (dir)
     os.mkdir(dir)
     outfile = '%s/report.html' % (dir)
+    
+    socket.setdefaulttimeout(5)
 
     zombies = []
     servers = {}
-    #logic for validating list of urls and building a new list which understands the 302 redirected sites.
-    for target in targets:
-        headers = None
-        prefix = ''
-        # best guess at protocol prefix
-        if not target.startswith('http'):
-            if target.find(':') == -1: target += ':80'
-            prefix = 'http://'
-            if target.split(':')[1].find('443') != -1:
-                prefix = 'https://'
-        # drop port suffix where not needed
-        if target.endswith(':80'): target = ':'.join(target.split(':')[:-1])
-        if target.endswith(':443'): target = ':'.join(target.split(':')[:-1])
-        # build legitimate target url
-        target = prefix + target
-        code, headers = getHeaderData(target)
-        if code == 'zombie':
-            zombies.append((target, headers))
-        else:
-            filename = '%s.png' % re.sub('\W','',target)
-            servers[target] = [code, filename, headers]
-            if capture: getCapture(code, target, '%s/%s' % (dir,filename), opts)
+    # logic for validating list of urls and building a new list which understands the redirected sites.
+    try:
+        for target in targets:
+            headers = None
+            prefix = ''
+            # best guess at protocol prefix
+            if not target.startswith('http'):
+                if target.find(':') == -1: target += ':80'
+                prefix = 'http://'
+                if target.split(':')[1].find('443') != -1:
+                    prefix = 'https://'
+            # drop port suffix where not needed
+            if target.endswith(':80'): target = ':'.join(target.split(':')[:-1])
+            if target.endswith(':443'): target = ':'.join(target.split(':')[:-1])
+            # build legitimate target url
+            target = prefix + target
+            code, headers = getHeaderData(target)
+            if code == 'zombie':
+                zombies.append((target, headers))
+            else:
+                filename = '%s.png' % re.sub('\W','',target)
+                servers[target] = [code, filename, headers]
+                if capture: getCapture(code, target, '%s/%s' % (dir,filename), opts)
+    except KeyboardInterrupt:
+        print ''
     
     generatePage(servers, zombies, outfile)
     print 'Done.'
 
 def getCapture(code, url, filename, opts):
     if code != 401:
-        sys.stdout.write("[+] retrieving image for %s...\n" % url); sys.stdout.flush()
+        verbose = opts.verbose
         try:
-            if opts.pyqt:
-                proc = subprocess.Popen(['python ./capture.py %s %s' % (url, filename)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
-            elif opts.phantom:
-                proc = subprocess.Popen(['./phantomjs --ignore-ssl-errors=yes ./capture.js %s %s' % (url, filename)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
-            else:
-                print '[!] No capture mode detected.'
-                return
+            if opts.pyqt:      cmd = 'python ./capture.py %s %s' % (url, filename)
+            elif opts.phantom: cmd = './phantomjs --ignore-ssl-errors=yes ./capture.js %s %s' % (url, filename)
+            else: return
+            proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
             stdout, stderr = proc.communicate()
             response = str(stdout) + str(stderr)
             returncode = proc.returncode
-            if returncode != 0: print '[!] %d: %s' % (returncode, response)
-            elif response != 'None': print '[+] %s' % response
+            if returncode != 0:
+                print '[!] %d: %s' % (returncode, response)
+            if response != 'None':
+                if verbose: print '[+] \'%s\' => %s' % (cmd, repr(response))
         except KeyboardInterrupt:
             pass
 
     opener = urllib2.build_opener(SmartRedirectHandler) # debug with urllib2.HTTPHandler(debuglevel=1)
     urllib2.install_opener(opener)
     req = urllib2.Request(server.geturl())
-    # force head request
-    #req = HeadRequest(server.geturl())
-    # spoof user-agent
-    #req.add_header('User-agent', 'Mozilla/5.0')
-    # retrieve header information
     try:
         res = urllib2.urlopen(req)#,'',3)
-        print '[+] %s %s, Good.' % (target, res.getcode())
-        """
-    except httplib.BadStatusLine:
-        print '[+] %s bad status, visit manually.' % (target)
-        return 'zombie', res.args[0].__str__()
-        """
+        print '[*] %s %s. Good.' % (target, res.getcode())
     except Exception as res:
         try:
             res.getcode()
-            print '[+] %s %s, Good.' % (target, res.getcode())
+            print '[*] %s %s. Good.' % (target, res.getcode())
         except:
-            error = res.args[0].__str__()
-            print '[+] %s Error. Visit manually from report.\n[!] %s' % (target, error)
+            error = res.__str__()
+            print '[*] %s %s. Visit manually from report.' % (target, error)
             return 'zombie', error
 
     url = res.geturl()
     tmarkup = ''
     zmarkup = ''
     for server in servers.keys():
-        tmarkup += "<tr><td class='img'><img src='%s' /></td><td class='head'><a href='%s' target='_blank'>%s</a> %s</td></tr>\n" % (servers[server][1],server,server,servers[server][2])
+        tmarkup += "<tr>\n<td class='img'><img src='%s' /></td>\n<td class='head'><a href='%s' target='_blank'>%s</a> %s</td>\n</tr>\n" % (servers[server][1],server,server,servers[server][2])
     if len(zombies) > 0:
       zmarkup = '<tr><td><h2>Failed Requests</h2></td><td>\n'
       for server in zombies:
 table, td, th {border: 1px solid black;border-collapse: collapse;padding: 5px;font-size: .9em;font-family: tahoma;}
 table {table-layout:fixed;}
 td.img {width: 400px;white-space: nowrap;}
-td.head {vertical-align: top;word-wrap:break-word;}
+td.head {vertical-align: top;white-space: nowrap;}
 .header {font-weight: bold;}
 img {width: 400px;}
 </style>
     def http_error_301(self, req, fp, code, msg, headers):
         result = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
         result.status = code
-        result.msg = msg + ' (Redirected to here)'
+        result.msg = msg + ' (Redirect)'
         return result
-
-    def http_error_302(self, req, fp, code, msg, headers):
-        result = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
-        result.status = code
-        result.msg = msg + ' (Redirected to here)'
-        return result
-
-"""
-class AvoidBasicAuthHandler(urllib2.HTTPBasicAuthHandler):
-
-    def http_error_401(self, req, fp, code, msg, headers):
-        pass
-    
-class AvoidDigestAuthHandler(urllib2.HTTPDigestAuthHandler):
-
-    def http_error_401(self, req, fp, code, msg, headers):
-        pass
-
-class AvoidRedirectHandler(urllib2.HTTPRedirectHandler):
-    
-    def http_error_302(self, req, fp, code, msg, headers):
-        pass
-    http_error_301 = http_error_303 = http_error_307 = http_error_302
-
-class HeadRequest(urllib2.Request):
-
-    def get_method(self):
-        return "HEAD"
-"""
+    http_error_302 = http_error_303 = http_error_307 = http_error_301
 
 #=================================================
 # START