Commits

gbrindisi committed 9928238 Merge

Dev branch merged

Comments (0)

Files changed (6)

core/domscanner.py

 import re
 import random
 import threading
-import csv
+from lxml import etree
 import os
 
 from core.javascript import Javascript
         self.browser = Browser()
         self._setProxies()
         self._setHeaders()
-        #self._getWhitelist()
+        self._getWhitelist()
 
     def _setHeaders(self):
         if self.engine.getOption('ua') is not None:
             self.errors[key] = [value]
 
     def _getWhitelist(self):
-        wl = csv.reader(open(os.getcwd() + "/lib/js-whitelist.csv", "rb"))
-        print wl
-        for js in wl:
-            self.whitelist.append(js[0])
-            print js[0]
+        # Load the XML whitelist of known-good javascript fingerprints.
+        # The path is resolved relative to this module (not the CWD) so
+        # the scanner works no matter where it is launched from.
+        path = os.path.split(os.path.realpath(__file__))[0]
+        path = os.path.join(path, "../lib/whitelist.xml")
+
+        # Read then close the handle explicitly -- don't leak it.
+        f = open(path, "rb")
+        try:
+            xml = f.read()
+        finally:
+            f.close()
+        root = etree.XML(xml)
+
+        # Each <javascript> entry carries an md5 hash plus metadata used
+        # when reporting a whitelisted (skipped) script.
+        for element in root.iterfind("javascript"):
+            el = {
+                'hash': element.find("hash").text,
+                'description': element.find("description").text,
+                'reference': element.find("reference").text
+                }
+            self.whitelist.append(el)
+
     def _parseJavascript(self, target):
         if self.engine.getOption("ua") is "RANDOM": self._setHeaders() 
         
     def _analyzeJavascript(self):
          for js in self.javascript:
              #print "\n[+] Analyzing:\t %s" % js.link
+
+             # Check if the javascript is whitelisted
+             # and eventually skip the analysis
+             skip = False
+             for wl in self.whitelist:
+                 if wl["hash"] == js.js_hash:
+                     print "[-] Found a whitelisted script: %s" % wl["description"]
+                     skip = True
+                     break
+
+             if skip:
+                 continue
+
+
+             # Scan each line of the script body; every non-None regex
+             # group match is recorded on the Javascript object.
+             # NOTE(review): the pattern is named SOURCES_RE but matches
+             # are stored via addSink() -- confirm source/sink naming.
              for k, line in enumerate(js.body.split("\n")):
                 for pattern in re.finditer(SOURCES_RE, line):
                     for grp in pattern.groups():
                         if grp is None: continue
                         js.addSink(k, grp) 
                         #print "[Line: %s] Possible Sink: %s" % (k, grp)
+
     def run(self):
         """ Main code of the thread """
         while True:
                 r.printResult()
 
         # Print javascript analysis
-        if len(self.javascript) == 0:
+        if self.getOption("dom") and len(self.javascript) == 0:
             print "\n[X] No DOM XSS Found :("
-        else:
+        elif self.getOption("dom"):
             print "\n[!] Found possible dom xss in %s javascripts" % len(self.javascript)
             for js in self.javascript:
                 js.printResult()
                     errors[ek] = ev
 
         results = set(results)
-
+        
         if errors:
             print "[X] Crawl Errors:"
             for ek, ev in errors.iteritems():
 
         self._compactTargets()    
        
+        self._scanTargets()
+        
         if self.getOption('dom'):
             self._scanDOMTargets()
-        else:
-            self._scanTargets()
 
         print "[-] Scan completed in %s seconds" % (time.time() - start)
                         

core/javascript.py

 #/usr/bin/env python
 
+import hashlib
+
 class Javascript:
     """
     Used to represent a Javascript file and the result o it's
     analysis
     """
 
-    def __init__(self, link, body, is_embedded=False):
+    def __init__(self, link, body, js_hash=None, is_embedded=False):
+        # link: location the script was retrieved from
+        # body: raw javascript source; also used for fingerprinting
+        # js_hash: optional precomputed md5 hex digest of body; computed
+        #          here when not supplied
+        # is_embedded: presumably True for scripts inlined in the page
+        #              rather than fetched -- confirm against callers
         self.link = link
         self.body = body
         self.is_embedded = is_embedded
+        
+        # javascript fingerprinting: md5 of the body, compared against
+        # the whitelist hashes to skip known-good scripts
+        self.js_hash = js_hash
+        if self.js_hash is None:
+            self.js_hash = hashlib.md5(self.body).hexdigest()

         self.sources = []
         self.sinks = []

core/xmlparser.py

+try:
+    from lxml import etree
+except ImportError:
+    print "\n[X] Please install lxml module:"
+    print "    http://lxml.de/\n"
+    exit()
+
+import os
+
+class XMLparser():
+    """Thin wrapper around lxml.etree for reading an XML document."""
+
+    def __init__(self, path):
+        # Initialize to an empty state first so a failed read leaves the
+        # object usable (getNodes returns []) instead of raising a
+        # confusing AttributeError later on.
+        self.xml = None
+        self.root = None
+        try:
+            f = open(path)
+            try:
+                self.xml = f.read()
+            finally:
+                # Close the handle even when read() fails
+                f.close()
+            self.root = etree.XML(self.xml)
+        except IOError, e:
+            print "\n[X] Can't read xml: %s" % path
+            print e
+
+    def getNodes(self, nodename, parent=None):
+        """
+        Return a list of nodes from root or another
+        specified node
+        """
+        if parent is None:
+            parent = self.root
+        if parent is None:
+            # Document never loaded; nothing to iterate over
+            return []
+        return list(parent.iterfind(nodename))
+
+
+if __name__ == "__main__":
+    # Ad-hoc smoke test: only runs when this module is executed
+    # directly, never at import time from the scanner. The path is
+    # resolved relative to this file instead of the CWD.
+    path = os.path.split(os.path.realpath(__file__))[0]
+    path = os.path.join(path, "../lib/whitelist.xml")
+    x = XMLparser(path)
+    for js in x.getNodes("javascript"):
+        for h in x.getNodes("hash", parent=js):
+            print h.text
+

lib/whitelist.xml

+<?xml version="1.0" ?>
+
+<whitelist>
+<javascript>
+    <hash>82bd29952e7156e0c854d57b1b394a55</hash>
+    <description>jQuery Tools 1.2.5</description>
+    <reference></reference>
+</javascript>
+
+</whitelist>
 #/usr/bin/env python
 
 import os
+try:
+    import hgapi
+except ImportError:
+    print "\n[X] Please install hgapi module:"
+    print "   $ pip install hgapi\n"
+    exit()
+
 from optparse import OptionParser
 from core.target import Target
 from core.engine import Engine
 .8P  Y8. db   8D db   8D      db   8D 88  V888   .88.   88      88.     88 `88. 
 YP    YP `8888Y' `8888Y'      `8888Y' VP   V8P Y888888P 88      Y88888P 88   YD
 
-----[ version 0.8.2                        Gianluca Brindisi <g@brindi.si> ]----
+----[ version 0.8.2                       Gianluca Brindisi <g@brindi.si> ]----
                                                       http://brindi.si/g/ ]----
 
  -----------------------------------------------------------------------------
 | Authors assume no liability and are not responsible for any misuse or       | 
 | damage caused by this program.                                              |
  -----------------------------------------------------------------------------
-    """
+    """ 
 
 def main():
     banner()
     parser.add_option("--random-agent", dest="random_agent", default=False, action="store_true", help="perform scan with random user agents")
     parser.add_option("--cookie", dest="cookie", help="use a cookie to perform scans")
     parser.add_option("--dom", dest="dom", default=False, action="store_true", help="basic heuristic to detect dom xss")
+    parser.add_option("--update", dest="update", default=False, action="store_true", help="check for updates")
 
     (options, args) = parser.parse_args()
-    if options.url is None: 
+    if options.url is None and options.update is False: 
         parser.print_help() 
         exit()
 
+    # Check for updates
+    if options.update is True:
+        try:
+            print "[!] Checking for updates...\n"
+            path = os.path.split(os.path.realpath(__file__))[0]
+            repo = hgapi.Repo(path)
+            print repo.hg_command("pull")
+            print repo.hg_update("tip")
+            print "[!] Updated to rev: %s" % repo.hg_rev()
+            exit()
+        except Exception:
+            print "[X] Can't retrieve updates\n"
+            exit()
+
     # Build a first target
     if options.post is True:
         if options.post_data is not None: