Commits

mwarren  committed a54b7e5

Adding in a bunch of scripts.

  • Participants
  • Parent commits 4f681fe

Comments (0)

Files changed (12)

File ClickbankLoader.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from HalotisIntelligenceModels import *
+
+import csv
+import httplib
+import logging
+from datetime import datetime
+
+import gmail
+
+LOG_FILENAME = 'ClickbankLoader.log'
+logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,filemode='w')
+
+ACCOUNTS = [{'account':'your account',  'API_key': 'your KEY' },]
+DEV_API_KEY = 'DEV-YOUR KEY'
+
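+# Fetch the orders list from the ClickBank REST API as CSV, passing the
+# developer key and per-account API key in the Authorization header.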
+def get_clickbank_list(API_key, DEV_key):
+    conn = httplib.HTTPSConnection('api.clickbank.com')
+    conn.putrequest('GET', '/rest/1.0/orders/list')
+    conn.putheader("Accept", 'text/csv')
+    conn.putheader("Authorization", DEV_key+':'+API_key)
+    conn.endheaders()
+    response = conn.getresponse()
+    
+    if response.status != 200:
+        logging.error('HTTP error %s' % response)
+        raise Exception(response)
+    
+    csv_data = response.read()
+    
+    return csv_data
+
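+# Parse the CSV rows and insert any transactions not already in the database.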
+def load_clickbanklist(csv_data, account, dbconnection=CONNSTRING, echo=False):
+    engine = create_engine(dbconnection, echo=echo)
+
+    metadata = Base.metadata
+    metadata.create_all(engine) 
+
+    Session = sessionmaker(bind=engine)
+    session = Session()
+
+    data = csv.DictReader(iter(csv_data.split('\n')))
+
+    for d in data:
+        item = ClickBankList(account, **d)
+        #check for duplicates before inserting
+        checkitem = session.query(ClickBankList).filter_by(date=item.date, receipt=item.receipt, item=item.item).all()
+    
+        if not checkitem:
+            logging.info('inserting new transaction %s' % item)
+            session.add(item)
+
+    session.commit()
+    
+if __name__=='__main__':
+    try:
+        for account in ACCOUNTS:
+            csv_data = get_clickbank_list(account['API_key'], DEV_API_KEY)
+            load_clickbanklist(csv_data, account['account'])
+    except:
+        logging.exception('Crashed')
+    
+    #email log
+    logdata = open(LOG_FILENAME, 'r').read()
+    gmail.send_email('Clickbank Loader Log', logdata)
+    

File DeliciousScraper.py

+#!/usr/bin/env python 
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+
+"""
+Scraper for Del.icio.us SERP.
+
+This pulls the search results for a query on http://del.icio.us.
+"""
+
+import urllib2
+import re
+
+from BeautifulSoup import BeautifulSoup
+
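+# Page through the Delicious search results for the query, collecting
+# (url, title) tuples until there is no "next" link or page_limit is reached.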
+def get_delicious_results(query, page_limit=10):
+
+    page = 1
+    links = []
+
+    while page < page_limit :
+        url='http://delicious.com/search?p=' + '%20'.join(query.split()) + '&context=all&lc=1&page=' + str(page)
+        req = urllib2.Request(url)
+        HTML = urllib2.urlopen(req).read()
+        soup = BeautifulSoup(HTML)
+        
+        next = soup.find('a', attrs={'class':re.compile('.*next$', re.I)})
+ 
+        #links is a list of (url, title) tuples
+        links +=   [(link['href'], ''.join(link.findAll(text=True)) ) for link in soup.findAll('a', attrs={'class':re.compile('.*taggedlink.*', re.I)}) ]
+
+        if next :
+            page = page+1
+        else :
+            break
+
+    return links
+
+if __name__=='__main__':
+    links = get_delicious_results('marathon training')
+    print links

File InLinks.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import urllib2, sys, urllib
+import datetime
+import logging
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from BeautifulSoup import BeautifulSoup
+
+today = datetime.date.today()
+
+from sqlalchemy import Table, Column, Integer, String, MetaData, Date, DateTime, Float
+from sqlalchemy.schema import UniqueConstraint
+from sqlalchemy.ext.declarative import declarative_base
+
+CONNSTRING='sqlite:///HalotisIntelligence.sqlite'
+
+Base = declarative_base()
+class InLink(Base):
+    __tablename__ = 'inlinks'
+    __table_args__ = (UniqueConstraint('date', 'url', 'source'),{})
+
+    id = Column(Integer, primary_key=True)
+    date = Column(Date)
+    url = Column(String)
+    source = Column(String)
+    inlinks_count = Column(Integer)
+
+    
+    def __init__(self, date, url, source, inlinks):
+        self.date = date
+        self.url = url
+        self.source = source
+        self.inlinks_count = inlinks
+
+
+    def __repr__(self):
+        return "<Inlink('%s - %s -> %s)>" % (str(self.date), self.url, str(self.inlinks_count))
+
+import gmail
+
+try:
+   import json
+except:
+   import simplejson as json # http://undefined.org/python/#simplejson
+
+LOG_FILENAME = 'InLinks.log'
+logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,filemode='w')
+
+DOMAINS = ['http://www.halotis.com',]
+
+YAHOO_APP_ID='YOUR ID'
+
+
+
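+# Scrape the paginated "links in" listing on alexa.com for the domain and
+# return a list of (url, anchor text) tuples.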
+def get_alexa_linksin(domain):
+
+    page = 0
+    linksin = []
+
+    while True :
+        url='http://www.alexa.com/site/linksin;'+str(page)+'/'+domain
+        req = urllib2.Request(url)
+        HTML = urllib2.urlopen(req).read()
+        soup = BeautifulSoup(HTML)
+
+        next = soup.find(id='linksin').find('a', attrs={'class':'next'})
+
+        linksin += [(link['href'], link.string) for link in soup.find(id='linksin').findAll('a')]
+
+        if next :
+            page = page+1
+        else :
+            break
+
+    return linksin
+
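+# Query the Yahoo! Site Explorer inlinkData service and return the total
+# number of inbound links it reports for the URL.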
+def yahoo_inlinks_count(query):
+    if not query.startswith('http://'):
+        logging.error('site must start with "http://"')
+        raise Exception('site must start with "http://"')
+    
+    request = 'http://search.yahooapis.com/SiteExplorerService/V1/inlinkData?appid=' + YAHOO_APP_ID + '&query=' + urllib.quote_plus(query) + '&output=json&results=0'
+
+    try:
+        results = json.load(urllib2.urlopen(request))
+    except:
+        logging.error('Web services request failed')
+        raise Exception("Web services request failed")
+    
+    return results['ResultSet']['totalResultsAvailable']
+
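+# Store today's inlink count for (url, source) in the database, skipping
+# the insert if a row for today already exists.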
+def load_inlinks(url, source, value, dbconnection=CONNSTRING, echo=False):
+    engine = create_engine(dbconnection, echo=echo)
+
+    metadata = Base.metadata
+    metadata.create_all(engine) 
+
+    Session = sessionmaker(bind=engine)
+    session = Session()
+    checkitem = session.query(InLink).filter_by(date=today, url=url, source=source).all()
+    
+    if not checkitem:
+        item = InLink(datetime.date.today(), url, source, value)
+        logging.debug('inserting value: %s' % item)
+        session.add(item)
+
+    session.commit()
+    
+if __name__=='__main__':
+    try:
+        for dom in DOMAINS:
+            logging.info( 'checking %s' % dom)
+            count =  yahoo_inlinks_count(dom)
+            logging.info( 'found %s inlinks on Yahoo!' % count)
+            load_inlinks(dom, 'Yahoo', count)
+            count = len(get_alexa_linksin(dom))
+            logging.info( 'found %s inlinks on Alexa' % count)
+            load_inlinks(dom, 'Alexa', count)
+    except:
+        logging.exception('Crashed!')
+    
+    logging.info('Finished')
+    
+    #email log
+    logdata = open(LOG_FILENAME, 'r').read()
+    gmail.send_email('Inlinks Loader Log', logdata)
+    
+
+
+
+
+
+
+

File TwitterLinks.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+try:
+   import json
+except:
+   import simplejson as json # http://undefined.org/python/#simplejson
+import twitter     #http://code.google.com/p/python-twitter/
+
+from urllib2 import urlopen
+import re
+
+SETTINGS = {'user':'your username', 'password':'your password'}
+
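+# Scan the friends timeline for status updates containing a URL and print
+# each URL alongside the title of the page it points to.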
+def listFriendsURLs(user, password):
+    re_pattern='.*?((?:http|https)(?::\\/{2}[\\w]+)(?:[\\/|\\.]?)(?:[^\\s"]*))'	# HTTP URL
+    rg = re.compile(re_pattern,re.IGNORECASE|re.DOTALL)
+    
+    api = twitter.Api(user, password)
+    timeline = api.GetFriendsTimeline(user)
+    
+    if len(timeline) == 0:
+        print 'twitter never returned anything'
+        
+    for status in timeline:
+        m = rg.search(status.text)
+        if m:
+            httpurl=m.group(1)
+            title = getTitle(httpurl)
+            print httpurl, '-', title
+
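+# Fetch the page and pull the text out of its <title> tag, or return None.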
+def getTitle(url):
+    req = urlopen(url)
+    html = req.read()
+    
+    re_pattern='<title>(.*?)</title>'
+    rg = re.compile(re_pattern,re.IGNORECASE|re.DOTALL)
+    
+    m = rg.search(html)
+    if m:
+        title = m.group(1)
+        return title.strip()
+    return None
+    
+if __name__ == '__main__':
+    listFriendsURLs(SETTINGS['user'], SETTINGS['password'])

File ezinearticles.py

+#!/usr/bin/env python 
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import sys
+import urllib2
+import urllib
+import sqlite3
+
+from BeautifulSoup import BeautifulSoup # available at: http://www.crummy.com/software/BeautifulSoup/
+
+USER_AGENT = 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.29 Safari/525.13'
+
+conn = sqlite3.connect("ezines.sqlite")
+conn.row_factory = sqlite3.Row
+
+c = conn.cursor()
+c.execute('CREATE TABLE IF NOT EXISTS Ezines (`url`, `title`, `summary`, `tail`, `content`, `signature`)')
+conn.commit()
+
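+# Transpose a list of lists so that rows become columns.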
+def transposed(lists):
+   if not lists: return []
+   return map(lambda *row: list(row), *lists)
+   
+def search(query):
+    """Runs the search on ezineartles.com and returns the HTML
+    """
+    url='http://ezinearticles.com/search/?q=' + '+'.join(query.split())
+    req = urllib2.Request(url)
+    req.add_header('User-agent', USER_AGENT)
+    HTML = urllib2.urlopen(req).read()
+    return HTML
+
+def parse_search_results(HTML):
+    """Givin the result of the search function this parses out the results into a list
+    """
+    soup = BeautifulSoup(HTML)
+    match_titles = soup.findAll(attrs={'class':'srch_title'})
+    match_sum = soup.findAll(attrs={'class':'srch_sum'})
+    match_tail = soup.findAll(attrs={'class':'srch_tail'})
+    
+    return transposed([match_titles, match_sum, match_tail])
+    
+def get_article_content(url):
+    """Parse the body and signature from the content of an article
+    """
+    req = urllib2.Request(url)
+    req.add_header('User-agent', USER_AGENT)
+    HTML = urllib2.urlopen(req).read()
+    
+    soup = BeautifulSoup(HTML)
+    return {'text':soup.find(id='body'), 'sig':soup.find(id='sig')}
+    
+def store_results(search_results):
+    """put the results into an sqlite database if they haven't already been downloaded.
+    """
+    c = conn.cursor()
+    for row in search_results:
+        title = row[0]
+        summary = row[1]
+        tail = row[2]
+        
+        link = title.find('a').get('href')
+        have_url = c.execute('SELECT url from Ezines WHERE url=?', (link, )).fetchall()
+        if not have_url:
+            content = get_article_content('http://ezinearticles.com' + link)
+            c.execute('INSERT INTO Ezines (`title`, `url`, `summary`, `tail`, `content`, `signature`) VALUES (?,?,?,?,?,?)', 
+                      (title.find('a').find(text=True), 
+                       link, 
+                       summary.find(text=True), 
+                       tail.find(text=True), 
+                       str(content['text']), 
+                       str(content['sig'])) )
+    
+    conn.commit()
+    
+if __name__=='__main__':
+    #example usage
+    page = search('seo')
+    search_results = parse_search_results(page)
+    
+    store_results(search_results)

File gmail.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+ 
+import smtplib
+from email.MIMEText import MIMEText
+ 
+GMAIL_LOGIN = 'YOUR_EMAIL@gmail.com'
+GMAIL_PASSWORD = 'YOUR PASSWORD'
+ 
+ 
+def send_email(subject, message, from_addr=GMAIL_LOGIN, to_addr=GMAIL_LOGIN):
+    msg = MIMEText(message)
+    msg['Subject'] = subject
+    msg['From'] = from_addr
+    msg['To'] = to_addr
+ 
+    server = smtplib.SMTP('smtp.gmail.com',587) #port 465 or 587
+    server.ehlo()
+    server.starttls()
+    server.ehlo()
+    server.login(GMAIL_LOGIN,GMAIL_PASSWORD)
+    server.sendmail(from_addr, to_addr, msg.as_string())
+    server.close()
+ 
+ 
+if __name__=="__main__":
+    send_email('test', 'This is a test email')

File neverblueReport.py

+#!/usr/bin/env python 
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import urllib2
+import time
+import csv
+import os
+import logging
+from urllib import urlencode
+try:
+    from xml.etree import ElementTree
+except ImportError:
+    from elementtree import ElementTree
+
+import gmail
+
+LOG_FILENAME = 'NeverblueReport.log'
+logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,filemode='w')
+
+username='YOUR EMAIL'
+password='YOUR PASSWORD'
+
+url = 'https://secure.neverblue.com/service/aff/v1/rest/'
+schedule_url = url + 'reportSchedule/'
+status_url   = url + 'reportStatus/'
+download_url = url + 'reportDownloadUrl/'
+REALM = 'secure.neverblue.com'
+
+SERVER_RETRIES = 100
+SERVER_DELAY = 2
+
+def install_opener():
+    # create a password manager
+    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
+
+    # Add the username and password.
+    password_mgr.add_password(REALM, url, username, password)
+
+    handler = urllib2.HTTPBasicAuthHandler(password_mgr)
+
+    # create "opener" (OpenerDirector instance)
+    opener = urllib2.build_opener(handler)
+
+    # Install the opener.
+    # Now all calls to urllib2.urlopen use our opener.
+    urllib2.install_opener(opener)
+
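+# Schedule a report for yesterday's stats and return the reportJob id
+# parsed from the XML response.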
+def request_report():
+    params={'type':'date', 'relativeDate':'yesterday', 'campaign':0}
+    req = urllib2.Request(schedule_url + '?' + urlencode(params))
+
+    handle = urllib2.urlopen(req)
+    xml = handle.read()
+    tree = ElementTree.fromstring(xml)
+
+    # parse the reportJob code from the XML
+    reportJob = tree.find('reportJob').text
+    return reportJob
+
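+# Poll the reportStatus endpoint (up to SERVER_RETRIES times) until the
+# report is marked 'completed', and return the last status seen.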
+def check_status(reportJob):
+    params = {'reportJob':reportJob}
+
+    for i in range(0, SERVER_RETRIES):
+        req = urllib2.Request(status_url + '?' + urlencode(params))
+        handle = urllib2.urlopen(req)
+        xml = handle.read()
+        tree = ElementTree.fromstring(xml)
+        reportStatus = tree.find('reportStatus').text
+        if reportStatus == 'completed':
+            break
+        time.sleep(SERVER_DELAY)
+    return reportStatus
+
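+# Look up the report's download URL, fetch the CSV, and log the Date and
+# Payout columns of each row.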
+def get_results(reportJob):
+    params = {'reportJob':reportJob, 'format':'csv'}
+    req = urllib2.Request(download_url + '?' + urlencode(params))
+    handle = urllib2.urlopen(req)
+    xml = handle.read()
+    tree = ElementTree.fromstring(xml)
+    downloadURL = tree.find('downloadUrl').text
+    report = urllib2.urlopen(downloadURL).read()
+    reader = csv.DictReader( report.split( '\n' ) )
+    for row in reader:
+        logging.info('%s %s' % (row['Date'], row['Payout']))
+
+if __name__=='__main__':
+    try:
+        install_opener()
+        reportJob = request_report()
+        logging.info('report requested')
+        reportStatus = check_status(reportJob)
+        logging.info('report ready')
+        if reportStatus == 'completed':
+            get_results(reportJob)
+    except:
+        logging.exception('crashed')
+
+    logging.info('Finished')
+    
+    #email log
+    logdata = open(LOG_FILENAME, 'r').read()
+    gmail.send_email('NeverblueReport Log', logdata)        
+    

File translate.py

+#!/usr/bin/env python 
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import sys
+import urllib2
+import urllib
+
+from BeautifulSoup import BeautifulSoup # available at: http://www.crummy.com/software/BeautifulSoup/
+    
+def translate(sl, tl, text):
+    """ Translates a given text from source language (sl) to
+        target language (tl) """
+
+    opener = urllib2.build_opener()
+    opener.addheaders = [('User-agent', 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0)')]
+
+    translated_page = opener.open(
+        "http://translate.google.com/translate_t?" + 
+        urllib.urlencode({'sl': sl, 'tl': tl}),
+        data=urllib.urlencode({'hl': 'en',
+                               'ie': 'UTF8',
+                               'text': text.encode('utf-8'),
+                               'sl': sl, 'tl': tl})
+    )
+    
+    translated_soup = BeautifulSoup(translated_page)
+
+    return translated_soup('div', id='result_box')[0].string
+
+if __name__=='__main__':
+    print translate('en', 'fr', u'hello')

File translateRSS.py

+#!/usr/bin/env python 
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import feedparser  # available at feedparser.org
+from translate import translate  # available at http://www.halotis.com/2009/07/20/translating-text-using-google-translate-and-python/
+import PyRSS2Gen # available at http://www.dalkescientific.com/Python/PyRSS2Gen.html
+
+import datetime 
+import re
+
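+# Strip anything that looks like an HTML tag from the string.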
+def remove_html_tags(data):
+    p = re.compile(r'<.*?>')
+    return p.sub('', data)
+
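+# Fetch the feed, translate each entry's title and summary, and rebuild
+# the result as a new RSS 2.0 document with PyRSS2Gen.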
+def translate_rss(sl, tl, url):
+
+    d = feedparser.parse(url)
+    
+    #unfortunately feedparser doesn't output rss so we need to create the RSS feed using PyRSS2Gen
+    items = [PyRSS2Gen.RSSItem( 
+        title = translate(sl, tl, x.title), 
+        link = x.link, 
+        description = translate(sl, tl, remove_html_tags(x.summary)), 
+        guid = x.link, 
+        pubDate = datetime.datetime( 
+            x.modified_parsed[0], 
+            x.modified_parsed[1], 
+            x.modified_parsed[2], 
+            x.modified_parsed[3], 
+            x.modified_parsed[4], 
+            x.modified_parsed[5])) 
+        for x in d.entries]
+    
+    rss = PyRSS2Gen.RSS2( 
+        title = d.feed.title, 
+        link = d.feed.link, 
+        description = translate(sl, tl, d.feed.description), 
+        lastBuildDate = datetime.datetime.now(), 
+        items = items) 
+    #emit the feed 
+    xml = rss.to_xml()
+    
+    return xml
+
+if __name__ == '__main__':
+  feed = translate_rss('en', 'fr', 'http://www.halotis.com/feed/')
+  print feed
+
+
+
+

File tweetBack.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+try:
+   import json
+except:
+   import simplejson as json # http://undefined.org/python/#simplejson
+import twitter     #http://code.google.com/p/python-twitter/
+
+import urllib
+import pickle
+import random
+import logging
+import time
+
+import gmail
+
+LOG_FILENAME = '/tmp/tweetBack.log'
+logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG,filemode='w')
+
+MAX_API_TRIES = 5
+
+SETTINGS = [
+        {'user':'twitter account', 
+         'password':'twitter password', 
+         'triggers':({'text':'search trigger', 
+                      'messages':('sample message #1',
+                                  "sample message #2",
+                                  'sample message #3')},
+                     )},
+           ]
+
+USER_LIST_FILE = 'tweetback.pck'  #make sure we don't tweet the same person twice
+
+#read stored list of twitter users that have been responded to already in a file
+try:
+    f = open(USER_LIST_FILE, 'r')
+    user_list = pickle.load(f)
+except:
+    user_list = []
+
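+# Query the Twitter search API, retrying up to MAX_API_TRIES times with a
+# 61 second pause between attempts; return the parsed JSON or None.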
+def search_results(query):
+    url = 'http://search.twitter.com/search.json?q=' + '+'.join(query.split())
+    tries = 0
+    while tries < MAX_API_TRIES:
+        tries = tries + 1
+        try:
+            return json.load(urllib.urlopen(url))
+        except:
+            time.sleep(61)
+    return None
+
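+# Reply to the first matching user who hasn't been contacted before, then
+# persist the updated user list to disk.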
+def tweet_back(query, tweet_reply, username, password):
+    results = search_results(query)
+    
+    api = twitter.Api(username, password)
+    try:
+        for result in results['results']:
+            if result['from_user'] not in user_list:
+                tries = 0
+                while tries < MAX_API_TRIES:
+                    tries = tries + 1
+                    try:
+                        api.PostUpdate('@' + result['from_user'] + ' ' + tweet_reply)
+                        break
+                    except:
+                        time.sleep(61)
+                logging.info('@' + result['from_user'] + ' ' + tweet_reply)
+
+                user_list.append(result['from_user'])
+                break  #just post one tweet and quit
+    except:
+        print 'Failed to post update. May have gone over the Twitter API limit; please wait and try again'
+        
+    #write the user_list to disk
+    f = open(USER_LIST_FILE, 'w')
+    pickle.dump(user_list, f)
+    
+if __name__=='__main__':
+    try:
+        for account in SETTINGS:
+            logging.info('account ' + account['user'])
+            trigger = random.choice(account['triggers'])
+            message = random.choice(trigger['messages'])
+            tweet_back(trigger['text'], message, account['user'], account['password'])
+    except:
+        logging.exception('Crashed')
+    
+    #email log
+    logdata = open(LOG_FILENAME, 'r').read()
+    gmail.send_email('TweetBack Log', logdata)

File twitter_trends.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import simplejson  # http://undefined.org/python/#simplejson
+import urllib
+
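+# Fetch the current trending topics from the Twitter search API and print their names.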
+url = 'http://search.twitter.com/trends.json'
+result = simplejson.load(urllib.urlopen(url))
+
+print [trend['name'] for trend in result['trends']]

File twitterbot2.py

+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# (C) 2009 HalOtis Marketing
+# written by Matt Warren
+# http://halotis.com/
+
+import twitter     #http://code.google.com/p/python-twitter/
+import bitly       #http://code.google.com/p/python-bitly/
+import feedparser  #available at feedparser.org
+import sqlite3
+from time import strftime
+import sys
+
+DATABASE = "tweets.sqlite"
+
+BITLY_LOGIN = "YOUR BITLY LOGIN"
+BITLY_API_KEY = "YOUR KEY"
+
+TWITTER_USER = "TWITTER ACCOUNT"
+TWITTER_PASSWORD = "TWITTER PASSWORD"
+	
+
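+# Print bit.ly click statistics for every link stored in the RSSContent
+# table, shortening any URL that doesn't have a short_url yet.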
+def print_stats():
+	conn = sqlite3.connect(DATABASE)
+	conn.row_factory = sqlite3.Row
+	c = conn.cursor()
+
+	b = bitly.Api(login=BITLY_LOGIN,apikey=BITLY_API_KEY)
+
+	c.execute('SELECT title, url, short_url from RSSContent')
+	all_links = c.fetchall()
+
+	for row in all_links:
+		
+		short_url = row['short_url']
+
+		if short_url == None:
+			short_url = b.shorten(row['url'])
+			c.execute('UPDATE RSSContent SET `short_url`=? WHERE `url`=?',(short_url,row['url']))
+
+
+		stats = b.stats(short_url)
+		print "%s - User clicks %s, total clicks: %s" % (row['title'], stats.user_clicks,stats.total_clicks)
+
+	conn.commit()
+
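+# Post each new feed entry to Twitter with a bit.ly-shortened link and
+# record it in the RSSContent table so it isn't tweeted twice.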
+def tweet_RSS(url):
+	
+	conn = sqlite3.connect(DATABASE)
+	conn.row_factory = sqlite3.Row
+	c = conn.cursor()
+	
+	#create the table if it doesn't exist
+	c.execute('CREATE TABLE IF NOT EXISTS RSSContent (`url`, `title`, `dateAdded`, `content`, `short_url`)')
+
+	api = twitter.Api(username=TWITTER_USER, password=TWITTER_PASSWORD)
+	b = bitly.Api(login=BITLY_LOGIN,apikey=BITLY_API_KEY)
+
+	d = feedparser.parse(url)
+ 
+	for entry in d.entries:
+	
+		#check for duplicates
+		c.execute('select * from RSSContent where url=?', (entry.link,))
+		if len(c.fetchall()) == 0:
+    
+			tweet_text = "%s - %s" % (entry.title, entry.summary)
+
+			shortened_link = b.shorten(entry.link)
+			
+			t = (entry.link, entry.title, strftime("%Y-%m-%d %H:%M:%S", entry.updated_parsed), entry.summary, shortened_link)
+			c.execute('insert into RSSContent (`url`, `title`,`dateAdded`, `content`, `short_url`) values (?,?,?,?,?)', t)
+			print "%s.. %s" % (tweet_text[:115], shortened_link)
+    
+			api.PostUpdate("%s.. %s" % (tweet_text[:115], shortened_link))
+    	
+	conn.commit()
+
+if __name__ == '__main__':
+  tweet_RSS('http://www.halotis.com/feed/')
+  #if sys.argv[1] == '-s':
+  #  print_stats()