Commits

Kevin Chan  committed 65897fd

Updated project scripts to latest revisions based on versions from kynded project.

  • Participants
  • Parent commits e194237

Comments (0)

Files changed (8)

+#!/usr/bin/env python
+# Copyright (c) 2012 Seth Davis http://www.curiasolutions.com/
+# s3put is Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import sys, os, time, datetime, argparse, threading, signal
+from fnmatch import fnmatch
+import boto
+
+__version__ = '0.8.1'
+
+version= """
+%(prog)s v%(version)s
+Copyright (c) 2012 Seth Davis
+http://github.com/seedifferently/boto_rsync
+"""
+
+description = """
+SOURCE and DESTINATION can either be a local path to a directory or specific
+file, a custom S3 or GS URL to a directory or specific key in the format of
+s3://bucketname/path/or/key, a S3 to S3 transfer using two S3 URLs, or a GS to
+GS transfer using two GS URLs.
+
+examples:
+    boto-rsync [OPTIONS] /local/path/ s3://bucketname/remote/path/
+ or
+    boto-rsync [OPTIONS] gs://bucketname/remote/path/or/key /local/path/
+ or
+    boto-rsync [OPTIONS] s3://bucketname/ s3://another_bucket/
+"""
+
def usage(parser):
    """Show the program's help text and terminate.

    Exits with status 2, the conventional exit code for command-line
    usage errors.
    """
    parser.print_help()
    sys.exit(2)
+
def get_full_path(path):
    """
    Return an absolute version of *path* with "~" and environment
    variables (e.g. "$USER") expanded.

    A trailing path separator on the expanded input is preserved on the
    result (os.path.abspath would otherwise strip it), since callers use
    it to distinguish directories from plain files.
    """
    expanded = os.path.expandvars(os.path.expanduser(path))
    absolute = os.path.abspath(expanded)
    if expanded and expanded.endswith(os.sep):
        absolute += os.sep
    return absolute
+
def convert_bytes(n):
    """Format a byte count as a short human-readable string.

    Scales *n* down by factors of 1024 through the units b/K/M/G/T and
    returns the value with one decimal place plus the unit suffix.
    Values beyond the terabyte range are still reported in "T".
    """
    units = ('b', 'K', 'M', 'G', 'T')
    amount = float(n)
    for unit in units:
        if amount < 1024.0:
            return '%.1f%s' % (amount, unit)
        amount /= 1024.0
    # Fallthrough for quantities of 1024T and above.
    return '%.1f%s' % (amount, units[-1])
+
def spinner(event, every):
    """Animate an ASCII spinner on stdout until *event* is set.

    Cycles through the frames "\\", "|", "/", "-" (backspacing over the
    previous frame each time) and sleeps up to *every* seconds between
    frames via event.wait().  Once *event* is set, the spinner erases
    itself and returns.  Intended to run in a background thread (see
    the threading.Thread usage in main()).

    Fixes over the previous version: uses the modern Event.is_set()
    instead of the deprecated camelCase isSet() alias, and checks the
    event before every frame — previously it was only polled once per
    four frames, so the spinner could linger up to 4*every seconds
    after the event was set.
    """
    frames = '\\|/-'
    index = 0
    while not event.is_set():
        sys.stdout.write('\b' + frames[index % len(frames)])
        sys.stdout.flush()
        index += 1
        event.wait(every)
    # Erase the spinner character before returning.
    sys.stdout.write('\b \b')
    sys.stdout.flush()
+
def submit_cb(bytes_so_far, total_bytes):
    """The "progress" callback for file transfers.

    Redraws a single-line progress report on stdout (bytes transferred,
    percent complete, transfer speed, and estimated-remaining or total
    elapsed time), using a trailing "\r" so each call overwrites the
    previous line.  Progress history is kept in the module-level
    ``speeds`` list of (bytes_so_far, timestamp) samples, which main()
    initializes; this function is not thread-safe since it mutates that
    shared global without locking.
    """
    global speeds
    
    # Setup speed calculation.  A call reporting (nearly) zero bytes
    # marks the start of a new transfer, so the sample history is reset.
    if bytes_so_far < 1:
        speeds = []
        speeds.append((bytes_so_far, time.time()))
    # Skip processing if our last process was less than 850ms ago —
    # throttles redraws when boto fires the callback very frequently.
    # The final call (bytes_so_far == total_bytes) is never skipped.
    elif bytes_so_far != total_bytes and (time.time() - speeds[-1][1]) < .85:
        return
    
    speeds.append((bytes_so_far, time.time()))
    
    # Try to get ~5 seconds of data info for speed calculation: walk the
    # samples newest-to-oldest and stop at the first one more than 5s
    # older than the latest.  After the loop, (s2, t2) hold that sample
    # — or the oldest sample available if none is old enough.
    s1, t1 = speeds[-1]
    for speed in reversed(speeds):
        s2, t2 = speed
        
        if (t1 - t2) > 5:
            break
    
    # Calculate the speed
    if bytes_so_far == total_bytes:
        # Calculate the overall average speed across the whole transfer.
        seconds = int(round(speeds[-1][1] - speeds[0][1]))
        if seconds < 1:
            # Clamp so sub-second transfers don't divide by zero (and
            # short transfers don't report an inflated speed).
            seconds = 1
        speed = 1.0 * total_bytes / seconds
    else:
        # Calculate the current average speed over the ~5s window.
        seconds = t1 - t2
        if seconds < 1:
            seconds = 1
        size = s1 - s2
        speed = 1.0 * size / seconds
    
    # Calculate the duration
    try:
        if bytes_so_far == total_bytes:
            # Calculate time taken (elapsed, whole seconds).
            duration = int(round(speeds[-1][1] - speeds[0][1]))
        else:
            # Calculate remaining time from the current average speed.
            duration = int(round((total_bytes - bytes_so_far) / speed))
        # Render as H:MM:SS via timedelta's str().
        duration = str(datetime.timedelta(seconds=duration))
    except ZeroDivisionError:
        # speed can be 0 when no bytes moved inside the window.
        duration = '0:00:00'
    
    # Calculate the progress percentage; a zero-byte transfer counts
    # as 100% complete.
    try:
        progress = round((1.0 * bytes_so_far / total_bytes) * 100)
    except ZeroDivisionError:
        progress = 100
    
    sys.stdout.write('    %6s of %6s    %3d%%    %6s/s    %7s    \r' % (
      convert_bytes(bytes_so_far), convert_bytes(total_bytes), progress,
      convert_bytes(speed), duration)
      )
    sys.stdout.flush()
+
def get_key_name(fullpath, prefix):
    """Return the remote key name for a local file path.

    Strips *prefix* from the front of *fullpath*, converts OS path
    separators to "/", and removes any leading "/" so the result is a
    valid S3/GS key name.
    """
    relative = fullpath[len(prefix):]
    return '/'.join(relative.split(os.sep)).lstrip('/')
+
def signal_handler(signum, frame):
    """Handle process signals (currently only SIGINT).

    On SIGINT: sets the module-level spinner event ``ev`` (when one is
    active) so any spinner thread can shut down cleanly, prints a
    newline to tidy up stdout, and exits with status 0.  Any other
    signal is ignored.
    """
    global ev
    
    if signum != signal.SIGINT:
        return
    
    if ev:
        ev.set()
    sys.stdout.write('\n')
    sys.exit(0)
+
+def main():
+    global speeds, ev
+    
+    signal.signal(signal.SIGINT, signal_handler)
+    ev = None
+    speeds = []
+    cb = submit_cb
+    num_cb = 10
+    rename = False
+    copy_file = True
+    
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        usage='%(prog)s [OPTIONS] SOURCE DESTINATION',
+        description=description,
+        add_help=False
+        )
+    parser.add_argument(
+        '-a', '--access-key', metavar='KEY', dest='cloud_access_key_id',
+        help='Your Access Key ID. If not supplied, boto will look for an ' + \
+             'environment variable or a credentials file (see README.rst ' + \
+             'for more info).'
+        )
+    parser.add_argument(
+        '-s', '--secret-key', metavar='SECRET', dest='cloud_secret_access_key',
+        help='Your Secret Key. If not supplied, boto will look for an ' + \
+             'environment variable or a credentials file.'
+        )
+    parser.add_argument(
+        '--anon', action='store_true',
+        help='Connect without credentials (S3 only). Useful if working ' + \
+             'with others\' buckets that have a global read/write ACL.'
+        )
+    parser.add_argument(
+        '--endpoint', metavar='HOST', default='s3.amazonaws.com',
+        help='Specify a specific S3 endpoint to connect to via boto\'s ' + \
+             '"host" connection argument (S3 only).'
+        )
+    parser.add_argument(
+        '-g', '--grant',
+        help='A canned ACL policy that will be granted on each file ' + \
+             'transferred to S3/GS. The value provided must be one of the ' + \
+             '"canned" ACL policies supported by S3/GS: private, ' + \
+             'public-read, public-read-write (S3 only), or authenticated-read'
+        )
+    parser.add_argument(
+        '-m', '--metadata', nargs='+', default=dict(),
+        help='One or more "Name: value" pairs specifying what metadata to ' + \
+             'set on each file transferred to S3/GS. Note: Be sure to end ' + \
+             'your args with "--" if this is the last argument specified ' + \
+             'so that SOURCE and DESTINATION can be read properly. e.g. ' + \
+             '%(prog)s -m "Content-Type: audio/mpeg" "Content-Disposition: ' + \
+             'attachment" -- ./path/ s3://bucket/'
+        )
+    parser.add_argument(
+        '-r', '--reduced', action='store_true',
+        help='Enable reduced redundancy on files copied to S3.'
+        )
+    parser.add_argument(
+        '-e', '--encrypt-keys', dest='encrypt', action='store_true',
+        help='Enable server-side encryption on files copied to S3 (only ' + \
+             'applies when S3 is the destination).'
+        )
+    parser.add_argument(
+        '-p', '--preserve-acl', dest='preserve', action='store_true',
+        help='Copy the ACL from the source key to the destination key ' + \
+             '(only applies in S3/S3 and GS/GS transfer modes).'
+        )
+    parser.add_argument(
+        '-w', '--no-overwrite', action='store_true',
+        help='No files will be overwritten, if the file/key exists on the ' + \
+             'destination it will be kept. Note that this is not a sync--' + \
+             'even if the file has been updated on the source it will not ' + \
+             'be updated on the destination.'
+        )
+    parser.add_argument(
+        '--glob', action='store_true',
+        help='Interpret the tail end of SOURCE as a filename pattern and ' + \
+             'filter transfers accordingly. Note: If globbing a local ' + \
+             'path, make sure that your CLI\'s automatic filename ' + \
+             'expansion is disabled (typically accomplished by enclosing ' + \
+             'SOURCE in quotes, e.g. "/path/*.zip").'
+        )
+    parser.add_argument(
+        '--no-recurse', action='store_true',
+        help='Do not recurse into directories.'
+        )
+    parser.add_argument(
+        '--skip-dirkeys', action='store_true',
+        help='When syncing to S3 or GS, skip the creation of keys which ' + \
+             'represent "directories" (an empty key ending in "/" for S3 ' + \
+             'or "_$folder$" for GS).'
+        )
+    parser.add_argument(
+        '--ignore-empty', action='store_true',
+        help='Ignore empty (0-byte) keys/files/directories. This will skip ' + \
+             'the transferring of empty directories and keys/files whose ' + \
+             'size is 0. Warning: S3/GS often uses empty keys with special ' + \
+             'trailing characters to specify directories.'
+        )
+    parser.add_argument(
+        '--delete', action='store_true',
+        help='Delete extraneous files from destination dirs after the ' + \
+             'transfer has finished (e.g. rsync\'s --delete-after).'
+        )
+    parser.add_argument(
+        '-n', '--dry-run', action='store_true', dest='no_op',
+        help='No files will be transferred, but informational messages ' + \
+             'will be printed about what would have happened.'
+        )
+    parser.add_argument(
+        '-v', '--verbose', action='store_false', dest='quiet',
+        help='Print additional informational messages.'
+        )
+    parser.add_argument(
+        '-d', '--debug', metavar='LEVEL', choices=[0, 1, 2], default=0,
+        type=int,
+        help='Level 0 means no debug output (default), 1 means normal ' + \
+             'debug output from boto, and 2 means boto debug output plus ' + \
+             'request/response output from httplib.'
+        )
+    parser.add_argument(
+        '--version', action='version',
+        version=version % dict(prog=parser.prog, version=__version__)
+        )
+    parser.add_argument(
+        '-h', '--help', action='help',
+        help='show this help message and exit'
+        )
+    parser.add_argument('SOURCE', help=argparse.SUPPRESS)
+    parser.add_argument('DESTINATION', help=argparse.SUPPRESS)
+    
+    try:
+        args = parser.parse_args()
+    except argparse.ArgumentTypeError:
+        pass
+    
+    try:
+        cloud_access_key_id = args.cloud_access_key_id
+        cloud_secret_access_key = args.cloud_secret_access_key
+        anon = args.anon
+        endpoint = args.endpoint
+        grant = args.grant
+        metadata = args.metadata
+        if not isinstance(metadata, dict):
+            metadata = dict([meta.split(': ', 1) for meta in metadata])
+        reduced = args.reduced
+        encrypt = args.encrypt
+        preserve = args.preserve
+        no_overwrite = args.no_overwrite
+        glob = args.glob
+        no_recurse = args.no_recurse or glob
+        skip_dirkeys = args.skip_dirkeys
+        ignore_empty = args.ignore_empty
+        delete = args.delete
+        no_op = args.no_op
+        quiet = args.quiet
+        debug = args.debug
+        source = args.SOURCE
+        dest = args.DESTINATION
+    except:
+        sys.stdout.write('\nERROR: Improperly formatted arguments.\n\n')
+        usage(parser)
+    
+    if (source.startswith('s3://') and dest.startswith('gs://') or
+        source.startswith('gs://') and dest.startswith('s3://')):
+        sys.stdout.write('ERROR: You cannot directly sync between S3 and ' +
+                         'Google Storage.\n\n')
+        usage(parser)
+    elif not source.startswith('s3://') and dest.startswith('s3://'):
+        # S3 upload sync
+        cloud_service = 's3'
+        path = get_full_path(source)
+        cloud_bucket = dest[5:].split('/')[0]
+        cloud_path = dest[(len(cloud_bucket) + 5):]
+        xfer_type = 'upload'
+    elif source.startswith('s3://') and not dest.startswith('s3://'):
+        # S3 download sync
+        cloud_service = 's3'
+        cloud_bucket = source[5:].split('/')[0]
+        cloud_path = source[(len(cloud_bucket) + 5):]
+        path = get_full_path(dest)
+        xfer_type = 'download'
+    elif not source.startswith('gs://') and dest.startswith('gs://'):
+        # GS upload sync
+        cloud_service = 'gs'
+        path = get_full_path(source)
+        cloud_bucket = dest[5:].split('/')[0]
+        cloud_path = dest[(len(cloud_bucket) + 5):]
+        xfer_type = 'upload'
+    elif source.startswith('gs://') and not dest.startswith('gs://'):
+        # GS download sync
+        cloud_service = 'gs'
+        cloud_bucket = source[5:].split('/')[0]
+        cloud_path = source[(len(cloud_bucket) + 5):]
+        path = get_full_path(dest)
+        xfer_type = 'download'
+    elif source.startswith('s3://') and dest.startswith('s3://'):
+        # S3 to S3 sync
+        cloud_service = 's3'
+        cloud_bucket = source[5:].split('/')[0]
+        cloud_path = source[(len(cloud_bucket) + 5):]
+        cloud_dest_bucket = dest[5:].split('/')[0]
+        cloud_dest_path = dest[(len(cloud_dest_bucket) + 5):]
+        xfer_type = 'sync'
+    elif source.startswith('gs://') and dest.startswith('gs://'):
+        # GS to GS sync
+        cloud_service = 'gs'
+        cloud_bucket = source[5:].split('/')[0]
+        cloud_path = source[(len(cloud_bucket) + 5):]
+        cloud_dest_bucket = dest[5:].split('/')[0]
+        cloud_dest_path = dest[(len(cloud_dest_bucket) + 5):]
+        xfer_type = 'sync'
+    else:
+        usage(parser)
+    
+    # Cloud paths shouldn't have a leading slash
+    cloud_path = cloud_path.lstrip('/')
+    
+    if xfer_type in ['download', 'upload']:
+        if not os.path.isdir(path) and not os.path.split(path)[0]:
+            sys.stdout.write(
+                '\nERROR: %s is not a valid path (does it exist?)\n\n' % path
+                )
+            usage(parser)
+        elif not cloud_bucket or len(cloud_bucket) < 3:
+            sys.stdout.write('\nERROR: Bucket name is invalid\n\n')
+            usage(parser)
+    elif xfer_type in ['sync']:
+        if not cloud_bucket or len(cloud_bucket) < 3 and \
+           not cloud_dest_bucket or len(cloud_dest_bucket) < 3:
+            sys.stdout.write('\nERROR: Bucket name is invalid\n\n')
+            usage(parser)
+        
+        # Cloud paths shouldn't have a leading slash
+        cloud_dest_path = cloud_dest_path.lstrip('/')
+    
+    
+    # Connect to Cloud
+    if cloud_service == 'gs':
+        c = boto.connect_gs(gs_access_key_id=cloud_access_key_id,
+                            gs_secret_access_key=cloud_secret_access_key)
+    else:
+        if anon:
+            c = boto.connect_s3(host=endpoint, anon=True)
+        else:
+            c = boto.connect_s3(aws_access_key_id=cloud_access_key_id,
+                                aws_secret_access_key=cloud_secret_access_key,
+                                host=endpoint)
+    c.debug = debug
+    b = c.get_bucket(cloud_bucket)
+    if xfer_type in ['sync']:
+        b2 = c.get_bucket(cloud_dest_bucket)
+    
+    
+    if xfer_type == 'upload':
+        # Perform cloud "upload"
+        
+        # Check for globbing
+        if glob:
+            glob = path.split(os.sep)[-1]
+            if glob:
+                path = path[:-len(glob)]
+        
+        if os.path.isdir(path) or glob:
+            # Possible multi file upload
+            sys.stdout.write('Scanning for files to transfer...  ')
+            sys.stdout.flush()
+            
+            if cloud_path and not cloud_path.endswith('/'):
+                cloud_path += '/'
+            
+            # Start "spinner" thread
+            ev = threading.Event()
+            t1 = threading.Thread(target=spinner, args=(ev, 0.25))
+            t1.start()
+            
+            try:
+                keys = {}
+                for key in b.list(prefix=cloud_path):
+                    if no_recurse and '/' in key.name[len(cloud_path):]:
+                        continue
+                    
+                    if glob and not fnmatch(key.name.split('/')[-1], glob):
+                        continue
+                    
+                    keys[key.name] = key.size
+            except Exception, e:
+                raise e
+            finally:
+                # End "spinner" thread
+                ev.set()
+                t1.join()
+                
+                # Clean stdout
+                sys.stdout.write('\n')
+            
+            # "Walk" the directory and upload files
+            for root, dirs, files in os.walk(path):
+                if no_recurse:
+                    if root != path:
+                        continue
+                
+                # Create "subdirectories"
+                if root != path and not skip_dirkeys:
+                    create_dirkey = True
+                    
+                    if cloud_service == 'gs':
+                        key_name = cloud_path + get_key_name(root, path) + \
+                                   '_$folder$'
+                    else:
+                        key_name = cloud_path + get_key_name(root, path) + '/'
+                    
+                    if ignore_empty and not files:
+                        if not quiet:
+                            sys.stdout.write(
+                                'Skipping %s (empty directory)\n' %
+                                key_name.replace('_$folder$', '/')
+                                )
+                        create_dirkey = False
+                    elif key_name in keys:
+                        if no_overwrite:
+                            if not quiet:
+                                sys.stdout.write(
+                                    'Skipping %s (not overwriting)\n' %
+                                    key_name.replace('_$folder$', '/')
+                                    )
+                            create_dirkey = False
+                        elif key_name.endswith('/') or \
+                             key_name.endswith('_$folder$'):
+                            if not quiet:
+                                sys.stdout.write(
+                                    'Skipping %s (size matches)\n' %
+                                    key_name.replace('_$folder$', '/')
+                                    )
+                            create_dirkey = False
+                    
+                    if create_dirkey:
+                        sys.stdout.write(
+                            '%s\n' %
+                            os.path.join(root[len(path):], '').lstrip(os.sep)
+                            )
+                        if not no_op:
+                            # Setup callback
+                            num_cb = 1
+                            
+                            # Send the directory
+                            k = b.new_key(key_name)
+                            if cloud_service == 'gs':
+                                k.set_contents_from_string(
+                                    '', cb=cb, num_cb=num_cb, policy=grant
+                                    )
+                            else:
+                                k.set_contents_from_string(
+                                    '', cb=cb, num_cb=num_cb, policy=grant,
+                                    reduced_redundancy=reduced,
+                                    encrypt_key=encrypt
+                                    )
+                            keys[key_name] = 0
+                            
+                            # Clean stdout
+                            sys.stdout.write('\n')
+                
+                for file in files:
+                    if glob and not fnmatch(file, glob):
+                        continue
+                    
+                    fullpath = os.path.join(root, file)
+                    key_name = cloud_path + get_key_name(fullpath, path)
+                    file_size = os.path.getsize(fullpath)
+                    
+                    if file_size == 0:
+                        if ignore_empty:
+                            if not quiet:
+                                sys.stdout.write(
+                                    'Skipping %s (empty file)\n' %
+                                    fullpath[len(path):].lstrip(os.sep)
+                                    )
+                            continue
+                    
+                    if key_name in keys:
+                        if no_overwrite:
+                            if not quiet:
+                                sys.stdout.write(
+                                    'Skipping %s (not overwriting)\n' %
+                                    fullpath[len(path):].lstrip(os.sep)
+                                    )
+                            continue
+                        elif keys[key_name] == file_size:
+                            if not quiet:
+                                sys.stdout.write(
+                                    'Skipping %s (size matches)\n' %
+                                    fullpath[len(path):].lstrip(os.sep)
+                                    )
+                            continue
+                    
+                    sys.stdout.write(
+                        '%s\n' %
+                        fullpath[len(path):].lstrip(os.sep)
+                        )
+                    
+                    if not no_op:
+                        # Setup callback
+                        num_cb = int(file_size ** .25)
+                        
+                        # Send the file
+                        k = b.new_key(key_name)
+                        k.update_metadata(metadata)
+                        if cloud_service == 'gs':
+                            k.set_contents_from_filename(
+                                fullpath, cb=cb, num_cb=num_cb, policy=grant
+                                )
+                        else:
+                            k.set_contents_from_filename(
+                                fullpath, cb=cb, num_cb=num_cb,
+                                policy=grant, reduced_redundancy=reduced,
+                                encrypt_key=encrypt
+                                )
+                        keys[key_name] = file_size
+                        
+                        # Clean stdout
+                        sys.stdout.write('\n')
+            
+            # If specified, perform deletes
+            if delete:
+                if cloud_path and cloud_path in keys:
+                    del(keys[cloud_path])
+                
+                for root, dirs, files in os.walk(path):
+                    if no_recurse:
+                        if root != path:
+                            continue
+                    
+                    for file in files:
+                        fullpath = os.path.join(root, file)
+                        key_name = cloud_path + get_key_name(fullpath, path)
+                        if key_name in keys:
+                            del(keys[key_name])
+                    
+                    if root != path:
+                        if cloud_service == 'gs':
+                            key_name = cloud_path + get_key_name(root, path) + \
+                                       '_$folder$'
+                        else:
+                            key_name = cloud_path + get_key_name(root, path) + \
+                                       '/'
+                        
+                        if key_name in keys:
+                            del(keys[key_name])
+                
+                for key_name, key_size in keys.iteritems():
+                    sys.stdout.write(
+                        'deleting %s\n' %
+                        key_name[len(cloud_path):].replace('_$folder$', '/')
+                        )
+                    if not no_op:
+                        # Delete the key
+                        b.delete_key(key_name)
+        
+        elif os.path.isfile(path):
+            # Single file upload
+            if cloud_path and not cloud_path.endswith('/'):
+                key_name = cloud_path
+            else:
+                key_name = cloud_path + os.path.split(path)[1]
+            filename = os.path.split(path)[1]
+            file_size = os.path.getsize(path)
+            
+            copy_file = True
+            key = b.get_key(key_name)
+            
+            if file_size == 0:
+                if ignore_empty:
+                    if not quiet:
+                        sys.stdout.write(
+                            'Skipping %s -> %s (empty file)\n' %
+                            filename, key_name.split('/')[-1]
+                            )
+                    copy_file = False
+            
+            if key:
+                if no_overwrite:
+                    copy_file = False
+                    if not quiet:
+                        if filename != key_name.split('/')[-1]:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (not overwriting)\n' %
+                                filename, key_name.split('/')[-1]
+                                )
+                        else:
+                            sys.stdout.write('Skipping %s (not overwriting)\n' %
+                                             filename)
+                elif key.size == file_size:
+                    copy_file = False
+                    if not quiet:
+                        if filename != key_name.split('/')[-1]:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (size matches)\n' %
+                                filename, key_name.split('/')[-1]
+                                )
+                        else:
+                            sys.stdout.write('Skipping %s (size matches)\n' %
+                                             filename)
+            
+            if copy_file:
+                if filename != key_name.split('/')[-1]:
+                    sys.stdout.write('%s -> %s\n' %
+                                     (filename, key_name.split('/')[-1]))
+                else:
+                    sys.stdout.write('%s\n' % filename)
+                
+                if not no_op:
+                    # Setup callback
+                    num_cb = int(file_size ** .25)
+                    
+                    # Send the file
+                    k = b.new_key(key_name)
+                    k.update_metadata(metadata)
+                    if cloud_service == 'gs':
+                        k.set_contents_from_filename(
+                            path, cb=cb, num_cb=num_cb, policy=grant
+                            )
+                    else:
+                        k.set_contents_from_filename(
+                            path, cb=cb, num_cb=num_cb, policy=grant,
+                            reduced_redundancy=reduced, encrypt_key=encrypt
+                            )
+                    
+                    # Clean stdout
+                    sys.stdout.write('\n')
+    
+    elif xfer_type == 'download':
+        # Perform cloud "download"
+        
+        cloud_path_key = None
+        
+        if cloud_path:
+            # Check for globbing
+            if glob:
+                glob = cloud_path.split('/')[-1]
+                if glob:
+                    cloud_path = cloud_path[:-len(glob)]
+            
+            if cloud_path:
+                cloud_path_key = b.get_key(cloud_path)
+        else:
+            glob = False
+        
+        if cloud_path_key and not cloud_path_key.name.endswith('/'):
+            # Single file download
+            key = cloud_path_key
+            keypath = key.name.split('/')[-1]
+            if not os.path.isdir(path) and not path.endswith(os.sep):
+                rename = True
+                fullpath = path
+            else:
+                fullpath = os.path.join(path, keypath)
+            
+            if key.size == 0:
+                if ignore_empty:
+                    if not quiet:
+                        if rename:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (empty key)\n' %
+                                keypath, fullpath.split(os.sep)[-1]
+                                )
+                        else:
+                            sys.stdout.write(
+                                'Skipping %s (empty key)\n' %
+                                fullpath.split(os.sep)[-1]
+                                )
+                    copy_file = False
+            
+            if not os.path.isdir(os.path.split(fullpath)[0]):
+                if not quiet:
+                    sys.stdout.write(
+                        'Creating new directory: %s\n' %
+                        os.path.split(fullpath)[0]
+                        )
+                if not no_op:
+                    os.makedirs(os.path.split(fullpath)[0])
+            elif os.path.exists(fullpath):
+                if no_overwrite:
+                    if not quiet:
+                        if rename:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (not overwriting)\n' %
+                                keypath, fullpath.split(os.sep)[-1]
+                                )
+                        else:
+                            sys.stdout.write(
+                                'Skipping %s (not overwriting)\n' %
+                                fullpath.split(os.sep)[-1]
+                                )
+                    copy_file = False
+                elif key.size == os.path.getsize(fullpath):
+                    if not quiet:
+                        if rename:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (size matches)\n' %
+                                keypath.replace('/', os.sep),
+                                fullpath.split(os.sep)[-1]
+                                )
+                        else:
+                            sys.stdout.write(
+                                'Skipping %s (size matches)\n' %
+                                fullpath.split(os.sep)[-1]
+                                )
+                    copy_file = False
+            
+            if copy_file:
+                if rename:
+                    sys.stdout.write(
+                        '%s -> %s\n' % (keypath, fullpath.split(os.sep)[-1])
+                        )
+                else:
+                    sys.stdout.write('%s\n' % keypath)
+                
+                if not no_op:
+                    # Setup callback
+                    num_cb = int(key.size ** .25)
+                    
+                    # Get the file
+                    key.get_contents_to_filename(fullpath, cb=cb, num_cb=num_cb)
+                    
+                    # Clean stdout
+                    sys.stdout.write('\n')
+        
+        else:
+            # Possible multi file download
+            if not cloud_path_key and cloud_path and \
+               not cloud_path.endswith('/'):
+                cloud_path += '/'
+            
+            keys = []
+            
+            sys.stdout.write('Scanning for keys to transfer...\n')
+            
+            for key in b.list(prefix=cloud_path):
+                # Skip the key if it is the cloud path
+                if not key.name[len(cloud_path):] or \
+                   key.name[len(cloud_path):] == '$folder$':
+                    continue
+                
+                if no_recurse and '/' in key.name[len(cloud_path):]:
+                    continue
+                
+                if glob and not fnmatch(key.name.split('/')[-1], glob):
+                    continue
+                
+                keypath = key.name[len(cloud_path):]
+                if cloud_service == 'gs':
+                    fullpath = os.path.join(
+                        path,
+                        keypath.replace('_$folder$', os.sep)
+                        )
+                else:
+                    fullpath = os.path.join(path, keypath.replace('/', os.sep))
+                
+                keys.append(fullpath)
+                
+                if key.size == 0 and ignore_empty:
+                    if not quiet:
+                        sys.stdout.write(
+                            'Skipping %s (empty key)\n' %
+                            fullpath[len(os.path.join(path, '')):]
+                            )
+                    continue
+                
+                if not os.path.isdir(os.path.split(fullpath)[0]):
+                    if not quiet:
+                        sys.stdout.write(
+                            'Creating new directory: %s\n' %
+                            os.path.split(fullpath)[0]
+                            )
+                    if not no_op:
+                        os.makedirs(os.path.split(fullpath)[0])
+                elif os.path.exists(fullpath):
+                    if no_overwrite:
+                        if not quiet:
+                            sys.stdout.write(
+                                'Skipping %s (not overwriting)\n' %
+                                fullpath[len(os.path.join(path, '')):]
+                                )
+                        continue
+                    elif key.size == os.path.getsize(fullpath) or \
+                         key.name.endswith('/') or \
+                         key.name.endswith('_$folder$'):
+                        if not quiet:
+                            sys.stdout.write(
+                                'Skipping %s (size matches)\n' %
+                                fullpath[len(os.path.join(path, '')):]
+                                )
+                        continue
+                
+                if cloud_service == 'gs':
+                    sys.stdout.write('%s\n' %
+                                     keypath.replace('_$folder$', os.sep))
+                else:
+                    sys.stdout.write('%s\n' % keypath.replace('/', os.sep))
+                
+                if not no_op:
+                    if key.name.endswith('/') or key.name.endswith('_$folder$'):
+                        # Looks like a directory, so just print the status
+                        submit_cb(0, 0)
+                    else:
+                        # Setup callback
+                        num_cb = int(key.size ** .25)
+                        
+                        # Get the file
+                        key.get_contents_to_filename(fullpath, cb=cb,
+                                                     num_cb=num_cb)
+                    
+                    # Clean stdout
+                    sys.stdout.write('\n')
+            
+            # If specified, perform deletes
+            if delete:
+                for root, dirs, files in os.walk(path):
+                    if no_recurse:
+                        if root != path:
+                            continue
+                    
+                    if files:
+                        for file in files:
+                            if glob and not fnmatch(file, glob):
+                                continue
+                            
+                            filepath = os.path.join(root, file)
+                            if filepath not in keys:
+                                sys.stdout.write(
+                                    'deleting %s\n' %
+                                    filepath[len(os.path.join(path, '')):]
+                                    )
+                                if not no_op:
+                                    # Delete the file
+                                    os.remove(filepath)
+                    elif root != path:
+                        dirpath = os.path.join(root, '')
+                        if dirpath not in keys:
+                            sys.stdout.write(
+                                'deleting %s\n' %
+                                dirpath[len(os.path.join(path, '')):]
+                                )
+                            if not no_op:
+                                # Remove the directory
+                                os.rmdir(dirpath)
+    else:
+        # Perform cloud to cloud "sync"
+        
+        cloud_path_key = None
+        
+        if cloud_path:
+            # Check for globbing
+            if glob:
+                glob = cloud_path.split('/')[-1]
+                if glob:
+                    cloud_path = cloud_path[:-len(glob)]
+            
+            if cloud_path:
+                cloud_path_key = b.get_key(cloud_path)
+        else:
+            glob = False
+        
+        if cloud_path_key and not cloud_path_key.name.endswith('/'):
+            # Single file sync
+            key = cloud_path_key
+            keypath = key.name.split('/')[-1]
+            if cloud_dest_path and not cloud_dest_path.endswith('/'):
+                rename = True
+                fullpath = cloud_dest_path
+            else:
+                fullpath = cloud_dest_path + keypath
+                fullpath = fullpath.lstrip('/')
+            
+            dest_key = b2.get_key(fullpath)
+            
+            if key.size == 0:
+                if ignore_empty:
+                    if not quiet:
+                        if rename:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (empty key)\n' %
+                                (keypath.split('/')[-1], fullpath.split('/')[-1])
+                                )
+                        else:
+                            sys.stdout.write(
+                                'Skipping %s (empty key)\n' % fullpath
+                                )
+                    copy_file = False
+            
+            if dest_key:
+                # TODO: Check for differing ACL
+                if no_overwrite:
+                    if not quiet:
+                        if rename:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (not overwriting)\n' %
+                                (keypath.split('/')[-1], fullpath.split('/')[-1])
+                                )
+                        else:
+                            sys.stdout.write(
+                                'Skipping %s (not overwriting)\n' % fullpath
+                                )
+                    copy_file = False
+                elif key.size == dest_key.size:
+                    if not quiet:
+                        if rename:
+                            sys.stdout.write(
+                                'Skipping %s -> %s (size matches)\n' %
+                                (keypath.split('/')[-1], fullpath.split('/')[-1])
+                                )
+                        else:
+                            sys.stdout.write(
+                                'Skipping %s (size matches)\n' % fullpath
+                                )
+                    copy_file = False
+            
+            if copy_file:
+                if rename:
+                    sys.stdout.write('%s -> %s...  ' % (
+                        keypath.split('/')[-1], fullpath.split('/')[-1])
+                        )
+                else:
+                    sys.stdout.write('%s...  ' % keypath)
+                sys.stdout.flush()
+                if not no_op:
+                    speeds.append((0, time.time()))
+                    
+                    # Start "spinner" thread
+                    ev = threading.Event()
+                    t1 = threading.Thread(target=spinner, args=(ev, 0.25))
+                    t1.start()
+                    
+                    try:
+                        # Transfer the key
+                        key.copy(cloud_dest_bucket, fullpath,
+                                 metadata=metadata, reduced_redundancy=reduced,
+                                 preserve_acl=preserve, encrypt_key=encrypt)
+                    except Exception, e:
+                        raise e
+                    finally:
+                        # End "spinner" thread
+                        ev.set()
+                        t1.join()
+                    
+                    if rename:
+                        sys.stdout.write('\r%s -> %s    \n' % (
+                            keypath.split('/')[-1], fullpath.split('/')[-1]
+                            ))
+                    else:
+                        sys.stdout.write('\r%s    \n' % keypath)
+                    sys.stdout.flush()
+                    submit_cb(key.size, key.size)
+                else:
+                    if rename:
+                        sys.stdout.write('\r%s -> %s    ' % (
+                            keypath.split('/')[-1], fullpath.split('/')[-1])
+                            )
+                    else:
+                        sys.stdout.write('\r%s    ' % keypath)
+                    sys.stdout.flush()
+                
+                # Clean stdout
+                sys.stdout.write('\n')
+        
+        else:
+            # Possible multi file sync
+            if not cloud_path_key and cloud_path and \
+               not cloud_path.endswith('/'):
+                cloud_path += '/'
+            if cloud_dest_path and not cloud_dest_path.endswith('/'):
+                cloud_dest_path += '/'
+            
+            keys = []
+            
+            sys.stdout.write('Scanning for keys to transfer...\n')
+            
+            for key in b.list(prefix=cloud_path):
+                if no_recurse and '/' in key.name[len(cloud_path):]:
+                    continue
+                
+                if glob and not fnmatch(key.name.split('/')[-1], glob):
+                    continue
+                
+                if key.name == cloud_path:
+                    keypath = key.name.split('/')[-2] + '/'
+                else:
+                    keypath = key.name[len(cloud_path):]
+                fullpath = cloud_dest_path + keypath
+                fullpath = fullpath.lstrip('/')
+                
+                keys.append(fullpath)
+                dest_key = b2.get_key(fullpath)
+                
+                if key.size == 0:
+                    if ignore_empty:
+                        if not quiet:
+                            sys.stdout.write(
+                                'Skipping %s (empty key)\n' %
+                                fullpath.replace('_$folder$', '/')
+                                )
+                        continue
+                
+                if dest_key:
+                    # TODO: Check for differing ACL
+                    if no_overwrite:
+                        if not quiet:
+                            sys.stdout.write(
+                                'Skipping %s (not overwriting)\n' %
+                                fullpath.replace('_$folder$', '/')
+                                )
+                        continue
+                    elif key.size == dest_key.size:
+                        if not quiet:
+                            sys.stdout.write(
+                                'Skipping %s (size matches)\n' %
+                                fullpath.replace('_$folder$', '/')
+                                )
+                        continue
+                
+                sys.stdout.write('%s...  ' % keypath.replace('_$folder$', '/'))
+                sys.stdout.flush()
+                if not no_op:
+                    speeds.append((0, time.time()))
+                    
+                    # Start "spinner" thread
+                    ev = threading.Event()
+                    t1 = threading.Thread(target=spinner, args=(ev, 0.25))
+                    t1.start()
+                    
+                    try:
+                        # Transfer the key
+                        key.copy(cloud_dest_bucket, fullpath,
+                                 metadata=metadata, reduced_redundancy=reduced,
+                                 preserve_acl=preserve, encrypt_key=encrypt)
+                    except Exception, e:
+                        raise e
+                    finally:
+                        # End "spinner" thread
+                        ev.set()
+                        t1.join()
+                    
+                    sys.stdout.write('\r%s    \n' % \
+                                     keypath.replace('_$folder$', '/'))
+                    sys.stdout.flush()
+                    submit_cb(key.size, key.size)
+                else:
+                    sys.stdout.write('\r%s    ' % \
+                                     keypath.replace('_$folder$', '/'))
+                    sys.stdout.flush()
+                
+                # Clean stdout
+                sys.stdout.write('\n')
+            
+            # If specified, perform deletes
+            if delete:
+                for key in b2.list(prefix=cloud_dest_path):
+                    if no_recurse and '/' in key.name[len(cloud_dest_path):]:
+                        continue
+                    
+                    if glob and not fnmatch(key.name.split('/')[-1], glob):
+                        continue
+                    
+                    keypath = key.name[len(cloud_dest_path):]
+                    
+                    if key.name not in keys:
+                        sys.stdout.write(
+                            'deleting %s\n' % keypath.replace('_$folder$', '/')
+                            )
+                        if not no_op:
+                            # Delete the key
+                            key.delete()
+
+if __name__ == "__main__":
+    main()

File project.config.template

 django-categories
 django-registration
 django-tastypie
+defusedxml
 django-social-auth
 django-postman
 django-hitcount
 django-autocomplete-light
 django-guardian
 raven
+nose
+django-nose
+nose-ignore-docstring
+coverage
+mock
 selenium
--django-cities-light
+cssselect
+pyvirtualdisplay
+chromedriver
+webtest
+django-webtest
+boto
+django-storages
+django-picklefield
 "
 
 # list of apps installed in project
 base_tests
 "
 
+# utility and 3rd-party apps that are not tested as base tests
+NOT_TESTED="
+staticpages
+sections
+newspaper
+commenthelpers
+pagination
+txtutils
+veditor
+"
+
+# default verbosity (for django's "python manage.py test -v LEVEL ...")
+# * level is 1 to 3
+DEFAULT_TEST_VERBOSITY=2
+
+# use nose/django-nose as test runner instead of built-in Django default
+USE_NOSE=0
+
+# use test server (use custom test server ports defined below for live
+# servers)
+# * if 0, django will use default live server port (localhost:8081).
+USE_LIVE_SERVER=1
+
+# default live-server ports
+TEST_SERVER_PORT="9006-9016"
+
+# use run_scripts.sh to start display (instead of starting display in
+# tests)
+START_DISPLAY=0
+
+# DISPLAY id
+XVFB_DISPLAY_ID=":99"
+
+
+# media server url
+MEDIA_SERVER_URL="http://media.dev1.kynded.net"
+
+# AWS S3 access credentials
+
+# credentials for kchan
+AWS_ACCESS_KEY_ID="YOUR_AWS_ACCESS_KEY_ID"
+AWS_SECRET_ACCESS_KEY="YOUR_AWS_SECRET_ACCESS_KEY"
+AWS_BUCKET_NAME="YOUR_AWS_BUCKET_NAME"
+
+# s3sync script configurations
+
+# default directory to sync to remote storage
+S3SYNC_SRC_DIR="${PROJECT_DIR}/static"
+
+
 ########################################################################

File run_tests.sh

 #!/bin/sh
 #
-# Script to run tests using Django default test runner.
+# Script to run tests using Django or nose test runner.
 #
 # * created: 2013-07-20 Kevin Chan <kefin@makedostudio.com>
-# * updated: 2013-07-21 kchan
+# * updated: 2013-09-18 kchan
 
 ########################################################################
 myname=$(basename "$0")
 #   -v | --verbosity [0-3]    # specify verbosity when running tests
 #   --settings SETTINGS       # Python path to settings module
 #   --pythonpath PYTHONPATH   # directory to add to Python path
-#   --traceback               # print traceback on exception
-#   --noinput                 # tells Django to NOT prompt the user for input
-#   --failfast                # tells Django to stop running test suite after
-#                             # first failed test
-#   --testrunner TESTRUNNER   # use specified test runner class
+#   -d | --start-display      # use run_script.sh to start display
+#                             # (instead of making tests start display)
+#   -s | --use-live-server    # use live server (to run selenium tests)
+#                             # * with custom ports defined in
+#                             #   project.config
+#   -N | --use-nose           # set USE_NOSE to 1
+#                             # * this will run tests using the nose
+#                             #   test runner instead of the Django
+#                             #   default.
+#   -D | --use-default        # use the default (Django) test runner
+#   -n | --nose OPTION        # set option for nose test runner
+#                             # * can be specified multiple times
+#                             # * nose options only apply when
+#                             #   USE_NOSE is set to 1 in project.config.
 #
 # Wrapper script around Django default test runner to perform tests.
 # * if tests are not specified, script will run the following default
 #   tests defined in project.config:
+# * script will set TEST_VERBOSITY configuration var to verbosity
+#   level in os.environ (this will get passed to test_settings.py).
+#
 EOF
     echo "$DEFAULT_TESTS" | sed -e '/^$/d;s/^/#     /'
     exit 1
 # --pythonpath=PYTHONPATH
 #                       A directory to add to the Python path, e.g.
 #                       "/home/djangoprojects/myproject".
-# --traceback           Print traceback on exception
-# --noinput             Tells Django to NOT prompt the user for input of any
-#                       kind.
-# --failfast            Tells Django to stop running the test suite after
-#                       first failed test.
-# --testrunner=TESTRUNNER
-#                       Tells Django to use specified test runner class
-#                       instead of the one specified by the TEST_RUNNER
-#                       setting.
 # --liveserver=LIVESERVER
 #                       Overrides the default address where the live server
 #                       (used with LiveServerTestCase) is expected to run
 
 DIVIDER="########################################################################"
 
+GREP="grep"
+SED="sed"
+AWK="awk"
+
 show_version=0
-verbosity="$DEFAULT_VERBOSITY"
+use_live_server="${USE_LIVE_SERVER-0}"
+start_display="${START_DISPLAY-0}"
+verbosity="$DEFAULT_TEST_VERBOSITY"
 test_runner_opts=
+nose_opts=
 tests=
 
+# set default test runner
+if [ "X$USE_NOSE" = "X1" ]; then
+    test_runner="nose"
+else
+    test_runner="django"
+fi
+
+# default live server ports (if TEST_SERVER_PORT is not specified)
+DEFAULT_TEST_SERVER_PORT="9006-9010"
+TEST_SERVER_URL="localhost:${TEST_SERVER_PORT-$DEFAULT_TEST_SERVER_PORT}"
+
+XVFB_DISPLAY="${XVFB_DISPLAY_ID-:99}"
+DISPLAY_CONTROL="${mydir}/xvfb_control.sh"
+
 while [ $# -gt 0 ]
 do
     case "$1" in
             shift
             test_runner_opts="$test_runner_opts --pythonpath=$1"
             ;;
-        --traceback)
-            test_runner_opts="$test_runner_opts --traceback"
+        -s|--use-live-server)
+            use_live_server=1
             ;;
-        --noinput)
-            test_runner_opts="$test_runner_opts --noinput"
+        -d|--start-display)
+            start_display=1
             ;;
-        --failfast)
-            test_runner_opts="$test_runner_opts --failfast"
+        -N|--use-nose)
+            test_runner="nose"
             ;;
-        --testrunner)
+        -D|--use-default)
+            test_runner="django"
+            ;;
+        -n|--nose)
             shift
-            test_runner_opts="$test_runner_opts --testrunner=$1"
-            ;;
-        --liveserver)
-            error "Unimplemented: use 'python manage.py test' command instead"
+            nose_opts="$nose_opts $1"
             ;;
         --version)
             show_version=1
     . "$VIRTUALENV_DIR/bin/activate"
 fi
 
+if [ $use_live_server -ne 0 ]; then
+    test_runner_opts="$test_runner_opts --liveserver=$TEST_SERVER_URL"
+fi
+
 echo "$DIVIDER"
 echo "# python $MANAGE_SCRIPT test $test_runner_opts $tests"
 echo "$DIVIDER"
 echo
 
+export VIRTUALENV_BIN_DIR="$VIRTUALENV_DIR/bin"
+export DISPLAY="$XVFB_DISPLAY"
+export DISPLAY_STARTED="$start_display"
+export TEST_VERBOSITY="$verbosity"
+
+case "$test_runner" in
+    nose)
+        export USE_NOSE=1
+        if [ "X$nose_opts" != "X" ]; then
+            export NOSE_ARGS="$nose_opts"
+        fi
+        ;;
+    *)
+        export USE_NOSE=0
+        ;;
+esac
+
+if [ $start_display -ne 0 ] && [ -x "$DISPLAY_CONTROL" ]; then
+    echo "# starting display..."
+    "$DISPLAY_CONTROL" start
+    echo
+fi
+
 python "$MANAGE_SCRIPT" test $test_runner_opts $tests
 
+if [ $start_display -ne 0 ] && [ -x "$DISPLAY_CONTROL" ]; then
+    echo
+    echo "# stopping display..."
+    "$DISPLAY_CONTROL" stop
+fi
+
 exit
+#!/bin/sh
+#
+# Script to sync static media to AWS S3
+# * requires boto-rsync script
+# * see: https://github.com/seedifferently/boto_rsync
+#
+# * created: 2013-08-12 Kevin Chan <kefin@makedostudio.com>
+# * updated: 2013-08-12 kchan
+
+########################################################################
+# resolve the directory this script lives in (quoted so paths with
+# spaces do not break the cd)
+myname=$(basename "$0")
+OLD_PWD=$PWD
+cd "$(dirname "$0")"
+mydir=${PWD%/}
+cd "$OLD_PWD"
+########################################################################
+
+
+########################################################################
+# helper functions
+
+usage()
+{
+    cat <<EOF
+# Usage: $myname [options] SRC_DIR
+#    SRC_DIR                      # src directory path to sync to remote storage
+# options:
+#    -v | --verbose               # verbose output
+#    -n | --dry-run               # do dry run and do not execute
+#    -o | --default-opts OPTIONS  # default options
+#    -g | --grant PERMISSIONS     # use "PERMISSIONS" acl for each file
+#    -d | --delete                # delete files in dst not in src
+#
+# Script to sync local server media files specified in SRC_DIR to AWS S3
+# remote storage.
+#
+# Default source directory to sync to remote:
+# ${S3SYNC_SRC_DIR}
+EOF
+    exit 1
+}
+
+########################################################################
+# load configurations and utils
+
+UTILS="${mydir}/utils.sh"
+[ -f "$UTILS" ] || { echo >&2 "### Cannot find utils.sh"; exit 1; }
+. "$UTILS"
+
+CONFIG_FILE="project.config"
+
+CONFIG="${mydir}/${CONFIG_FILE}"
+[ -f "$CONFIG" ] || config_usage
+. "$CONFIG"
+
+
+########################################################################
+# check for sync script and set defaults
+
+SYNC_SCRIPT="${mydir}/boto-rsync"
+if [ ! -x "$SYNC_SCRIPT" ]; then
+    error "Cannot find sync script: $SYNC_SCRIPT"
+fi
+
+SRC="${S3SYNC_SRC_DIR}"
+DST="s3://${AWS_BUCKET_NAME}/"
+DEFAULT_ACL="public-read"
+DEFAULT_OPTIONS=
+
+default_opts="$DEFAULT_OPTIONS"
+perms="$DEFAULT_ACL"
+src_dir=
+verbose=
+dry_run=
+delete=
+
+while [ $# -gt 0 ]
+do
+    case "$1" in
+        -\?|-h|-help|--help)
+            usage
+            ;;
+        -n|--dry-run)
+            dry_run="--dry-run"
+            ;;
+        -v|--verbose)
+            verbose="--verbose"
+            ;;
+        -o|--default-opts)
+            shift
+            default_opts="$1"
+            ;;
+        -d|--delete)
+            delete="--delete"
+            ;;
+        -g|--grant)
+            shift
+            perms="$1"
+            ;;
+        -*)
+            error "Unknown parameter: $1"
+            ;;
+        *)
+            # positional argument: SRC_DIR (as documented in usage)
+            src_dir="$1"
+            ;;
+    esac
+    shift
+done
+
+
+# AWS credentials and bucket must be defined in project.config
+if [ "X$AWS_ACCESS_KEY_ID" = "X" ]; then
+    error "AWS_ACCESS_KEY_ID not found. Please define in project.config."
+fi
+
+if [ "X$AWS_SECRET_ACCESS_KEY" = "X" ]; then
+    error "AWS_SECRET_ACCESS_KEY not found. Please define in project.config."
+fi
+
+if [ "X$AWS_BUCKET_NAME" = "X" ]; then
+    error "AWS_BUCKET_NAME not found. Please define in project.config."
+fi
+
+# fall back to the configured default source directory when no
+# SRC_DIR argument was given
+if [ "X$src_dir" = "X" ]; then
+    src_dir="$SRC"
+fi
+
+# sync files from source to destination with default acl permissions
+
+"$SYNC_SCRIPT" \
+    -a "$AWS_ACCESS_KEY_ID" \
+    -s "$AWS_SECRET_ACCESS_KEY" \
+    -g "$perms" \
+    $default_opts $delete \
+    $verbose $dry_run \
+    "$src_dir" "$DST"
+
+exit

File setup_virtualenv.sh

 #     utils.sh             # utility functions
 #
 # * created: 2013-01-26 Kevin Chan <kefin@makedostudio.com>
-# * updated: 2013-07-06 kchan
+# * updated: 2013-08-07 kchan
 
 ########################################################################
 myname=$(basename "$0")
 
 # check list of required packages against installed packages
 
-is_installed()
-{
-    # check to see if a package is installed in site-packages
-    if [ "X$1" != "X" ]; then
-        match=$(pip list | egrep -i '^'"$1"'( +\(.+\) *)?$')
-        echo "$match"
-    else
-        echo ""
-    fi
-}
+# is_installed()
+# {
+#     # check to see if a package is installed in site-packages
+#     if [ "X$1" != "X" ]; then
+#         match=$(pip list | egrep -i '^'"$1"'( +\(.+\) *)?$')
+#         echo "$match"
+#     else
+#         echo ""
+#     fi
+# }
 
 check_packages()
 {
 
     section "# checking installed packages ..."
 
-    # for _package in $_pkgs
-    # do
-    #     match=$(is_installed "$_package")
-    #     echo "# $_package: $match"
-    # done
-    # return
-
     pip list > "$tmpfile"
     for _package in $_pkgs
     do
                 if [ "X$match" != "X" ]; then
                     log "# installed    : $match"
                 else
-                    log "# not found    : $_package"
+                    installed=$(check_special_install "$_package")
+                    if [ "X$installed" = "Xtrue" ]; then
+                        log "# installed    : $_package"
+                    else
+                        log "# not found    : $_package"
+                    fi
                 fi
                 ;;
         esac
 
 
 ########################################################################
+# process special package installations
+
+if [ "X$SPECIAL_INSTALL_SCRIPT" != "X" ] \
+    && [ -x "$SPECIAL_INSTALL_SCRIPT" ]; then
+
+    special_install_script="$SPECIAL_INSTALL_SCRIPT"
+    log ""
+    log "# special install script: $special_install_script"
+
+    special_installs=$(awk '
+/\[ *special *install *commands *: *begin *\]/{flag = 1; next}
+/\[ *special *install *commands *: *end *\]/{flag = 0}
+flag {print}' < "$special_install_script" \
+    | sed -ne 's/^ *if *\[ *"\$_package" *= *"\([^"][^"]*\)" *\].*$/\1/p')
+
+    for label in $special_installs
+    do
+        [ "X$label" != "X" ] && store_data "$label"
+    done
+
+    . "$special_install_script"
+else
+    special_install()
+    {
+        log "### no special install script found."
+    }
+    special_install
+fi
+
+
+########################################################################
 # delete packages
 
 if [ $delete -ne 0 ]; then
 
 
 ########################################################################
-# process special package installations
-
-if [ "X$SPECIAL_INSTALL_SCRIPT" != "X" ] \
-    && [ -x "$SPECIAL_INSTALL_SCRIPT" ]; then
-
-    special_install_script="$SPECIAL_INSTALL_SCRIPT"
-    log ""
-    log "# special install script: $special_install_script"
-
-    special_installs=$(awk '
-/\[ *special *install *commands *: *begin *\]/{flag = 1; next}
-/\[ *special *install *commands *: *end *\]/{flag = 0}
-flag {print}' < "$special_install_script" \
-    | sed -ne 's/^ *if *\[ *"\$_package" *= *"\([^"][^"]*\)" *\].*$/\1/p')
-
-    for label in $special_installs
-    do
-        [ "X$label" != "X" ] && store_data "$label"
-    done
-
-    . "$special_install_script"
-else
-    special_install()
-    {
-        log "### no special install script found."
-    }
-    special_install
-fi
-
-
-########################################################################
 # install packages
 
 for pkg in $pkgs

File special_install.sh.template

         pip_special_install "$_package" "$repo"
     fi
 
+    if [ "$_package" = "django-picklefield" ]; then
+        # NOTE: add "-e" as pip options to install python egg
+        repo="git://github.com/gintas/django-picklefield.git#egg=django-picklefield"
+        pip_special_install "$_package" "$repo" "-e"
+    fi
+
+    if [ "$_package" = "nose-ignore-docstring" ]; then
+        # NOTE: add "-e" as pip options to install python egg
+        # * this is a fork of the original repo at
+        #   https://github.com/schlamar/nose-ignore-docstring
+        repo="git://github.com/kefin/nose-ignore-docstring.git#egg=nose-ignore-docstring"
+        pip_special_install "$_package" "$repo" "-e"
+    fi
+
     if [ "$_package" = "minipy" ]; then
         section "# installing minipy ... (manually)"
         minipy_repo_dir="$SRC_DIR/minipy"
         cd "$BASE_DIR"
     fi
 
-    if [ "$_package" = "minicms" ]; then
-        section "# installing minicms ... (manually)"
-        minicms_repo_dir="$SRC_DIR/minicms"
+    if [ "$_package" = "chromedriver" ]; then
+        section "# installing chromedriver ..."
+        src_dir="$SRC_DIR"
         cd "$SRC_DIR"
-        MINICMS_REPO="ssh://hg@bitbucket.org/kchan/minicms"
+        CHROMEDRIVER="https://chromedriver.googlecode.com/files/chromedriver_linux64_2.1.zip"
         {
+            src="chromedriver"
+            dst="$VIRTUALENV_DIR/bin/chromedriver"
             if [ "X$reinstall" != "X" ]; then
-                rm -rf "$minicms_repo_dir"
+                rm -rf "$src" "$dst" "$CHROMEDRIVER"
             fi
-            if [ ! -d "$minicms_repo_dir" ]; then
-                hg clone "$MINICMS_REPO" "$minicms_repo_dir" && \
-                    {
-                    cd "$minicms_repo_dir"
-                    python setup.py install --prefix="$VIRTUALENV_DIR" $quiet
-                }
+            if [ ! -f "$src" ]; then
+                wget "$CHROMEDRIVER" && \
+                    unzip "${src}"*.zip && \
+                    cp "$src" "$dst" && \
+                    log "# installed $dst"
             else
-                cd "$minicms_repo_dir"
-                hg pull -u $quiet
-                python setup.py install --prefix="$VIRTUALENV_DIR" $quiet
+                if [ ! -f "$dst" ]; then
+                    cp "$src" "$dst" && \
+                    log "# installed $dst"
+                fi
             fi
         } | prefix_output "> "
-        cd "$BASE_DIR"
     fi
-
 }
 ### [special install commands : end]
 
 
 
+### [check special installs : begin]
+check_special_install()
+{
+    # Echo "true" if the given specially-installed package is already
+    # present, "false" otherwise. chromedriver is the only package with
+    # a check so far; everything else reports "false".
+    local _package="$1"
+
+    if [ "$_package" = "chromedriver" ]; then
+        # chromedriver is considered installed when the binary exists
+        # in the virtualenv's bin directory
+        dst="$VIRTUALENV_DIR/bin/chromedriver"
+        if [ -f "$dst" ]; then
+            echo "true"
+        else
+            echo "false"
+        fi
+    else
+        echo "false"
+    fi
+}
+### [check special installs : end]
+
+
 
 ########################################################################
 
     echo_n() { echo -n ${1+"$@"}; }
 fi
 
-# User user a y/n/number question, with default answer (y,n,number)
+# Ask user a y/n/number question, with default answer (y,n,number)
 # For example:
 #   install=`ask_user "Install this package" y`
 #   index=`ask_user "what file [1-10]" 0`

File xvfb_control.sh

+#!/bin/sh
+#
+# Xvfb daemon script -- op: start/stop/restart/status
+#
+# * created: 2013-07-24 Kevin Chan <kefin@makedostudio.com>
+# * updated: 2013-07-24 kchan
+
+########################################################################
+myname=$(basename "$0")
+OLD_PWD=$PWD
+cd $(dirname "$0")
+mydir=${PWD%/}
+cd "$OLD_PWD"
+########################################################################
+
+########################################################################
+
+NAME="$myname"
+SCRIPTNAME="$NAME"
+
+GREP="grep"
+SED="sed"
+AWK="awk"
+KILLSIG="-9"
+
+DISPLAY="${DISPLAY-:99}"
+XVFB=$(which "Xvfb")
+XVFBARGS="$DISPLAY -ac -screen 0 1024x768x16"
+
+
+# print usage
+
+usage()
+{
+    # print command-line usage to stdout
+    printf '%s\n' "Usage: $SCRIPTNAME {start|stop|restart|status}"
+}
+
+# helper functions
+
+error()
+{
+    # print any arguments to stderr (prefixed with "###") and abort
+    if [ -n "$1" ]; then
+        echo >&2 "###" "$@"
+    fi
+    exit 1
+}
+
+check_file()
+{
+    # abort via error() if the given path is not an existing regular file
+    if [ ! -f "$1" ]; then
+        error "$(basename $0) - cannot find file: $1"
+    fi
+}
+
+is_integer()
+{
+    # echo 1 if $1 is a non-empty string of decimal digits, else echo 0
+    result=$(echo "$1" | $AWK '/^[0-9]+$/{ print; }')
+    if [ -z "$result" ]; then
+        echo 0
+    else
+        echo 1
+    fi
+}
+
+# check if daemon is running
+# * returns PID if running or "" if not running.
+
+check_daemon()
+{
+    # List every process as "pid args", drop the grep processes from the
+    # pipeline itself, match the exact Xvfb command line this script
+    # launches, keep only the first match, and extract its PID.
+    # Echo the PID only when it passes the integer sanity check;
+    # otherwise echo the empty string (daemon not running).
+    local pid=$(ps -eo "pid,args" | $GREP -v $GREP \
+              | $GREP -e "$XVFB $XVFBARGS" \
+              | $SED -ne '1p' | $AWK '{print $1}')
+    [ $(is_integer "$pid") -eq 1 ] && echo "$pid" || echo ""
+}
+
+# start daemon
+
+d_start()
+{
+    # Start Xvfb in the background, then poll check_daemon once per
+    # second (up to $max_wait seconds) until it reports a PID.
+    # Refuses to start if a matching daemon is already running; exits
+    # non-zero if the process never comes up.
+    local pid=$(check_daemon)
+    if [ ! -z "$pid" ]; then
+        echo "### Error: daemon seems to be already running: PID $pid."
+        echo "### Use 'restart' to stop and start the process again."
+        exit 1
+    fi
+    "$XVFB" $XVFBARGS >/dev/null 2>&1 &
+    max_wait=20
+    n=0
+    while [ $n -lt $max_wait ]
+    do
+        n=$(expr $n + 1)
+        sleep 1
+        pid=$(check_daemon)
+        if [ ! -z "$pid" ]; then
+            echo "$NAME started (PID: $pid)."
+            # leave the loop as soon as the daemon shows up
+            break
+        fi
+    done
+    if [ -z "$pid" ]; then
+        # typo fix: "occured" -> "occurred"
+        echo "### Error occurred -- process not started."
+        exit 1
+    fi
+}
+
+# stop daemon
+
+d_stop()
+{
+    # Stop the running Xvfb daemon with $KILLSIG (default -9), verify
+    # after a short grace period that it is actually gone, and clean up
+    # the pid file if one is configured.
+    local pid=$(check_daemon)
+    if [ ! -z "$pid" ]; then
+        kill "$KILLSIG" "$pid"
+        sleep 2
+        pid_result=$(check_daemon)
+        if [ ! -z "$pid_result" ]; then
+            # typo fix: "occured" -> "occurred"
+            echo "### Error occurred -- unable to stop process (PID $pid_result)."
+            exit 1
+        else
+            echo "$NAME with PID $pid stopped."
+        fi
+        # BUGFIX: $PIDFILE is never set anywhere in this script (only in
+        # the commented-out example at the bottom), so the previous
+        # unconditional `rm -rf "$PIDFILE"` expanded to `rm -rf ""`.
+        # Remove the pid file only when the variable is actually set.
+        if [ -n "$PIDFILE" ]; then
+            rm -f "$PIDFILE"
+        fi
+    else
+        echo "### Error: daemon not running (cannot find process)."
+    fi
+}
+
+
+# status
+
+d_status()
+{
+    # report whether the Xvfb daemon is currently running
+    local pid=$(check_daemon)
+    if [ ! -z "$pid" ]; then
+        echo "$NAME is running (PID: $pid)."
+    else
+        echo "Process is not running."
+    fi
+}
+
+
+# Accept at most one argument (the operation). Zero arguments also ends
+# up printing usage, via the `*` case below.
+if [ $# -gt 1 ]; then
+    usage
+    exit 1
+fi
+
+# Abort early if the Xvfb binary was not found on PATH ($XVFB empty or
+# not executable).
+[ ! -x "$XVFB" ] && error "Unable to find Xvfb"
+
+# Dispatch on the requested operation.
+case "$1" in
+    start)
+        d_start
+        ;;
+    stop)
+        d_stop
+        ;;
+    restart)
+        d_stop
+        d_start
+        ;;
+    status)
+        d_status
+        ;;
+    *)
+        usage
+        exit 1
+        ;;
+esac
+
+exit
+
+########################################################################
+# UNUSED
+#
+# example script from:
+# http://stackoverflow.com/questions/6183276/how-do-i-run-selenium-in-xvfb
+
+# #!/bin/bash
+#
+# XVFB=/usr/bin/Xvfb
+# XVFBARGS="$DISPLAY -ac -screen 0 1024x768x16"
+# PIDFILE=${HOME}/xvfb_${DISPLAY:1}.pid
+# case "$1" in
+#     start)
+#         echo -n "Starting virtual X frame buffer: Xvfb"
+#         /sbin/start-stop-daemon --start --quiet --pidfile $PIDFILE --make-pidfile --background --exec $XVFB -- $XVFBARGS
+#         echo "."
+#         ;;
+#     stop)
+#         echo -n "Stopping virtual X frame buffer: Xvfb"
+#         /sbin/start-stop-daemon --stop --quiet --pidfile $PIDFILE
+#         echo "."
+#         ;;
+#     restart)
+#         $0 stop
+#         $0 start
+#         ;;
+#     *)
+#         echo "Usage: /etc/init.d/xvfb {start|stop|restart}"
+#         exit 1
+# esac
+# exit 0