Commits

Brent Tubbs  committed 4f82c2e

midway through 0.3.0 rewrite

  • Participants
  • Parent commits e3f3f41

Comments (0)

Files changed (10)

 
 setup(
     name='silk-deployment',
-    version='0.2.9',
+    version='0.3.0',
     author='Brent Tubbs',
     author_email='brent.tubbs@gmail.com',
 	packages=find_packages(),
     include_package_data=True,
     entry_points = {
         'console_scripts': [
-            'silk = silk.utils:cmd_dispatcher',
-            'freeze2yaml = silk.utils:freeze_2_yaml',
-            'yaml2freeze = silk.utils:yaml_2_freeze',
+            'silk = silk.lib:cmd_dispatcher',
         ],
     },
 	install_requires = [
         'gunicorn',
         'CherryPy',
-        'Fabric >= 0.9.2',
+        'Fabric >= 1.0.1',
         'PyYAML',
         'silk-config',
 	],

File silk/cfg_templates/nginx.conf

-server {
-    listen   80;
-    server_name %(nginx_hosts)s;
-
-    access_log  %(remote_root)s/logs/ngaccess.log;
-    error_log  %(remote_root)s/logs/ngerror.log;
-    location / {
-        proxy_pass http://%(bind)s;
-        proxy_redirect              off;
-        proxy_set_header            Host $host;
-        proxy_set_header            X-Real-IP $remote_addr;
-        proxy_set_header            X-Forwarded-For $proxy_add_x_forwarded_for;
-        client_max_body_size        10m;
-        client_body_buffer_size     128k;
-        proxy_connect_timeout       90;
-        proxy_send_timeout          90;
-        proxy_read_timeout          90;
-        proxy_buffer_size           4k;
-        proxy_buffers               4 32k;
-        proxy_busy_buffers_size     64k;
-        proxy_temp_file_write_size  64k;
-    }
-    %(nginx_static)s
-}
-

File silk/cfg_templates/nginx_root.conf

-user www-data;
-worker_processes  4;
-
-error_log  /var/log/nginx/error.log;
-pid        /var/run/nginx.pid;
-
-events {
-    worker_connections  1024;
-    # multi_accept on;
-}
-
-http {
-    include       /etc/nginx/mime.types;
-
-    access_log	/var/log/nginx/access.log;
-
-    sendfile        on;
-    #tcp_nopush     on;
-
-    #keepalive_timeout  0;
-    keepalive_timeout  65;
-    tcp_nodelay        on;
-
-    gzip  on;
-    gzip_disable "MSIE [1-6]\.(?!.*SV1)";
-
-    #includes that come with stock nginx
-    include /etc/nginx/conf.d/*.conf;
-    include /etc/nginx/sites-enabled/*;
-
-    #include config files from all silk sites
-    include %(srv_root)s/*/conf/nginx.conf;
-}

File silk/cfg_templates/supervisord.conf

-[program:%(site)s]
-command=%(cmd)s
-directory=%(remote_root)s
-user=nobody
-autostart=true
-autorestart=true
-stdout_logfile=%(remote_root)s/logs/supervisor.log
-redirect_stderr=True
-environment=%(process_env)s

File silk/cfg_templates/supervisord_root.conf

-; supervisor config file
-
-[unix_http_server]
-file=/var/run//supervisor.sock   ; (the path to the socket file)
-chmod=0700                       ; socket file mode (default 0700)
-
-[inet_http_server]
-port = 127.0.0.1:9001
-
-[supervisord]
-logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
-pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
-childlogdir=/var/log/supervisor            ; ('AUTO' child log dir, default $TEMP)
-
-; the below section must remain in the config file for RPC
-; (supervisorctl/web interface) to work, additional interfaces may be
-; added by defining them in separate rpcinterface: sections
-[rpcinterface:supervisor]
-supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
-
-[supervisorctl]
-serverurl=unix:///var/run//supervisor.sock ; use a unix:// URL  for a unix socket
-
-; The [include] section can just contain the "files" setting.  This
-; setting can list multiple files (separated by whitespace or
-; newlines).  It can also contain wildcards.  The filenames are
-; interpreted as relative to this file.  Included files *cannot*
-; include files themselves.
-
-[include]
-files = %(srv_root)s/*/conf/supervisord.conf

File silk/conf_templates/nginx.conf

+server {
+    listen   80;
+    server_name %(nginx_hosts)s;
+
+    access_log  %(root)s/logs/ngaccess.log;
+    error_log  %(root)s/logs/ngerror.log;
+    location / {
+        proxy_pass http://%(bind)s;
+        proxy_redirect              off;
+        proxy_set_header            Host $host;
+        proxy_set_header            X-Real-IP $remote_addr;
+        proxy_set_header            X-Forwarded-For $proxy_add_x_forwarded_for;
+        client_max_body_size        10m;
+        client_body_buffer_size     128k;
+        proxy_connect_timeout       90;
+        proxy_send_timeout          90;
+        proxy_read_timeout          90;
+        proxy_buffer_size           4k;
+        proxy_buffers               4 32k;
+        proxy_busy_buffers_size     64k;
+        proxy_temp_file_write_size  64k;
+    }
+    %(nginx_static)s
+}
+

File silk/conf_templates/supervisord.conf

+[program:%(site)s_%(dts)s]
+command=%(cmd)s
+directory=%(root)s
+user=nobody
+autostart=true
+autorestart=true
+stdout_logfile=%(root)s/logs/supervisor.log
+redirect_stderr=True
+environment=%(process_env)s

File silk/fabfile.py

 import sys
 import os
 import datetime
-import pkg_resources
+import time
 import posixpath
 import tempfile
 import random
+import re
 
 import yaml
 from fabric.api import *
+from fabric.colors import green, red, yellow
 from fabric.contrib.files import exists, upload_template
 
 import silk.lib
-from silk.utils import _get_site_deps
+
+
+def _join(*args):
+    """Convenience wrapper around posixpath.join to make the rest of our
+    functions more readable."""
+    return posixpath.join(*args)
 
 SRV_ROOT = '/srv'
 DEFAULT_ROLLBACK_CAP = 3
 DTS_FORMAT = '%Y%m%d_%H%M%S'
 NGINX_SITE_DIR = '/etc/nginx/sites-enabled'
 
-def _get_silk_deps():
-    silk_deps_file = pkg_resources.resource_filename('silk', 'deps.yaml')
-    return yaml.safe_load(open(silk_deps_file).read())
-
 def _set_vars():
     """
     Loads deployment settings into Fabric's global 'env' dict
     """
-    env.local_root = silk.lib.get_site_root(os.getcwd())
+    env.local_root = silk.lib.get_root(os.getcwd())
     env.config = silk.lib.get_site_config(env.local_root)
-    dts = datetime.datetime.now().strftime(DTS_FORMAT)
+    env.dts = datetime.datetime.now().strftime(DTS_FORMAT)
+    
     if len(env.roles) == 1:
         env.config.update(silk.lib.get_role_config(env.roles[0]))
-        env.site = env.config['site']
-        env.remote_root = '/'.join([SRV_ROOT, '%s_%s' % (env.site, dts)])
-        env.envdir = '/'.join([env.remote_root, 'env'])
-        env.rollbackdir = '/'.join([SRV_ROOT, 'rollbacks'])
-        env.deploytime = dts
-        env.config['bind'] = env.config['gunicorn'].get('bind', 'unix:/tmp/%s.sock'
-                                                        % env.site)
-        env.config['silk_deps'] = _get_silk_deps()
-        env.config['site_deps'] = _get_site_deps(env.local_root)
+    env.site = env.config['site']
+    env.deployment = '%s_%s' % (env.site, env.dts)
+    env.root = _join(SRV_ROOT, env.deployment)
+
+    env.envdir = _join(env.root, 'env')
+    env.rollback_cap = env.config.get('rollback_cap', DEFAULT_ROLLBACK_CAP)
+
+    # Set up gunicorn config
+    default_bind = silk.lib.GUNICORN_BIND_PATTERN % env.deployment
+    if 'gunicorn' in env.config:
+        env.config['bind'] = env.config['gunicorn'].get('bind', default_bind)
+    else:
+        env.config['bind'] = default_bind 
 
 _set_vars()
 
     env.roledefs[role] = _get_hosts
 # END UGLY MAGIC
 
-def _put_dir(local_dir, remote_dir):
+def _put_dir(local_dir, remote_dir, exclude=''):
     """
     Copies a local directory to a remote one, using tar and put. Silently
     overwrites remote directory if it exists, creates it if it does not
     exist.
     """
-    local_tgz = "/tmp/fabtemp.tgz"
-    remote_tgz = os.path.basename(local_dir) + ".tgz"
-    local('tar -C "{0}" -czf "{1}" .'.format(local_dir, local_tgz))
-    put(local_tgz, remote_tgz)
-    local('rm -f "{0}"'.format(local_tgz))
-    run('rm -Rf "{0}"; mkdir -p "{0}"; tar -C "{0}" -xzf "{1}" && rm -f "{1}"'\
-        .format(remote_dir, remote_tgz))
+    local_tarball = "/tmp/fabtemp.tar.bz2"
+    remote_tarball = os.path.basename(env.site) + ".tar.bz2"
+
+    tar_cmd = 'tar -C "%(local_dir)s" -cjf "%(local_tarball)s" %(exclude)s .' % locals()
+    local(tar_cmd)
+    put(local_tarball, remote_tarball, use_sudo=True)
+    local('rm -f "%(local_tarball)s"' % locals())
+    sudo('rm -Rf "{0}"; mkdir -p "{0}"; tar -C "{0}" -xjf "{1}" && rm -f "{1}"'\
+        .format(remote_dir, remote_tarball))
+
+def _tmpfile():
+    """Generates a random filename on the remote host.  Useful for dumping
+    stdout to a file that you want to download or read later.  Assumes the
+    remote host has a /tmp directory."""
+    chars = "abcdefghijklmnopqrstuvwxyz1234567890"
+    length = 20
+    randompart = "".join([random.choice(chars) for x in xrange(20)])
+    return "/tmp/silk_tmp_%s" % randompart
 
 def _get_blame():
     """
     """
     Writes blame file on remote host.
     """
-    blamefile = tempfile.NamedTemporaryFile()
-    blamefile.write(_get_blame())
-    blamefile.seek(0) # Rewind the file so that the putter can read it.
-    remote_blame = '/'.join([env.remote_root, 'blame.yaml'])
-    put(blamefile.name, remote_blame)
+    blamefile_name = _tmpfile()
+    blamefile = open(blamefile_name, 'w')
+    blame_txt = _get_blame()
+    blamefile.write(blame_txt)
     blamefile.close()
+    remote_blame = _join(env.root, 'blame.yaml')
+    put(blamefile_name, remote_blame)
+    local('rm %s' % blamefile_name)
 
     # Fix the permissions on the remote blame file
     sudo('chmod +r %s' % remote_blame)
 
-def reload():
-    """
-    Reloads supervisord and nginx configs.
-    """
-    print "RELOADING CONFIGS"
-    sudo('supervisorctl reload')
-    sudo('/etc/init.d/nginx reload')
 
-def restart():
-    """
-    Restarts nginx and supervisord.  Normally not needed (reload() is enough)
-    """
-    print "RESTARTING SERVICES"
-    sudo('/etc/init.d/supervisor stop; /etc/init.d/supervisor start')
-    sudo('/etc/init.d/nginx restart')
-
-def archive():
-    """
-    Creates rollback archive of already-deployed site.  Rotates old rollback files.
-    """
-    ROLLBACK_CAP = env.config.get('rollback_cap', DEFAULT_ROLLBACK_CAP)
-    if ROLLBACK_CAP > 0:
-      print "CREATING ROLLBACK"
-      if not exists(env.rollbackdir, use_sudo=True):
-          sudo('mkdir -p %s' % env.rollbackdir)
-
-      template_vars = {
-          'rollback_cap': ROLLBACK_CAP,
-          'srv_root': SRV_ROOT,
-      }
-
-      template_vars.update(env)
-
-      oldest_rollback = '%(rollbackdir)s/%(site)s-rollback_%(rollback_cap)s.tar.bz2' % template_vars
-      #first delete the oldest rollback if present
-      if exists(oldest_rollback):
-          sudo('rm %s' % oldest_rollback)
-
-      #then increment the numbers on the existing rollbacks
-      for i in xrange(ROLLBACK_CAP - 1, 0, -1):
-          rollback_file = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i)
-          if exists(rollback_file):
-              newname = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i + 1)
-              sudo('mv %s %s' % (rollback_file, newname))
-
-      #now archive env.remote_root if it exists
-      if exists(env.remote_root):
-          sudo('tar -cjf %(rollbackdir)s/%(site)s-rollback_1.tar.bz2 --exclude "*.log" -C %(srv_root)s %(site)s' % template_vars)
-
-def rollback():
-    """
-    Untars most recent rollback archive and sets it running.
-    """
-    print "ROLLING BACK"
-    ROLLBACK_CAP = env.config.get('rollback_cap', DEFAULT_ROLLBACK_CAP)
-    rollback_file = '%s/%s-rollback_1.tar.bz2' % (env.rollbackdir, env.site)
-    if exists(rollback_file):
-        #unzip in a tmp dir
-        tmpdir = os.path.join('/tmp', 'rollback-%s' % env.site)
-        if exists(tmpdir, use_sudo=True):
-            sudo('rm %s -rf' % tmpdir)
-        sudo('mkdir %s' % tmpdir)
-        sudo('tar -xjf %s -C %s' % (rollback_file, tmpdir))
-
-        #move current code into oldddir
-        olddir = os.path.join('/tmp', 'old-%s' % env.site)
-        if exists(env.remote_root, use_sudo=True):
-            sudo('mv %s %s' % (env.remote_root, olddir))
-
-        #move new code into srvdir
-        sudo('mv %s/%s %s' % (tmpdir, env.site, env.remote_root))
-
-        #remove olddir
-        if exists(olddir, use_sudo=True):
-            sudo('rm %s -rf' % olddir)
-
-        #clean out the rollback file we just unzipped
-        sudo('rm -rf %s' % rollback_file)
-
-        #decrement the other rollback files
-        for i in xrange(2, ROLLBACK_CAP + 1, 1):
-            oldname = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i)
-            newname = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i - 1)
-            if exists(oldname):
-                sudo('mv %s %s' % (oldname, newname))
-        reload()
-    else:
-        sys.exit('Error: %s not found' % rollback_file)
-
-#TODO: rebuild virtualenv if it exists but the python version is wrong
-def create_virtualenv():
-    """
-    Creates a virtualenv for the site.  Automatically builds egenix-mx-tools in it, since
-    pip doesn't seem able to install that.
-    """
-    print "CREATING VIRTUALENV"
-    if not exists(env.remote_root, use_sudo=True):
-        sudo('mkdir -p %s' % env.remote_root)
-    sudo('virtualenv --no-site-packages --python=%s %s' % (env.config['runtime'], env.envdir))
-    build_mx_tools()
-
-def install_deps():
-    """
-    Wraps the apt deps, virtualenv creation, and python deps functions to ensure
-    that things are done in the right order.
-    """
-    print "INSTALLING DEPENDENCIES"
-    install_apt_deps()
-    if not exists(env.envdir, use_sudo=True):
-        create_virtualenv()
-    install_python_deps()
-
-def build_mx_tools():
-    """
-    Builds and install egenix-mx-tools into virtualenv
-    """
-    #egenix-mx-tools includes the mxdatetime module, which is
-    #a psycopg2 dependency.  Unfortunately it's not packaged in
-    #a way that pip can install.  So we build it here instead
-    print "INSTALLING MX TOOLS"
-    build_dir = "/tmp/egenix_build"
-    if not exists(build_dir, use_sudo=True):
-        sudo('mkdir -p %s' % build_dir)
-    with cd(build_dir):
-        #download the tools
-        sudo('wget http://downloads.egenix.com/python/egenix-mx-base-3.1.3.tar.gz')
-        #unpack
-        sudo('tar xvf egenix-mx-base-3.1.3.tar.gz')
-    with cd(os.path.join(build_dir, 'egenix-mx-base-3.1.3')):
-        #install into the virtualenv
-        sudo('%s setup.py install' % os.path.join(env.envdir, 'bin', 'python'))
-    sudo('rm -rf %s' % build_dir)
-
-def push_code():
-    """
-    Pushes site to remote host
-    """
-    print "PUSHING CODE TO HOST"
-    if exists(env.remote_root):
-        sudo('rm %s -rf' % env.remote_root)
-    _put_dir(env.local_root, env.remote_root)
-
-def _upload_config_template(template, dest, context):
-    #first try to load the template from the local cfg_templates dir.
+def _write_template(template, dest, context):
+    #first try to load the template from the local conf_templates dir.
     #if it's not there then try loading from pkg_resources
     path = silk.lib.get_template_path(template, env.local_root)
     upload_template(path, dest, context=context, use_sudo=True)
 
-def _get_nginx_static_snippet(url_path, local_path):
-    return """
-    location %(url_path)s {
-        alias %(local_path)s;
-    }
-    """ % locals()
-
-def _write_templates(template_list, template_vars):
-    for pair in template_list:
-        src, dest = pair
-        _upload_config_template(
-            src,
-            dest,
-            context = template_vars
-        )
-
 def _ensure_dir(remote_path):
     if not exists(remote_path, use_sudo=True):
-        sudo('mkdir %s' % remote_path)
+        sudo('mkdir -p %s' % remote_path)
 
 def _format_supervisord_env(env_dict):
     """Takes a dictionary and returns a string in form
       #return empty string
       return ''
 
-def write_config():
-    """
-    Creates and upload config files for nginx, supervisord, and blame.yaml
-    """
-    print "WRITING CONFIG"
-    nginx_static = ''
-    static_dirs = env.config.get('static_dirs', None)
-    if static_dirs:
-      for item in static_dirs:
-          nginx_static += _get_nginx_static_snippet(
-              item['url_path'],
-              #system_path may be a full path, or relative to remote_root
-              posixpath.join(env.remote_root, item['system_path'])
-          )
-    template_vars = {
-        'cmd': silk.lib.get_gunicorn_cmd(env.config, bin_dir='%s/bin' % (env.envdir)),
-        'nginx_static': nginx_static,
-        'nginx_hosts': ' '.join(env.config['listen_hosts']),
-        'process_env': _format_supervisord_env(env.config['env']),
-        'srv_root': SRV_ROOT,
-    }
-    template_vars.update(env)
-    template_vars.update(env.config)
-    config_dir = '/'.join([env.remote_root, 'conf'])
-    #make sure the conf and logs dirs are created
-    _ensure_dir(config_dir)
-    _ensure_dir('/'.join([env.remote_root, 'logs']))
-
-    template_list = (
-        ('supervisord.conf','/'.join([config_dir, 'supervisord.conf'])),
-        ('nginx.conf','/'.join([NGINX_SITE_DIR, 'silk_%s.conf' % env.site])),
-    )
-    _write_templates(template_list, template_vars)
-    _write_blame()
-
-def cleanup():
-    """
-    Removes the old project dir.  (But you still have a rollback!)
-    """
-    print "CLEANING UP"
-    #do this last to minimize time between taking down old site and setting up new one
-    #since "mv" is faster than "cp -r" or "rm -rf"
-    olddir = '/'.join(['/tmp', 'old-%s' % env.site])
-    sudo('rm %s -rf' % olddir)
-
 def server_setup():
     """
     Installs nginx and supervisord on remote host.  Sets up nginx and
     supervisord global config files.
     """
     install_apt_deps()
-    template_list = (
-        ('supervisord_root.conf', '/etc/supervisor/supervisord.conf'),
-    )
-    _write_templates(template_list, env)
-    restart()
 
-def build():
+def push_code():
+    # Push the local site to the remote root, excluding files that we don't
+    # want to leave cluttering the production server
+    _green("PUSHING CODE")
+    exclude = ("--exclude=site.yaml --exclude=roles "
+               "--exclude=requirements.txt")
+    _put_dir(env.local_root, env.root, exclude)
 
-    #- check that you're running on ubuntu
-    if not silk.lib.os_is_ubuntu():
-        sys.exit('This command can only be run on Ubuntu')
+def create_virtualenv():
+    """Create a virtualenv inside the remote root"""
+    if 'runtime' in env.config:
+        pyversion = '--python=%s' % env.config['runtime']
+    else:
+        pyversion = ''
+    cmd = "virtualenv --no-site-packages %s %s" % (pyversion, env.envdir)
+    sudo(cmd)
+
+# TODO: Add a means of cleaning out old pybundles.  
+def install_bundle():
+    """Push the pybundle to the remote host and install it into the virtualenv"""
+    reqs_file = os.path.join(env.local_root, 'requirements.txt')
+    if not os.path.isfile(reqs_file):
+        sys.exit('No requirements.txt file found')
+    reqs = open(reqs_file, 'r').read()
+
+    # Push the pybundle, if necessary
+    _ensure_dir(_join(SRV_ROOT, 'bundles'))
+    remote_pybundle = _join(SRV_ROOT, 'bundles', silk.lib.get_pybundle_name(reqs))
+    if not exists(remote_pybundle):
+        # If bundle hasn't already been made, then make it.
+        pybundle = silk.lib.get_pybundle_path(reqs)
+        if not os.path.isfile(pybundle):
+            _green("BUILDING PYBUNDLE")
+            archive_folder = silk.lib.get_local_archive_dir()
+            if not os.path.isdir(archive_folder):
+                os.makedirs(archive_folder)
+            
+            pypi = env.config.get('pypi', 'http://pypi.python.org/pypi')
+            local('pip bundle %s -r %s -i %s' % (pybundle, reqs_file, pypi))
+        _green("PUSHING PYBUNDLE %s" % pybundle)
+        put(pybundle, remote_pybundle, use_sudo=True)
     
-    #- Create local directory at /srv/<build_name>
-    local('mkdir %s' % env.remote_root)
+    _green("INSTALLING PYBUNDLE")
+    sudo('%s/bin/pip install %s' % (env.envdir, remote_pybundle))
 
-    #- Copy all the site code into the build directory
-    local('cp -r %s %s/' % (env.local_root, env.remote_root))
+def configure_supervisor():
+    """
+    Creates and upload config file for supervisord
+    """
+    _green("WRITING SUPERVISOR CONFIG")
 
-    #- create a virtualenv inside the build directory
-    python_version = env.config['runtime']
-    local_env = '%s/env' % env.remote_root
-    local('virtualenv --no-site-packages --python=%s %s' % (python_version,
-                                                            local_env))
+    template_vars = {
+        'cmd': silk.lib.get_gunicorn_cmd(env, bin_dir='%s/bin' % (env.envdir)),
+        'process_env': _format_supervisord_env(env.config['env']),
+        'srv_root': SRV_ROOT,
+    }
 
-    #- install all python packages into that virtualenv
-        # first create a tmp requirements file
-    dep_list = env.config['site_deps']['python_packages']
-    file_contents = '\n'.join(dep_list)
-    tmpfile = tempfile.NamedTemporaryFile()
-    tmpfile.write(file_contents)
+    template_vars.update(env)
+    template_vars.update(env.config)
+    #make sure the conf and logs dirs are created
+    _ensure_dir(config_dir)
+    _ensure_dir(_join(env.root, 'logs'))
 
-    pip = os.path.join(local_env, 'bin', 'pip')
-    local('%s -r %s' % (pip, tmpfile.name))
-    # Closing a tempfile will clean it up automatically.
-    tmpfile.close()
-    #- create conf and logs folders in the build directory
-    #- write supervisord log file for the build directory
+    # Put supervisord include in default location
+    dest = '/etc/supervisor/conf.d/%s.conf' % site
+    _write_template('supervisord.conf', dest, template_vars)
+
+def _green(text):
+    print green(text)
+
+def _red(text):
+    print red(text)
+
+def _yellow(text):
+    print yellow(text)
+
+def _list_dir(dirname):
+    """Given the path for a directory on the remote host, return its contents
+    as a python list."""
+    cmd = 'ls -1 %s' % dirname
+
+    txt = sudo(cmd)
+
+    return txt.split('\r\n')
+
+def _is_live(site):
+    """Returns True if site 'site' has a supervisord.conf entry, else False."""
+    conf_path = '/etc/supervisor/conf.d/%s.conf' % site
+    old_conf_path = _join(SRV_ROOT, site, 'conf', 'supervisord.conf')
+    return exists(conf_path) or exists(old_conf_path)
+
+def _is_running(procname, tries=3, wait=2):
+    """Given the name of a supervisord process, tell you whether it's running
+    or not.  If status is 'starting', will wait until status has settled."""
+    # Status return from supervisorctl will look like one of these:
+    # mysite_20110623_162319 RUNNING    pid 628, uptime 0:34:13
+    # mysite_20110623_231206 FATAL      Exited too quickly (process log may have details)
+
+    status_parts = sudo('supervisorctl status %s' % env.deployment).split()
+    if status_parts[1] == 'RUNNING':
+        return True
+    elif status_parts[1] == "FATAL":
+        return False
+    elif tries > 0:
+        # It's neither running nor dead yet, so try again
+        _yellow("Waiting %s seconds for process to settle" % wait)
+        time.sleep(wait)
+
+        # decrement the tries and double the wait time for the next check.
+        return _is_running(procname, tries - 1, wait * 2)
+    else:
+        return False
+
+def _is_this_site(name):
+    """Return True if 'name' matches our site name (old style Silk deployment
+    naming) or matches our name + timestamp pattern."""
+    
+    site_pattern = re.compile('%s_\d{8}_\d{6}' % env.site)
+    return (name == env.site) or (re.match(site_pattern, name) is not None)
+
+def cleanup():
+    """Deletes old versions of the site that are still sitting around."""
+    _green("CLEANING UP")
+
+    folders = _list_dir(SRV_ROOT)
+    rollbacks = [x for x in folders if _is_this_site(x)]
+
+    if len(rollbacks) > env.rollback_cap:
+        # There are more rollbacks than we want to keep around.  See if we can
+        # delete some.
+        suspects = rollbacks[:-(env.rollback_cap + 1)]
+        for folder in suspects:
+            fullpath = _join(SRV_ROOT, folder)
+            if not _is_live(fullpath):
+                sudo('rm -rf %s' % fullpath)
+
+def start_supervisor():
+    """Tell supervisord to read the new config, then start the new process."""
+    _green('STARTING PROCESS')
+    sudo('supervisorctl reread')
+    sudo('supervisorctl add %s' % env.deployment)
+
+def _get_nginx_static_snippet(url_path, local_path):
+    return """
+    location %(url_path)s {
+        alias %(local_path)s;
+    }
+    """ % locals()
+
+def configure_nginx():
+    """Writes a new nginx config include pointing at the newly-deployed site."""
+    _green("WRITING NGINX CONFIG")
+    nginx_static = ''
+    static_dirs = env.config.get('static_dirs', None)
+
+    # Use the static_dirs values from the site config to set up static file
+    # serving in nginx.
+    if static_dirs:
+      for item in static_dirs:
+          nginx_static += _get_nginx_static_snippet(
+              item['url_path'],
+              _join(env.root, item['system_path'])
+          )
+    template_vars = {
+        'nginx_static': nginx_static,
+        'nginx_hosts': ' '.join(env.config['listen_hosts']),
+    }
+    template_vars.update(env)
+    template_vars.update(env.config)
+
+    # Create nginx include here:
+    # /etc/nginx/sites-enabled/<sitename>.conf
+    nginx_file = _join('/etc', 'nginx', 'sites-enabled', '%s.conf' % env.site)
+    sudo('rm %s' % nginx_file)
+    _write_template('nginx.conf', nginx_file, template_vars)
+
+def switch_nginx():
+    _green("LOADING NEW NGINX CONFIG")
+
+    # Check if there is an old-style version (within the site root) of the
+    # nginx config laying around, and rename it to something innocuous if so.
+    old_nginx = _join(SRV_ROOT, env.site, 'conf', 'nginx.conf')
+    if exists(old_nginx):
+        sudo('mv %s %s' % (old_nginx, "%s_disabled" % old_nginx))
+
+    # Tell nginx to rescan its config files
+    sudo('/etc/init.d/nginx reload')
+
+def stop_other_versions():
+    """Stop other versions of the site that are still running, and disable their
+    configs."""
+    proclist = sudo('supervisorctl status').split('\r\n')
+
+    # filter proclist to include only versions of our site
+    proclist = [x for x in proclist if _is_this_site(x)]
+
+    # stop each process left in proclist that isn't the current one
+    for proc in proclist:
+        # We assume that spaces are not allowed in proc names
+        procparts = proc.split()
+        procname = procparts[0]
+        procstatus = procparts[1]
+        if procname != env.deployment:
+            # Stop the process
+            if procstatus == "RUNNING":
+                sudo('supervisorctl stop %s' % procname)
+
+            # Remove it from live config
+            sudo('supervisorctl remove %s' % procname)
+
+            # Remove its supervisord config file
+            conf_file = _join(SRV_ROOT, procname, 'conf', 'supervisord.conf')
+            if exists(conf_file):
+                sudo('rm %s' % conf_file)
+    sudo('supervisorctl reread')
+
+def congrats():
+    """Congratulate the user and print a link to the site."""
+    link0 = "http://%s" % env.config['listen_hosts'][0]
+    msg = ("SUCCESS!  I think.  Check that the site is running by browsing "
+           "to this url:\n\n%s" % link0)
+    _green(msg)
 
 def push():
     """
     configured, this function will put your site on the remote host and get it
     running.
     """
+
+    # push site code and pybundle to server, in timestamped folder
     push_code()
-    write_config()
-    reload()
-    cleanup()
+    # make virtualenv on the server and run pip install on the pybundle
+    create_virtualenv()
+    install_bundle()
+    _write_blame()
+    # write supervisord config for the new site
+    configure_supervisor()
 
-def dump_config():
-    """
-    Just loads up the config and prints it out. For testing.
-    """
-    print env.config
+    ##then the magic
+
+    ##silk starts up supervisord on the new site
+    start_supervisor()
+    # checks that the new site is running (by doing a supervisord xmlrpc
+    # request)
+    if _is_running(env.deployment):
+        _green("Site is running.  Proceeding with nginx switch.")
+        # if the site's running fine, then silk configures nginx to forward requests
+        # to the new site 
+        configure_nginx()
+        switch_nginx()
+        stop_other_versions()
+        cleanup()
+        congrats()
+    else:
+        _red("Process failed to start cleanly.  Off to the log files!")
 import copy
 import os
 import pkg_resources
+import subprocess
+import sys
+import time
+import shutil
+import hashlib
+from signal import SIGTERM, SIGINT
+
+import app_container
 
 from app_container import *
 
-def get_gunicorn_cmd(site_config, bin_dir=''):
-    gconfig = copy.copy(site_config['gunicorn'])
+GUNICORN_DEFAULTS = {
+    'workers': 2,
+    'log-level': 'info',
+    'name': 'gunicorn',
+    'debug': 'false',
+}
+
+# Format for the 'bind' string we'll be making later, sticking the site name +
+# timestamp in the wildcard part.  Used here for generating the gunicorn cmd,
+# and in the fabfile for plugging into the nginx config.
+GUNICORN_BIND_PATTERN = 'unix:/tmp/%s.sock'
+
+def get_gunicorn_cmd(site_env, bin_dir=''):
+    """Given a copy of Fabric's state in site_env, configure and return the
+    gunicorn cmd line needed to run the site"""
+    site_config = site_env.config
+    gconfig = copy.copy(GUNICORN_DEFAULTS)
+    gconfig.update(site_config['gunicorn'])
 
     # Default to using a unix socket for nginx->gunicorn
-    gconfig['bind'] = gconfig.get('bind',
-                                  'unix:/tmp/%s.sock' % site_config['site'])
+    default_bind = GUNICORN_BIND_PATTERN % site_env.deployment
+    gconfig['bind'] = gconfig.get('bind', default_bind)
 
-    # Default to using the site name in the procname
-    gconfig['name'] = gconfig.get('name', site_config['site'])
+    # Default to using the site name and timestamp in the procname
+    gconfig['name'] = site_env.deployment 
 
     debug = gconfig.pop('debug', None)
     options = ' '.join(['--%s %s' % (x, y) for x, y in gconfig.iteritems()])
         cmd = '%s/%s' % (bin_dir, cmd)
     return cmd
 
-def os_is_ubuntu():
-    """
-    Examines /etc/issue and looks for the apt-get program to determine whether
-    the current OS is ubuntu.
-    """
-    if not os.path.isfile('/etc/issue'):
-        return False
-    elif not open('/etc/issue', 'r').read().startswith('Ubuntu'):
-        return False
-    elif os.popen('which apt-get').read() == '':
-        return False
+def get_root(start_dir):
+    testfile = os.path.join(start_dir, 'site.yaml')
+    if os.path.isfile(testfile):
+        return start_dir
     else:
-        return True
+        parent_dir = os.path.split(start_dir)[0]
+        if parent_dir != start_dir:
+            return get_root(parent_dir)
+        else:
+            return None
 
-def get_config(site_root=None, role=None):
def get_config(root=None, role=None):
    """
    Return the merged site and role config.  Tries hard to come up with
    something if you don't pass in a root or role: the root is searched
    for above os.getcwd(), and the role is pulled from the command line
    and/or environment variables.
    """
    # If no root given, then look above os.getcwd()
    if root is None:
        root = get_root(os.getcwd())

    # If role is None, then try getting it from cmd line and/or env vars ourselves
    role = role or get_role()

    config = get_site_config(root)

    role_config = get_role_config(role)
    if isinstance(role_config, dict):
        # BUG FIX: this previously called config.update() with no
        # arguments, which silently discarded the role config instead of
        # merging it over the site config.
        config.update(role_config)
    return config
-def get_template_path(template, site_root=None):
+
+def get_template_path(template, root=None):
     """
     Returns path of template from site conf_templates dir, if found there, else
     returns template path from silk's conf_templates dir.
     """
-    if site_root:
-        localpath=os.path.join(site_root, 'conf_templates', template)
+    if root:
+        localpath=os.path.join(root, 'conf_templates', template)
         if os.path.isfile(localpath):
             return localpath
     pkgpath=pkg_resources.resource_filename('silk', 'conf_templates/%s' % template)
     txt = open(template_path, 'r').read()
     return txt % context
 
-def get_pip_cmd(site_config):
-    pypi = site_config.get('pypi', 'http://pypi.python.org/simple/')
-    return 'pip install -i %s' % pypi
+def _run(args, kill_signal, cwd=os.getcwd(), env={}):
+    env.update(os.environ)
+    proc = subprocess.Popen(args, cwd=cwd, env=env)
+    try:
+        proc.wait()
+    except KeyboardInterrupt as e:
+        print "KeyboardInterrupt"
+        proc.send_signal(kill_signal)
+    except Exception as e:
+        print e
+        proc.send_signal(kill_signal)
+
def run_fab(args):
    """Invoke Fabric with the given argv, swapping 'fab' in as the
    program name (argv[0]) before handing off."""
    args[0] = 'fab'
    _run(args, SIGTERM)
+
def run_devserver():
    """
    Run the site under gunicorn using Silk's internal dev wsgi app, which
    also mounts the site's static dirs.  Expects SILK_ROOT and SILK_ROLE
    to be present in os.environ (cmd_dispatcher sets them).
    """
    # Overwrite the wsgi_app config var to point to our internal app that will
    # also mount the static dirs.
    root = os.environ['SILK_ROOT']
    role = os.environ['SILK_ROLE']
    config = silk.lib.get_config(root, role)
    config['wsgi_app'] = 'silk.devserver:app'

    cmd = silk.lib.get_gunicorn_cmd(config)

    subproc_env = {
        'SILK_ROOT': root,
        # BUG FIX: reuse the role we just read from the environment rather
        # than calling app_container.get_role() again, which can return
        # None (an invalid env value) and may disagree with SILK_ROLE.
        'SILK_ROLE': role,
    }

    # By adding our current environment to that used for the subprocess, we
    # ensure that the same paths will be used (such as those set by
    # virtualenv).  (The old comment here was garbled by a bad
    # search-and-replace.)
    subproc_env.update(os.environ)

    _run(cmd.split(), SIGINT, cwd=root, env=subproc_env)

    # This 1 second sleep lets the gunicorn workers exit before we show the
    # prompt again.
    time.sleep(1)
+
+def install_skel(sitename):
+    """Copies the contents of site_templates into the named directory (within cwd)"""
+    root = os.environ['SILK_ROOT']
+    #get the dir from pkg_resources
+    src = pkg_resources.resource_filename('silk', 'site_templates')
+    try:
+        shutil.copytree(src, os.path.join(os.getcwd(), sitename))
+    except OSError, e:
+        print e
+
def get_local_archive_dir():
    """Return the path of the per-user Silk archive directory (~/.silk)."""
    home = os.path.expanduser('~')
    return os.path.join(home, '.silk')
+
def get_pybundle_name(reqs):
    """Hash the requirements list to create a pybundle name.

    The whitespace-separated requirement tokens are sorted before hashing
    so the resulting name is stable regardless of their original order or
    surrounding whitespace.
    """
    tokens = sorted(reqs.strip().split())
    digest = hashlib.md5('\n'.join(tokens)).hexdigest()
    return "%s.pybundle" % digest
+
def get_pybundle_path(reqs):
    """Return the full path of the pybundle file that corresponds to the
    passed-in requirements text (inside the local archive dir)."""
    bundle = get_pybundle_name(reqs)
    return os.path.join(get_local_archive_dir(), bundle)
+
# Map subcommand names to local handler functions; any command not listed
# here is passed through to Fabric by cmd_dispatcher.
cmd_map = {
    'run': run_devserver,
    'skel': install_skel,
}
+
def cmd_dispatcher():
    """wraps 'fab', handles 'silk run'"""
    args = sys.argv
    try:
        cmd = args[1]

        # If a command is provided by cmd_map, use that.  Else pass through to
        # fabric.
        if cmd in cmd_map:
            # Stick some information about the role and root into the current env,
            # then call the local function in cmd_map.
            os.environ['SILK_ROLE'] = app_container.get_role() or ''
            # CONSISTENCY FIX: use the local get_root (this module *is*
            # silk.lib) just like the else-branch below does.
            os.environ['SILK_ROOT'] = get_root(os.getcwd()) or ''
            cmd_map[cmd]()
        else:
            # Use project-provided fabfile if present, else the one built into
            # Silk.  We'll have to trust that the project file imports ours.
            root = get_root(os.getcwd())
            # BUG FIX: get_root returns None outside a site; the old code
            # crashed in os.path.join(None, 'fabfile.py').
            site_fab = os.path.join(root, 'fabfile.py') if root else None
            if site_fab and os.path.isfile(site_fab):
                fabfile = site_fab
            else:
                fabfile = pkg_resources.resource_filename('silk', 'fabfile.py')
            args.extend(['--fabfile', fabfile])
            run_fab(args)

    except IndexError:
        # Print out help text.  Currently just prints it for the cmds specified
        # in the fabfile, which isn't great because it omits things like 'silk
        # run' and 'silk deps'.  Would be better to inspect the fabfile and
        # list the cmds/docstrings myself, right along the non-fabfile cmds
        # that Silk provides.  Or I could just make all the things I provide as
        # fab cmds.  That might be the simpler approach.
        run_fab(['fab', '-l'])

File silk/utils.py

-#!/usr/bin/env python
-import subprocess
-import sys
-import time
-import os
-import pkg_resources
-import shutil
-import tempfile
-from signal import SIGTERM, SIGINT
-
-import yaml
-
-import silk.lib
-import app_container
-
-def _run(args, kill_signal, cwd=os.getcwd(), env={}):
-    env.update(os.environ)
-    proc = subprocess.Popen(args, cwd=cwd, env=env)
-    try:
-        proc.wait()
-    except KeyboardInterrupt as e:
-        print "KeyboardInterrupt"
-        proc.send_signal(kill_signal)
-    except Exception as e:
-        print e
-        proc.send_signal(kill_signal)
-
-def run_fab(args):
-    args[0] = 'fab'
-    _run(args, SIGTERM)
-
-def run_devserver():
-    # Overwrite the wsgi_app config var to point to our internal app that will
-    # also mount the static dirs.
-    root = os.environ['SILK_ROOT']
-    role = os.environ['SILK_ROLE']
-    config = silk.lib.get_config(root, role)
-    config['wsgi_app'] = 'silk.devserver:app'
-
-    cmd = silk.lib.get_gunicorn_cmd(config)
-
-    subproc_env = {
-        'SILK_ROOT': root,
-        'SILK_ROLE': app_container.get_role(),
-    }
-
-    # By adding our current subproc_environment to that used for the subprocess, we
-    # ensure that the same paths will be used (such as those set by virtualsubproc_env)
-    subproc_env.update(os.environ)
-
-    _run(cmd.split(), SIGINT, cwd=root, env=subproc_env)
-
-    # This 1 second sleep lets the gunicorn workers exit before we show the
-    # prompt again.
-    time.sleep(1)
-
-def install_skel(sitename):
-    """Copies the contents of site_templates into the named directory (within cwd)"""
-    root = os.environ['SILK_ROOT']
-    #get the dir from pkg_resources
-    src = pkg_resources.resource_filename('silk', 'site_templates')
-    try:
-        shutil.copytree(src, os.path.join(os.getcwd(), sitename))
-    except OSError, e:
-        print e
-
-def freeze_2_yaml():
-    """Read lines of text from stdin and print a python_packages yaml list"""
-    lines = sys.stdin.read().split('\n')#split only on newlines, not spaces
-    lines = [line for line in lines if line]#remove empty lines
-    print yaml.safe_dump({'python_packages':lines}, default_flow_style=False)
-
-def yaml_2_freeze():
-    """Read lines of deps.yaml from stdin and print requirements.txt contents"""
-    txt = sys.stdin.read()
-    deps = yaml.safe_load(txt)
-    print '\n'.join(deps['python_packages'])
-
-def local_python_deps():
-    """Write a requirements.txt from deps.yaml file for pip, then run pip on it."""
-    root = os.environ['SILK_ROOT']
-    role = os.environ['SILK_ROLE']
-    config = silk.lib.get_config(root, role)
-    depfile = os.path.join(silk.lib.get_site_root(os.getcwd()), 'deps.yaml')
-    txt = open(depfile).read()
-    deps = yaml.safe_load(txt)
-    if deps['python_packages']:
-        reqs = '\n'.join(deps['python_packages'])
-
-        tmpfile = tempfile.NamedTemporaryFile()
-        tmpfile.write(reqs)
-        tmpfile.seek(0)
-
-        pip_cmd = silk.lib.get_pip_cmd(config)
-        cmd = '%s -r %s' % (pip_cmd, tmpfile.name)
-        _run(cmd.split(), SIGTERM)
-        tmpfile.close()
-    else:
-        print "No python deps listed."
-
-def _get_site_deps(local_root):
-    site_deps_file = os.path.join(local_root, 'deps.yaml')
-    return yaml.safe_load(open(site_deps_file).read())
-
-def local_apt_deps():
-    """
-    Installs system packages and build dependencies with apt.
-    """
-    root = os.environ['SILK_ROOT']
-    role = os.environ['SILK_ROLE']
-    deps = _get_site_deps(root)
-    if deps['apt_build_deps']:
-        cmd = 'sudo apt-get build-dep %s -y' % ' '.join(deps['apt_build_deps'])
-        _run(cmd.split(), SIGINT)
-    if deps['apt_packages']:
-        cmd = 'sudo apt-get install %s -y' % ' '.join(deps['apt_packages'])
-        _run(cmd.split(), SIGINT)
-
-def local_deps():
-    """Install local python deps, and ubuntu ones too if possible"""
-    local_python_deps()
-    local_apt_deps()
-
-
-cmd_map = {
-    'run': run_devserver,
-    'skel': install_skel,
-    'deps': local_deps
-}
-
-def cmd_dispatcher():
-    """wraps 'fab', handles 'silk run'"""
-    args = sys.argv
-    try:
-        cmd = args[1]
-
-        # If a command is provided by cmd_map, use that.  Else pass through to
-        # fabric.
-        if cmd in cmd_map:
-            # Stick some information about the role and root into the current env,
-            # then call the local function in cmd_map.
-            os.environ['SILK_ROLE'] = app_container.get_role() or ''
-            os.environ['SILK_ROOT'] = silk.lib.get_site_root(os.getcwd()) or ''
-            cmd_map[cmd]()
-        else:
-            # Use project-provided fabfile if present, else the one built into
-            # Silk.  We'll have to trust that the project file imports ours.
-            root = silk.lib.get_site_root(os.getcwd())
-            site_fab = os.path.join(root, 'fabfile.py')
-            if os.path.isfile(site_fab):
-                fabfile = site_fab
-            else:
-                fabfile = pkg_resources.resource_filename('silk', 'fabfile.py')
-            args.extend(['--fabfile', fabfile])
-            run_fab(args)
-
-    except IndexError:
-        # Print out help text.  Currently just prints it for the cmds specified
-        # in the fabfile, which isn't great because it omits things like 'silk
-        # run' and 'silk deps'.  Would be better to inspect the fabfile and
-        # list the cmds/docstrings myself, right along the non-fabfile cmds
-        # that Silk provides.  Or I could just make all the things I provide as
-        # fab cmds.  That might be the simpler approach.
-        run_fab(['fab', '-l'])
-
-# OK there's a problem with cmds that require arguments.  Some do, some don't,
-# and it's unclear whether I should be sending those on all or none or
-# distinguishing.