Brent Tubbs committed 31a0ac8 Merge

merge

Files changed (15)

 393575a388711ac92cbefa0f109f2cbee617c70b 0.2.91
+09d1b05e463c73e5652720699575a5e8f34ad6d3 0.3.0
+bd84725408308e6c96b18d9af9dd2e432a22b0dd 0.3.2
+ece623ea336f536845f379e9e6f4c2118b6b2e1f 0.3.3
+12d83b3af741816d43b489a4a65ad6f3bcf037ba 0.3.5
+d7defb04fc95b8073cc98363dced43861ca35594 0.3.6
+baf88a2bcdbb8eccbf744b0e5b32f0a69f143c09 0.3.7
+131684fbfe77e312505d063a63ef427f99cc5b08 0.3.8
+eb5c66adcf59ee05343d28f17b83f12e31a6f500 0.3.9
+cad883ea925cf627e376871b3c2a7a7a2ad6bb7b 0.3.10
 include README.rst
 include LICENSE.txt
 include silk/deps.yaml
+recursive-include silk/prereqs *
 recursive-include silk/conf_templates *
 recursive-include silk/site_templates *
 
 setup(
     name='silk-deployment',
-    version='0.2.91',
+    version='0.3.10',
     author='Brent Tubbs',
     author_email='brent.tubbs@gmail.com',
 	packages=find_packages(),
     include_package_data=True,
     entry_points = {
         'console_scripts': [
-            'silk = silk.utils:cmd_dispatcher',
-            'freeze2yaml = silk.utils:freeze_2_yaml',
-            'yaml2freeze = silk.utils:yaml_2_freeze',
+            'silk = silk.lib:cmd_dispatcher',
         ],
     },
 	install_requires = [
         'gunicorn',
         'CherryPy',
-        'Fabric == 0.9.2',
+        'Fabric >= 1.0.1',
         'PyYAML',
-        'silk-config',
+        'silk-config>=0.3.3,<0.4',
 	],
     url='http://bits.btubbs.com/silk-deployment',
     license='LICENSE.txt',

silk/conf_templates/nginx.conf

     listen   80;
     server_name %(nginx_hosts)s;
 
-    access_log  %(remote_root)s/logs/ngaccess.log;
-    error_log  %(remote_root)s/logs/ngerror.log;
+    access_log  %(root)s/logs/ngaccess.log;
+    error_log  %(root)s/logs/ngerror.log;
     location / {
         proxy_pass http://%(bind)s;
         proxy_redirect              off;

silk/conf_templates/supervisord.conf

-[program:%(site)s]
+[program:%(site)s_%(dts)s]
 command=%(cmd)s
-directory=%(remote_root)s
+directory=%(root)s
 user=nobody
 autostart=true
 autorestart=true
-stdout_logfile=%(remote_root)s/logs/supervisor.log
+stdout_logfile=%(root)s/logs/supervisor.log
 redirect_stderr=True
 environment=%(process_env)s
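Rendered, this template becomes one uniquely named [program:...] section per deployment. A minimal sketch of the substitution, with hypothetical values standing in for the template_vars dict that configure_supervisor() builds further down in the fabfile (Fabric's upload_template applies plain %-formatting to the same effect):

    # illustrative only: the real template lives in silk/conf_templates/
    template_vars = {
        'site': 'mysite',
        'dts': '20110623_162319',
        'cmd': '/srv/mysite_20110623_162319/env/bin/gunicorn ...',
        'root': '/srv/mysite_20110623_162319',
        'process_env': 'SILK_ROLE="prod"',
    }
    print open('supervisord.conf').read() % template_vars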

silk/devserver.py

 import os
 import sys
-
 import cherrypy
 
 import silk.lib

silk/extras/__init__.py

Empty file removed.

silk/extras/django.py

-from silk.fabfile import push as real_push
-from silk.utils import run_til_you_die as _run_til_you_die
-from signal import SIGINT
-import silk
-import os
-#SIGH
-
-#time to rewrite code that I wrote last night but is trapped on my laptop because I didn't do an hg addremove
-#SIGH
-#I think the only new file was django.py.
-#which includes collectstatic, and an overwritten push
-
-def _get_proj_dir(root):
-    """Ugly magic function to loop through subdirectories and return the first
-    one that looks like a Django project (has a settings.py)"""
-    #get list of current folder contents
-    paths = [os.path.join(root, folder) for folder in os.listdir(root) if os.path.isdir(os.path.join(os.path.join(root, folder)))]
-    for path in paths:
-        if os.path.isfile(os.path.join(path, 'settings.py')):
-            return path
-
-def collectstatic():
-    """Runs ./manage.py collectstatic, setting current Silk role 
-    as env var so it can be picked up in local_settings"""
-    args = ['./manage.py', 'collectstatic', '--settings=local_settings']
-    proj_dir = _get_proj_dir(silk.lib.get_site_root(os.getcwd()))
-    env_vars = {'SILK_ROLE': silk.lib.get_role()}
-    _run_til_you_die(args, SIGINT, proj_dir, env=env_vars)
-
-def push():
-    collectstatic()
-    real_push()

silk/extras/s3.py

-import os
-import boto
-from boto.s3.key import Key
-
-def push_s3(azn_key, azn_secret, s3bucket, localdir, s3dir):
-    """Recurses through localdir, uploading each file it finds into the corresponding
-    path in s3dir"""
-    conn = boto.connect_s3(azn_key, azn_secret)
-    bucket = conn.get_bucket(s3bucket)
-    tree = os.walk(localdir)
-    for folder in tree:
-        folderpath = folder[0]
-        subfiles = folder[2]
-        for file in subfiles:
-            localfile = os.path.join(folderpath, file)
-            trimmed_dir = folderpath[len(localdir):]
-            #os.path.normpath cleans out any double slashes //
-            #warning: if run from windows the slashes will get turned into backslashes
-            s3file = os.path.normpath("/".join((s3dir, trimmed_dir, file)))
-            print "pushing %s to %s" % (localfile, s3file)
-            k = Key(bucket)
-            k.key = s3file
-            k.set_contents_from_filename(localfile)
-            k.set_acl('public-read')
 import sys
 import os
 import datetime
+import time
+import posixpath
+import random
+import re
+import copy
+import yaml
 import pkg_resources
-import posixpath
-import tempfile
-import random
 
-import yaml
 from fabric.api import *
-from fabric.contrib.files import exists, upload_template
+from fabric.colors import green, red, yellow
+from fabric.contrib.files import exists, contains, upload_template
 
 import silk.lib
 
+def _join(*args):
+    """Convenience wrapper around posixpath.join to make the rest of our
+    functions more readable."""
+    return posixpath.join(*args)
+
 SRV_ROOT = '/srv'
 DEFAULT_ROLLBACK_CAP = 3
-
-def _get_silk_deps():
-    silk_deps_file = pkg_resources.resource_filename('silk', 'deps.yaml')
-    return yaml.safe_load(open(silk_deps_file).read())
-
-def _get_site_deps(local_root):
-    site_deps_file = os.path.join(local_root, 'deps.yaml')
-    return yaml.safe_load(open(site_deps_file).read())
+DTS_FORMAT = '%Y%m%d_%H%M%S'
+NGINX_SITE_DIR = '/etc/nginx/sites-enabled'
 
 def _set_vars():
     """
     Loads deployment settings into Fabric's global 'env' dict
     """
-    env.local_root = silk.lib.get_site_root(os.getcwd())
+    env.local_root = silk.lib.get_root(os.getcwd())
     env.config = silk.lib.get_site_config(env.local_root)
-    if len(env.roles) == 0:
-        sys.exit("ERROR: you must define a role with -R <rolename>")
-    elif len(env.roles) > 1:
-        sys.exit("ERROR: Silk only permits passing in one role at a time")
-    else:
+    env.dts = datetime.datetime.now().strftime(DTS_FORMAT)
+
+    if len(env.roles) == 1:
         env.config.update(silk.lib.get_role_config(env.roles[0]))
     env.site = env.config['site']
-    env.remote_root = '/'.join([SRV_ROOT, env.site])
-    env.envdir = '/'.join([env.remote_root, 'env'])
-    env.workdir = '/'.join(['/tmp', env.site])
-    env.rollbackdir = '/'.join([SRV_ROOT, 'rollbacks'])
-    env.deploytime = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
-    env.config['bind'] = env.config['gunicorn'].get('bind', 'unix:/tmp/%s.sock'
-                                                    % env.site)
-    env.config['silk_deps'] = _get_silk_deps()
-    env.config['site_deps'] = _get_site_deps(env.local_root)
+    env.deployment = '%s_%s' % (env.site, env.dts)
+    env.root = _join(SRV_ROOT, env.deployment)
+
+    env.envdir = _join(env.root, 'env')
+    env.rollback_cap = env.config.get('rollback_cap', DEFAULT_ROLLBACK_CAP)
+
+    # Use the default supervisord include location for the deployment's include file.
+    env.supd_conf_file = '/etc/supervisor/conf.d/%s.conf' % env.deployment
+
+    # Set up gunicorn config
+    env.default_bind = silk.lib.GUNICORN_BIND_PATTERN % env.deployment
+    if 'gunicorn' in env.config:
+        env.config['bind'] = env.config['gunicorn'].get('bind', env.default_bind)
+    else:
+        env.config['bind'] = env.default_bind
 
 _set_vars()
 
 # UGLY MAGIC
-# Here we're (ab)using Fabric's built in 'role' feature to work with the way 
-# we're loading context-specific config.  Could possibly use a refactor to 
+# Here we're (ab)using Fabric's built in 'role' feature to work with the way
+# we're loading context-specific config.  Could possibly use a refactor to
 # avoid Fabric roles altogether.
 def _get_hosts():
     """Return list of hosts to push to"""
     env.roledefs[role] = _get_hosts
 # END UGLY MAGIC
 
-def _put_dir(local_dir, remote_dir):
+def _tmpfile():
+    """Generates a random filename in /tmp.  Useful for dumping stdout to a
+    file that you want to download or read later.  Assumes the remote host has
+    a /tmp directory."""
+    chars = "abcdefghijklmnopqrstuvwxyz1234567890"
+    length = 20
+    randompart = "".join([random.choice(chars) for x in xrange(length)])
+    return "/tmp/silk_tmp_%s" % randompart
+
+def _put_dir(local_dir, remote_dir, exclude=''):
     """
     Copies a local directory to a remote one, using tar and put. Silently
     overwrites remote directory if it exists, creates it if it does not
     exist.
     """
-    local_tgz = "/tmp/fabtemp.tgz"
-    remote_tgz = os.path.basename(local_dir) + ".tgz"
-    local('tar -C "{0}" -czf "{1}" .'.format(local_dir, local_tgz))
-    put(local_tgz, remote_tgz)
-    local('rm -f "{0}"'.format(local_tgz))
-    run('rm -Rf "{0}"; mkdir -p "{0}"; tar -C "{0}" -xzf "{1}" && rm -f "{1}"'\
-        .format(remote_dir, remote_tgz))
+    tarball = "%s.tar.bz2" % _tmpfile()
+
+    tar_cmd = 'tar -C "%(local_dir)s" -cjf "%(tarball)s" %(exclude)s .' % locals()
+    local(tar_cmd)
+    put(tarball, tarball, use_sudo=True)
+    local('rm -f "%(tarball)s"' % locals())
+    sudo('rm -Rf "{0}"; mkdir -p "{0}"; tar -C "{0}" -xjf "{1}" && rm -f "{1}"'\
+        .format(remote_dir, tarball))
 
 def _get_blame():
     """
-    Returns a yaml file that contains the site config, plus some deployment
-    info.  The actual blame.yaml file will be written from this data.
+    Return information about this deployment, to be written as the "blame"
+    section in the site.yaml file.
     """
-    blame = [
-        {'deployed_by': env.user,
-        'deployed_from': os.uname()[1],
-        'deployed_at': datetime.datetime.now(),
-         'deployed_role': env.roles[0]},
-        {'config':env.config}
-    ]
-    return yaml.safe_dump(blame, default_flow_style=False)
+    return {'deployed_by': env.user,
+            'deployed_from': os.uname()[1],
+            'deployed_at': datetime.datetime.now(),
+            'deployed_role': env.roles[0]}
 
-def _write_blame():
-    """
-    Writes blame file on remote host.
-    """
-    blamefile = tempfile.NamedTemporaryFile()
-    blamefile.write(_get_blame())
-    blamefile.seek(0) # Rewind the file so that the putter can read it.
-    remote_blame = '/'.join([env.workdir, 'blame.yaml'])
-    put(blamefile.name, remote_blame)
-    blamefile.close()
+def _write_file(path, contents, use_sudo=True, chown=None):
+    file_name = _tmpfile()
+    file = open(file_name, 'w')
+    file.write(contents)
+    file.close()
+    put(file_name, path, use_sudo=use_sudo)
+    sudo('chmod +r %s' % path)
+    if chown:
+        sudo('chown %s %s' % (chown, path))
+    local('rm %s' % file_name)
 
-    # Fix the permissions on the remote blame file
-    sudo('chmod +r %s' % remote_blame)
+def _write_site_yaml():
+    """Writes the site.yaml file for the deployed site."""
+    # Make a copy of env.config
+    site_yaml_dict = copy.copy(env.config)
+    # add the blame in
+    site_yaml_dict['blame'] = _get_blame()
+    # write it to the remote host
+    file = _join(env.root, 'site.yaml')
+    _write_file(file, yaml.safe_dump(site_yaml_dict, default_flow_style=False))
 
-def reload():
-    """
-    Reloads supervisord and nginx configs.
-    """
-    print "RELOADING CONFIGS"
-    sudo('supervisorctl reload')
-    sudo('/etc/init.d/nginx reload')
-
-def restart():
-    """
-    Restarts nginx and supervisord.  Normally not needed (reload() is enough)
-    """
-    print "RESTARTING SERVICES"
-    sudo('/etc/init.d/supervisor stop; /etc/init.d/supervisor start')
-    sudo('/etc/init.d/nginx restart')
-
-def archive():
-    """
-    Creates rollback archive of already-deployed site.  Rotates old rollback files.
-    """
-    ROLLBACK_CAP = env.config.get('rollback_cap', DEFAULT_ROLLBACK_CAP)
-    if ROLLBACK_CAP > 0:
-      print "CREATING ROLLBACK"
-      if not exists(env.rollbackdir, use_sudo=True):
-          sudo('mkdir -p %s' % env.rollbackdir)
-
-      template_vars = {
-          'rollback_cap': ROLLBACK_CAP,
-          'srv_root': SRV_ROOT,
-      }
-
-      template_vars.update(env)
-
-      oldest_rollback = '%(rollbackdir)s/%(site)s-rollback_%(rollback_cap)s.tar.bz2' % template_vars
-      #first delete the oldest rollback if present
-      if exists(oldest_rollback):
-          sudo('rm %s' % oldest_rollback) 
-
-      #then increment the numbers on the existing rollbacks
-      for i in xrange(ROLLBACK_CAP - 1, 0, -1):
-          rollback_file = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i)
-          if exists(rollback_file):
-              newname = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i + 1)
-              sudo('mv %s %s' % (rollback_file, newname))
-
-      #now archive env.remote_root if it exists
-      if exists(env.remote_root):
-          sudo('tar -cjf %(rollbackdir)s/%(site)s-rollback_1.tar.bz2 --exclude "*.log" -C %(srv_root)s %(site)s' % template_vars)
-
-def rollback():
-    """
-    Untars most recent rollback archive and sets it running.
-    """
-    print "ROLLING BACK"
-    ROLLBACK_CAP = env.config.get('rollback_cap', DEFAULT_ROLLBACK_CAP)
-    rollback_file = '%s/%s-rollback_1.tar.bz2' % (env.rollbackdir, env.site)
-    if exists(rollback_file): 
-        #unzip in a tmp dir
-        tmpdir = os.path.join('/tmp', 'rollback-%s' % env.site)
-        if exists(tmpdir, use_sudo=True):
-            sudo('rm %s -rf' % tmpdir)
-        sudo('mkdir %s' % tmpdir)
-        sudo('tar -xjf %s -C %s' % (rollback_file, tmpdir))
-
-        #move current code into oldddir
-        olddir = os.path.join('/tmp', 'old-%s' % env.site)
-        if exists(env.remote_root, use_sudo=True):
-            sudo('mv %s %s' % (env.remote_root, olddir))
-
-        #move new code into srvdir
-        sudo('mv %s/%s %s' % (tmpdir, env.site, env.remote_root))
-
-        #remove olddir
-        if exists(olddir, use_sudo=True):
-            sudo('rm %s -rf' % olddir)
-
-        #clean out the rollback file we just unzipped
-        sudo('rm -rf %s' % rollback_file)
-
-        #decrement the other rollback files
-        for i in xrange(2, ROLLBACK_CAP + 1, 1):
-            oldname = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i)
-            newname = '%s/%s-rollback_%s.tar.bz2' % (env.rollbackdir, env.site, i - 1)
-            if exists(oldname):
-                sudo('mv %s %s' % (oldname, newname))
-        reload()
-    else:
-        sys.exit('Error: %s not found' % rollback_file)
-
-
-def install_apt_deps():
-    """
-    Installs system packages and build dependencies with apt.
-    """
-    for deps_dict in (env.config['silk_deps'], env.config['site_deps']):
-        print deps_dict
-        if deps_dict['apt_build_deps']:
-            sudo('apt-get build-dep %s -y' % ' '.join(deps_dict['apt_build_deps']))
-        if deps_dict['apt_packages']:
-            sudo('apt-get install %s -y' % ' '.join(deps_dict['apt_packages']))
-
-#TODO: rebuild virtualenv if it exists but the python version is wrong
-def create_virtualenv():
-    """
-    Creates a virtualenv for the site.  Automatically builds egenix-mx-tools in it, since
-    pip doesn't seem able to install that.
-    """
-    print "CREATING VIRTUALENV"
-    if not exists(env.remote_root, use_sudo=True):
-        sudo('mkdir -p %s' % env.remote_root)
-    sudo('virtualenv --no-site-packages --python=%s %s' % (env.config['runtime'], env.envdir))
-    build_mx_tools()
-
-def _pip_install_deps(dep_list):
-    # Write it out
-    file_contents = '\n'.join(dep_list)
-    tmpfile = tempfile.NamedTemporaryFile()
-    tmpfile.write(file_contents)
-    tmpfile.seek(0) # Rewind the file so that the putter can read it.
-
-    # Upload it to the remote host
-    tmp_remote = '/tmp/%s-reqs.txt' % ''.join([random.choice('abcdefghijklmnopqrstuvwxyz') for x in xrange(5)])
-    put(tmpfile.name, tmp_remote)
-
-    # Closing a tempfile will clean it up automatically.
-    tmpfile.close()
-
-    # Run pip install remotely
-    pip_cmd = silk.lib.get_pip_cmd(env.config) 
-    sudo('%s/bin/%s -r %s' % (env.envdir, pip_cmd, tmp_remote))
-         
-    # Clean up remote reqs file
-    sudo('rm %s' % tmp_remote)
-
-def install_python_deps():
-    """
-    Runs pip install inside the remote virtualenv.  First for silk's dependencies and
-    then for the site's.
-    """
-    for dep_dict in (env.config['silk_deps'], env.config['site_deps']):
-        if dep_dict['python_packages']:
-            _pip_install_deps(dep_dict['python_packages'])
-
-def install_deps():
-    """
-    Wraps the apt deps, virtualenv creation, and python deps functions to ensure
-    that things are done in the right order.
-    """
-    print "INSTALLING DEPENDENCIES"
-    install_apt_deps()
-    if not exists(env.envdir, use_sudo=True):
-        create_virtualenv()
-    install_python_deps()
-
-def build_mx_tools():
-    """
-    Builds and install egenix-mx-tools into virtualenv
-    """
-    #egenix-mx-tools includes the mxdatetime module, which is
-    #a psycopg2 dependency.  Unfortunately it's not packaged in 
-    #a way that pip can install.  So we build it here instead
-    print "INSTALLING MX TOOLS"
-    build_dir = "/tmp/egenix_build"
-    if not exists(build_dir, use_sudo=True):
-        sudo('mkdir -p %s' % build_dir)
-    with cd(build_dir):
-        #download the tools
-        sudo('wget http://downloads.egenix.com/python/egenix-mx-base-3.1.3.tar.gz')
-        #unpack
-        sudo('tar xvf egenix-mx-base-3.1.3.tar.gz')
-    with cd(os.path.join(build_dir, 'egenix-mx-base-3.1.3')):
-        #install into the virtualenv
-        sudo('%s setup.py install' % os.path.join(env.envdir, 'bin', 'python'))
-    sudo('rm -rf %s' % build_dir)
-
-def push_code():    
-    """
-    Pushes site to remote host
-    """
-    print "PUSHING CODE TO HOST"
-    if exists(env.workdir):
-        sudo('rm %s -rf' % env.workdir)
-    _put_dir(env.local_root, env.workdir)
-
-def _upload_config_template(template, dest, context):
-    #first try to load the template from the local cfg_templates dir.
-    #if it's not there then try loading from pkg_resources
+def _write_template(template, dest, context):
     path = silk.lib.get_template_path(template, env.local_root)
     upload_template(path, dest, context=context, use_sudo=True)
 
+def _ensure_dir(remote_path):
+    if not exists(remote_path, use_sudo=True):
+        sudo('mkdir -p %s' % remote_path)
+
+def _format_supervisord_env(env_dict):
+    """Takes a dictionary and returns a string in form
+    'key=val,key2=val2'"""
+    try:
+        return ','.join(['%s="%s"' % (key, env_dict[key]) for key in env_dict.keys()])
+    except AttributeError:
+        # env_dict isn't a dictionary, so they must not have included any env vars for us.
+        return ''
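A doctest-style illustration of the helper above (values hypothetical): a dict from the site config's optional 'env' section becomes the single string supervisord expects in its environment= setting, and a missing section degrades to an empty string:

    >>> _format_supervisord_env({'SILK_ROLE': 'prod'})
    'SILK_ROLE="prod"'
    >>> _format_supervisord_env(None)
    ''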
+
+def _green(text):
+    print green(text)
+
+def _red(text):
+    print red(text)
+
+def _yellow(text):
+    print yellow(text)
+
+def _list_dir(dirname):
+    """Given the path for a directory on the remote host, return its contents
+    as a python list."""
+    txt = sudo('ls -1 %s' % dirname)
+    return txt.split('\r\n')
+
+def _is_supervisored(site):
+    """Returns True if site 'site' has a supervisord.conf entry, else False."""
+    # Check both the default include location and the silk <=0.2.9 location.
+    old_conf_file = _join(SRV_ROOT, site, 'conf', 'supervisord.conf')
+    new_conf_file = _join('/etc/supervisor/conf.d', '%s.conf' % site)
+    return exists(new_conf_file) or exists(old_conf_file)
+
+def _socket_head_request(path):
+    """Given a path to a socket, make an HTTP HEAD request on it"""
+    # Get the local path for the unix socket http tool
+    script = pkg_resources.resource_filename('silk', 'sock_http.py')
+    # copy it over to the host
+    dest = _tmpfile()
+    put(script, dest, use_sudo=True)
+    # run it, passing in the path to the socket
+    return sudo('python %s %s HEAD / %s' % (dest, path,
+                                            env.config['listen_hosts'][0]))
+
+def _port_head_request(port):
+    """Given a port number, use curl to make an http HEAD request to it.
+    Return response status as integer."""
+    return run('curl -I http://localhost:%s' % port)
+
+def returns_good_status():
+    """Makes an HTTP request to '/' on the site and returns True if it gets
+    back a status in the 200, 300, or 400 ranges.
+
+    You can only use this as a standalone silk command if your site has a
+    hard-configured 'bind'."""
+    _yellow("Making http request to site to ensure upness.")
+    bind = env.config['bind']
+    if bind.startswith('unix:'):
+        result = _socket_head_request(bind.replace('unix:', ''))
+    else:
+        result = _port_head_request(bind.split(':')[-1])
+    first_line = result.split('\r\n')[0]
+    status = int(first_line.split()[1])
+    return 200 <= status < 500
+
+def _is_running(procname, tries=3, wait=2):
+    """Given the name of a supervisord process, tell you whether it's running
+    or not.  If status is 'starting', will wait until status has settled."""
+    # Status return from supervisorctl will look something like this::
+    # mysite_20110623_162319 RUNNING    pid 628, uptime 0:34:13
+    # mysite_20110623_231206 FATAL      Exited too quickly (process log may have details)
+
+    status_parts = sudo('supervisorctl status %s' % procname).split()
+    if status_parts[1] == 'RUNNING':
+        # For super extra credit, we're actually going to make an HTTP request
+        # to the site to verify that it's up and running.  Unfortunately we
+        # can only do that if Gunicorn is binding to a different socket or port
+        # on each deployment (Silk binds to a new socket on each deployment by
+        # default).  If you've configured your site to bind to a port, we'll
+        # just have to wing it.
+        if env.config['bind'] == env.default_bind:
+            if returns_good_status():
+                _green("You're golden!")
+                return True
+            else:
+                _red(":(")
+                return False
+        else:
+            return True
+    elif status_parts[1] == "FATAL":
+        return False
+    elif tries > 0:
+        # It's neither running nor dead yet, so try again
+        _yellow("Waiting %s seconds for process to settle" % wait)
+        time.sleep(wait)
+
+        # decrement the tries and double the wait time for the next check.
+        return _is_running(procname, tries - 1, wait * 2)
+    else:
+        return False
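The parsing above boils down to splitting one supervisorctl status line on whitespace and reading the second field; for example (hypothetical process name):

    >>> line = 'mysite_20110623_162319 RUNNING    pid 628, uptime 0:34:13'
    >>> line.split()[1]
    'RUNNING'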
+
+def _is_this_site(name):
+    """Return True if 'name' matches our site name (old style Silk deployment
+    naming' or matches our name + timestamp pattern."""
+
+    site_pattern = re.compile('%s_\d{8}_\d{6}' % env.site)
+    return (name == env.site) or (re.match(site_pattern, name) is not None)
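A quick illustration, assuming env.site is 'mysite' (hypothetical): both the old single-directory name and the new timestamped names match, while other sites do not:

    >>> env.site            # assumed for this example
    'mysite'
    >>> _is_this_site('mysite'), _is_this_site('mysite_20110623_162319')
    (True, True)
    >>> _is_this_site('othersite_20110623_162319')
    False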
+
+def install_server_deps():
+    """
+    Installs nginx and supervisord on remote Ubuntu host.
+    """
+    sudo('apt-get install nginx supervisor --assume-yes --quiet --no-upgrade')
+
+def push_code():
+    _green("PUSHING CODE")
+    # Push the local site to the remote root, excluding files that we don't
+    # want to leave cluttering the production server.  Exclude site.yaml
+    # because we'll be writing a new one containing the site config updated
+    # with the role config.  Omit roles because they're superfluous at that
+    # point and also may contain sensitive connection credentials.
+    exclude = "--exclude=site.yaml --exclude=roles"
+    _put_dir(env.local_root, env.root, exclude)
+
+def create_virtualenv():
+    """Create a virtualenv inside the remote root"""
+    if 'runtime' in env.config:
+        pyversion = '--python=%s' % env.config['runtime']
+    else:
+        pyversion = ''
+
+    # Put the prereq packages (pip, distribute) into /tmp on the remote host,
+    # if they're not already there, so virtualenv can find them without
+    # downloading anything.
+    local_dir = pkg_resources.resource_filename('silk', 'prereqs')
+    files = pkg_resources.resource_listdir('silk', 'prereqs')
+
+    for f in files:
+        remote = posixpath.join('/tmp', f)
+        local = posixpath.join(local_dir, f)
+        if not exists(remote):
+            put(local, remote, use_sudo=True)
+
+
+    tmpl_vars = vars()
+    tmpl_vars.update(env)
+
+    c = ("virtualenv --no-site-packages %(pyversion)s --extra-search-dir=/tmp "
+         "--never-download %(envdir)s") % tmpl_vars
+    sudo(c)
+
+def pip_deps():
+    """Install requirements listed in the site's requirements.txt file."""
+
+    _green("INSTALLING PYTHON DEPENDENCIES")
+    reqs_file = os.path.join(env.root, 'requirements.txt')
+    pypi = env.config.get('pypi', 'http://pypi.python.org/pypi')
+    cachedir = posixpath.join(SRV_ROOT, 'pip_cache')
+    _ensure_dir(cachedir)
+    sudo('PIP_DOWNLOAD_CACHE="%s" %s/bin/pip install -r %s -i %s ' %
+         (cachedir, env.envdir, reqs_file, pypi))
+
+def configure_supervisor():
+    """
+    Creates and upload config file for supervisord
+    """
+    _green("WRITING SUPERVISOR CONFIG")
+
+    template_vars = {
+        'cmd': silk.lib.get_gunicorn_cmd(env, bin_dir='%s/bin' % (env.envdir)),
+        'process_env': _format_supervisord_env(env.config.get('env')),
+        'srv_root': SRV_ROOT,
+    }
+
+    template_vars.update(env)
+    template_vars.update(env.config)
+    #make sure the logs dir is created
+    _ensure_dir(_join(env.root, 'logs'))
+
+    # Put supervisord include in default location
+    _write_template('supervisord.conf', env.supd_conf_file, template_vars)
+
+def fix_supd_config_bug():
+    """Fixes a bug from an earlier version of Silk that wrote an invalid line
+    to the master supervisord.conf"""
+    # Silk 0.2.9 and earlier included a command to configure supervisord and
+    # nginx to include files in /srv/<site>/conf, in addition to their default
+    # include directories.  While this was valid for nginx, supervisord does
+    # not allow for multiple "files" lines in its "include" section (it does
+    # allow for multiple globs on the single "files" line, though).  This command
+    # finds the offending pattern in /etc/supervisor/supervisord.conf and
+    # replaces it with the correct equivalent.
+
+    # Note that Silk 0.3.0 and later do not require any changes to the
+    # default supervisord.conf that ships with Ubuntu.  All include files are
+    # placed in supervisord's conf.d directory.
+
+    file = '/etc/supervisor/supervisord.conf'
+
+    if contains(file, "files = /srv/\*/conf/supervisord.conf", use_sudo=True):
+        _green("FIXING OLD SUPERVISOR CONFIG BUG")
+        _yellow("See http://bits.btubbs.com/silk-deployment/issue/15/incorrect-supervisord-include-config-in")
+
+        bad = "\r\n".join([
+            "files = /etc/supervisor/conf.d/*.conf",
+            "files = /srv/*/conf/supervisord.conf"
+        ])
+
+        good = ("files = /etc/supervisor/conf.d/*.conf "
+                "/srv/*/conf/supervisord.conf\n")
+
+        txt = sudo('cat %s' % file)
+
+        if bad in txt:
+            txt = txt.replace(bad, good)
+            _write_file(file, txt, use_sudo=True, chown='root')
+
+def cleanup():
+    """Deletes old versions of the site that are still sitting around."""
+    _green("CLEANING UP")
+
+    folders = _list_dir(SRV_ROOT)
+    rollbacks = [x for x in folders if _is_this_site(x)]
+
+    if len(rollbacks) > env.rollback_cap:
+        # There are more rollbacks than we want to keep around.  See if we can
+        # delete some.
+        suspects = rollbacks[:-(env.rollback_cap + 1)]
+        for folder in suspects:
+            if not _is_supervisored(folder):
+                fullpath = _join(SRV_ROOT, folder)
+                sudo('rm -rf %s' % fullpath)
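Because `ls -1` sorts lexicographically and the <site>_<timestamp> names sort chronologically, the slice above keeps the newest rollback_cap + 1 directories (typically the fresh deployment plus rollback_cap rollbacks) and only offers older, un-supervised ones for deletion. With the default cap of 3 (stand-in names, oldest to newest):

    >>> rollbacks = ['v1', 'v2', 'v3', 'v4', 'v5', 'v6']
    >>> rollbacks[:-(3 + 1)]
    ['v1', 'v2']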
+
+    # Clean up old socket files in /tmp/ that have no associated site
+    # TODO: use our own list dir function and a regular expression to filter
+    # the list of /tmp sockets instead of this funky grepping (one possible
+    # shape of that is sketched just after this function).
+    with cd('/tmp'):
+        socks = run('ls -1 | grep %s | grep sock | cat -' % env.site).split('\r\n')
+    for sock in socks:
+        procname = sock.replace('.sock', '')
+        if not exists(_join(SRV_ROOT, procname)):
+            sudo('rm /tmp/%s' % sock)
+
+    # TODO: clean out the pip-* folders that can build up in /tmp
+    # TODO: figure out a way to clean out pybundle files in /srv/_silk_build
+    # that aren't needed anymore.
+
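One possible shape of the socket-filtering TODO above, reusing _list_dir() and _is_this_site() instead of shelling out to grep; a sketch only, assuming the sockets keep the '<deployment>.sock' naming that GUNICORN_BIND_PATTERN produces:

    def _site_socks():
        """Return the /tmp socket files that belong to some version of this site."""
        return [f for f in _list_dir('/tmp')
                if f.endswith('.sock') and _is_this_site(f[:-len('.sock')])]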
+def start_process():
+    """Tell supervisord to read the new config, then start the new process."""
+    _green('STARTING PROCESS')
+    result = sudo('supervisorctl reread')
+
+    sudo('supervisorctl add %s' % env.deployment)
+
 def _get_nginx_static_snippet(url_path, local_path):
     return """
     location %(url_path)s {
     }
     """ % locals()
 
-def _write_templates(template_list, template_vars):
-    for pair in template_list:
-        src, dest = pair
-        _upload_config_template(
-            src,
-            dest,
-            context = template_vars
-        )
-
-def _ensure_dir(remote_path):
-    if not exists(remote_path, use_sudo=True):
-        sudo('mkdir %s' % remote_path)
-
-def _format_supervisord_env(env_dict):
-    """Takes a dictionary and returns a string in form
-    'key=val,key2=val2'"""
-    try:
-      return ','.join(['%s="%s"' % (key, env_dict[key]) for key in env_dict.keys()])
-    except AttributeError:
-      #env_dict isn't a dictionary, so they must not have included any env vars for us.
-      #return empty string
-      return ''
-
-def write_config():
-    """
-    Creates and upload config files for nginx, supervisord, and blame.yaml
-    """
-    print "WRITING CONFIG"
+def configure_nginx():
+    """Writes a new nginx config include pointing at the newly-deployed site."""
+    _green("WRITING NGINX CONFIG")
     nginx_static = ''
     static_dirs = env.config.get('static_dirs', None)
+
+    # Use the static_dirs values from the site config to set up static file
+    # serving in nginx.
     if static_dirs:
       for item in static_dirs:
           nginx_static += _get_nginx_static_snippet(
               item['url_path'],
-              #system_path may be a full path, or relative to remote_root
-              posixpath.join(env.remote_root, item['system_path'])
+              _join(env.root, item['system_path'])
           )
     template_vars = {
-        'cmd': silk.lib.get_gunicorn_cmd(env.config, bin_dir='%s/bin' % (env.envdir)),
         'nginx_static': nginx_static,
         'nginx_hosts': ' '.join(env.config['listen_hosts']),
-        'process_env': _format_supervisord_env(env.config['env']),
-        'srv_root': SRV_ROOT,
     }
     template_vars.update(env)
     template_vars.update(env.config)
-    config_dir = '/'.join([env.workdir, 'conf'])
-    #make sure the conf and logs dirs are created
-    _ensure_dir(config_dir)
-    _ensure_dir('/'.join([env.workdir, 'logs']))
 
-    template_list = (
-        ('supervisord.conf','/'.join([config_dir, 'supervisord.conf'])),
-        ('nginx.conf','/'.join([config_dir, 'nginx.conf'])),
-    )
-    _write_templates(template_list, template_vars)
-    _write_blame()
+    # Create the nginx include at /etc/nginx/sites-enabled/<sitename>
+    nginx_file = _join(NGINX_SITE_DIR, env.site)
+    sudo('rm -f %s' % nginx_file)
+    _write_template('nginx.conf', nginx_file, template_vars)
 
-def switch():
-    """
-    Does a little dance to move the old project dir out of the way and put
-    the new one in its place.
-    """
-    print "MOVING NEW CODE INTO PLACE"
-    #copy the virtualenv into env.workdir
-    sudo('cp %s %s/ -r' % (env.envdir, env.workdir))
-    #move old env.remote_root
-    olddir = '/'.join(['/tmp', 'old-%s' % env.site])
-    sudo('mv %s %s' % (env.remote_root, olddir))
-    #move code into place
-    sudo('mv %s %s' % (env.workdir, env.remote_root))
+def switch_nginx():
+    _green("LOADING NEW NGINX CONFIG")
 
-def cleanup():
-    """
-    Removes the old project dir.  (But you still have a rollback!)
-    """
-    print "CLEANING UP"
-    #do this last to minimize time between taking down old site and setting up new one
-    #since "mv" is faster than "cp -r" or "rm -rf"
-    olddir = '/'.join(['/tmp', 'old-%s' % env.site])
-    sudo('rm %s -rf' % olddir)
+    # Check if there is an old-style version (within the site root) of the
+    # nginx config laying around, and rename it to something innocuous if so.
+    old_nginx = _join(SRV_ROOT, env.site, 'conf', 'nginx.conf')
+    if exists(old_nginx):
+        sudo('mv %s %s' % (old_nginx, "%s_disabled" % old_nginx))
 
-def server_setup():
-    """
-    Installs nginx and supervisord on remote host.  Sets up nginx and
-    supervisord global config files.
-    """
-    install_apt_deps()
-    template_list = (
-        ('supervisord_root.conf', '/etc/supervisor/supervisord.conf'),
-        ('nginx_root.conf','/etc/nginx/nginx.conf'),
-    )
-    _write_templates(template_list, env)
-    restart()
+    # Tell nginx to rescan its config files
+    sudo('/etc/init.d/nginx reload')
+
+def stop_other_versions():
+    """Stop other versions of the site that are still running, and disable their
+    configs."""
+    proclist = sudo('supervisorctl status').split('\r\n')
+
+    # parse each line so we can get at just the proc names
+    proclist = [x.split() for x in proclist]
+
+    # filter proclist to include only versions of our site (skipping any
+    # blank lines in the supervisorctl output)
+    proclist = [x for x in proclist if x and _is_this_site(x[0])]
+
+    live_statuses = ["RUNNING", "STARTING"]
+
+    # stop each process left in proclist that isn't the current one
+    for proc in proclist:
+        # We assume that spaces are not allowed in proc names
+        procname = proc[0]
+        procstatus = proc[1]
+        if procname != env.deployment:
+            # Stop the process
+            if procstatus in live_statuses:
+                sudo('supervisorctl stop %s' % procname)
+
+            # Remove it from live config
+            sudo('supervisorctl remove %s' % procname)
+
+            # Remove its supervisord config file
+            conf_file = '/etc/supervisor/conf.d/%s.conf' % procname
+            if exists(conf_file):
+                sudo('rm %s' % conf_file)
+
+            # Also remove old style supervisord include if it exists
+            old_conf_file = _join(SRV_ROOT, procname, 'conf/supervisord.conf')
+            if exists(old_conf_file):
+                sudo('rm %s' % old_conf_file)
+
+    sudo('supervisorctl reread')
+
+def congrats():
+    """Congratulate the user and print a link to the site."""
+    link0 = "http://%s" % env.config['listen_hosts'][0]
+    msg = ("SUCCESS!  I think.  Check that the site is running by browsing "
+           "to this url:\n\n%s" % link0)
+    _green(msg)
 
 def push():
     """
-    The main function.  Assuming you have nginx and supervisord installed and
-    configured, this function will put your site on the remote host and get it
-    running.
+    The main function.  This function will put your site on the remote host and get it
+    running.
     """
-    archive()
-    install_deps()
+    # Make sure nginx and supervisord are installed
+    install_server_deps()
+
+    # Fix an embarrassing config bug from earlier versions
+    fix_supd_config_bug()
+
+    # push site code and pybundle to server, in timestamped folder
     push_code()
-    write_config()
-    switch()
-    reload()
-    cleanup()
+    # make virtualenv on the server and run pip install on the pybundle
+    create_virtualenv()
+    pip_deps()
+    _write_site_yaml()
+    # write supervisord config for the new site
+    configure_supervisor()
 
-def dump_config():
-    """
-    Just loads up the config and prints it out. For testing.
-    """
-    print env.config
+    # Then the switchover:
+
+    # start the new site under supervisord
+    start_process()
+    # check that the new site is running (via supervisorctl)
+    if _is_running(env.deployment):
+        _green("Site is running.  Proceeding with nginx switch.")
+        # if the site's running fine, then silk configures nginx to forward requests
+        # to the new site
+        configure_nginx()
+        switch_nginx()
+        stop_other_versions()
+        cleanup()
+        congrats()
+    else:
+        _red("Process failed to start cleanly.  Off to the log files!")
+        sys.exit(1)
-import yaml
 import copy
 import os
+import pkg_resources
+import subprocess
 import sys
-import pkg_resources
+import time
+import shutil
+import hashlib
+from signal import SIGTERM, SIGINT
 
-def get_gunicorn_cmd(site_config, bin_dir=''):
-    gconfig = copy.copy(site_config['gunicorn'])
+import app_container
+
+from app_container import *
+
+GUNICORN_DEFAULTS = {
+    'workers': 1,
+    'log-level': 'info',
+    'name': 'gunicorn',
+    'debug': 'false',
+}
+
+# Format for the 'bind' string we'll be making later, sticking the site name +
+# timestamp in the wildcard part.  Used here for generating the gunicorn cmd,
+# and in the fabfile for plugging into the nginx config.
+GUNICORN_BIND_PATTERN = 'unix:/tmp/%s.sock'
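Concretely (hypothetical deployment name):

    >>> GUNICORN_BIND_PATTERN % 'mysite_20110623_162319'
    'unix:/tmp/mysite_20110623_162319.sock'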
+
+def get_gunicorn_cmd(site_env, bin_dir=''):
+    """Given a copy of Fabric's state in site_env, configure and return the
+    gunicorn cmd line needed to run the site"""
+    site_config = site_env['config']
+    gconfig = copy.copy(GUNICORN_DEFAULTS)
+    gconfig.update(site_config.get('gunicorn', {}))
 
     # Default to using a unix socket for nginx->gunicorn
-    gconfig['bind'] = gconfig.get('bind', 
-                                  'unix:/tmp/%s.sock' % site_config['site'])
-    
-    # Default to using the site name in the procname
-    gconfig['name'] = gconfig.get('name', site_config['site'])
+    if 'deployment' in site_env:
+        default_bind = GUNICORN_BIND_PATTERN % site_env.deployment
+        # Default to using the site name and timestamp in the procname
+        gconfig['name'] = site_env['deployment']
+    else:
+        default_bind = 'localhost:8000'
+        gconfig['name'] = site_config['site']
+
+    gconfig['bind'] = gconfig.get('bind', default_bind)
+
 
     debug = gconfig.pop('debug', None)
     options = ' '.join(['--%s %s' % (x, y) for x, y in gconfig.iteritems()])
         cmd = '%s/%s' % (bin_dir, cmd)
     return cmd
 
-def get_site_root(start_dir):
+def get_root(start_dir):
     testfile = os.path.join(start_dir, 'site.yaml')
     if os.path.isfile(testfile):
         return start_dir
     else:
         parent_dir = os.path.split(start_dir)[0]
         if parent_dir != start_dir:
-            return get_site_root(parent_dir)
+            return get_root(parent_dir)
         else:
             return None
 
-def get_role_list(local_root):
-    """Return a list of the role names defined by yaml roles/*.yaml"""
-    return [file[:-5] for file in os.listdir(os.path.join(local_root, 'roles')) if file.endswith('.yaml')]
-
-def get_role_config(role):
-    role_file = '%s/roles/%s.yaml' % (get_site_root(os.getcwd()), role)
-    config =  yaml.safe_load(open(role_file, 'r').read())
-    return config
-    #TODO: support pulling role info from a web page
-
-def get_site_config(site_root):
-    """Parses and returns site.yaml"""
-    site_config_file = os.path.join(site_root, 'site.yaml')
-    config = yaml.safe_load(open(site_config_file, 'r').read())
-    return config
-
-def get_blame(site_root):
-    """Parses and returns blame.yaml in deployed site"""
-    blame_file = os.path.join(site_root, 'blame.yaml')
-    blame = yaml.safe_load(open(blame_file, 'r').read())
-    return blame
-
-def get_config(site_root=None, role=None):
-    """
-    Returns merged site and role config.  Tries hard to come up with
-    something if you don't pass in a role or site name.
-    """
-
-    # If no site_root given, then look above os.getcwd()
-    if site_root is None:
-        site_root = get_site_root(os.getcwd())
-
-    # If role is None, then try getting it from cmd line and/or env vars ourselves
-    role = role or get_role() 
-
-    # If role is still none, then look for a blame file, which doesn't require a role. 
-    if role is None:
-        try:
-            return get_blame(site_root)[1]['config']
-        except IOError:
-            # If no role and no blame file, give up.
-            return get_site_config(site_root)
-
-    config = get_site_config(site_root)
-    config.update(get_role_config(role))
-    return config
-
-def get_template_path(template, site_root=None):
+def get_template_path(template, root=None):
     """
     Returns path of template from site conf_templates dir, if found there, else
     returns template path from silk's conf_templates dir.
     """
-    if site_root:
-        localpath=os.path.join(site_root, 'conf_templates', template)
+    if root:
+        localpath=os.path.join(root, 'conf_templates', template)
         if os.path.isfile(localpath):
             return localpath
     pkgpath=pkg_resources.resource_filename('silk', 'conf_templates/%s' % template)
     txt = open(template_path, 'r').read()
     return txt % context
 
-def get_role():
+def _run(args, kill_signal, cwd=None, env=None):
+    # Copy the passed-in env (or start fresh) so we never mutate a shared
+    # default dict, and resolve the working directory at call time.
+    cwd = cwd or os.getcwd()
+    env = dict(env or {})
+    env.update(os.environ)
+    proc = subprocess.Popen(args, cwd=cwd, env=env)
     try:
-        #if '-R rolename' found in sys.argv, use that
-        return sys.argv[sys.argv.index('-R')+1]
-    except:
-        #role not found in sys.argv, try env var
-        #return None if no role there either
-        return os.environ.get('SILK_ROLE', None)
+        proc.wait()
+    except KeyboardInterrupt as e:
+        print "KeyboardInterrupt"
+        proc.send_signal(kill_signal)
+    except Exception as e:
+        print e
+        proc.send_signal(kill_signal)
 
-def get_pip_cmd(site_config):
-    pypi = site_config.get('pypi', 'http://pypi.python.org/simple/')
-    return 'pip install -i %s' % pypi
+def run_fab(args):
+    args[0] = 'fab'
+    _run(args, SIGTERM)
+
+def run_devserver():
+    # Overwrite the wsgi_app config var to point to our internal app that will
+    # also mount the static dirs.
+    root = os.environ['SILK_ROOT']
+    role = os.environ['SILK_ROLE']
+    config = app_container.get_config(root, role)
+    config['wsgi_app'] = 'silk.devserver:app'
+
+    cmd = get_gunicorn_cmd({'config': config})
+
+    subproc_env = {
+        'SILK_ROOT': root,
+        'SILK_ROLE': app_container.get_role(),
+    }
+
+    # By adding our current environment to that used for the subprocess, we
+    # ensure that the same paths will be used (such as those set by virtualenv)
+    subproc_env.update(os.environ)
+
+    _run(cmd.split(), SIGINT, cwd=root, env=subproc_env)
+
+    # This 1 second sleep lets the gunicorn workers exit before we show the
+    # prompt again.
+    time.sleep(1)
+
+def install_skel(sitename):
+    """Copies the contents of site_templates into the named directory (within cwd)"""
+    root = os.environ['SILK_ROOT']
+    #get the dir from pkg_resources
+    src = pkg_resources.resource_filename('silk', 'site_templates')
+    try:
+        shutil.copytree(src, os.path.join(os.getcwd(), sitename))
+    except OSError as e:
+        print e
+
+def get_local_archive_dir():
+    return os.path.join(os.path.expanduser('~'), '.silk')
+
+def get_pybundle_name(reqs):
+    """Hash the requirements list to create a pybundle name."""
+    # Strip leading and trailing whitespace
+    reqs = reqs.strip()
+
+    # put the lines in order to ensure consistent hashing
+    lines = reqs.split()
+    lines.sort()
+    reqs = '\n'.join(lines)
+
+    hash = hashlib.md5(reqs).hexdigest()
+    return "%s.pybundle" % hash
+
+def get_pybundle_path(reqs):
+    """Return the name of the pybundle file that corresponds to the passed-in
+    requirements text."""
+    return os.path.join(get_local_archive_dir(), get_pybundle_name(reqs))
+
+cmd_map = {
+    'run': run_devserver,
+    'skel': install_skel,
+}
+
+def cmd_dispatcher():
+    """wraps 'fab', handles 'silk run'"""
+    args = sys.argv
+    try:
+        cmd = args[1]
+
+        # If a command is provided by cmd_map, use that.  Else pass through to
+        # fabric.
+        if cmd in cmd_map:
+            # Stick some information about the role and root into the current env,
+            # then call the local function in cmd_map.
+            os.environ['SILK_ROLE'] = app_container.get_role() or ''
+            os.environ['SILK_ROOT'] = app_container.get_site_root(os.getcwd()) or ''
+            cmd_map[cmd]()
+        else:
+            # Use project-provided fabfile if present, else the one built into
+            # Silk.  We'll have to trust that the project file imports ours.
+            root = get_root(os.getcwd())
+            site_fab = os.path.join(root, 'fabfile.py')
+            if os.path.isfile(site_fab):
+                fabfile = site_fab
+            else:
+                fabfile = pkg_resources.resource_filename('silk', 'fabfile.py')
+            args.extend(['--fabfile', fabfile])
+            run_fab(args)
+
+    except IndexError:
+        # Print out help text.  Currently just prints it for the cmds specified
+        # in the fabfile, which isn't great because it omits things like 'silk
+        # run' and 'silk deps'.  Would be better to inspect the fabfile and
+        # list the cmds/docstrings myself, right along the non-fabfile cmds
+        # that Silk provides.  Or I could just make all the things I provide as
+        # fab cmds.  That might be the simpler approach.
+        run_fab(['fab', '-l'])

silk/prereqs/distribute-0.6.21.tar.gz

Binary file added.

silk/prereqs/pip-1.0.2.tar.gz

Binary file added.

silk/sock_http.py

+#!/usr/bin/env python
+
+import sys
+import socket
+
+REQUEST_TEMPLATE = ('%(method)s %(path)s HTTP/1.1\r\n'
+                    'Host: %(host)s\r\n\r\n')
+
+SUPPORTED_METHODS = ('HEAD', 'GET')
+
+def sockhttp(sockpath, method, path, host):
+    """Make an HTTP request over a unix socket."""
+    req = REQUEST_TEMPLATE % locals()
+    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    s.connect(sockpath)
+    s.send(req)
+    out = ''
+    while 1:
+        data = s.recv(1024)
+        out += data
+        if not data: break
+    s.close()
+    return out
+
+def usage():
+    """Print usage information for this program"""
+    print ("This program allows you to make http requests to unix sockets. "
+           "Usage:\n\n"
+           "python %s /path/to/socket METHOD request_path host_name\n" %
+           __file__)
+
+    print "Supported methods are: %s" % ", ".join(SUPPORTED_METHODS)
+
+if __name__ == '__main__':
+    try:
+        _, sockpath, method, path, host = sys.argv
+    except ValueError:
+        usage()
+        sys.exit(1)
+    print sockhttp(sockpath, method, path, host)
+

silk/utils.py

-#!/usr/bin/env python
-import subprocess
-import sys
-import time
-import os
-import pkg_resources
-import shutil
-import tempfile
-from signal import SIGTERM, SIGINT
-
-import yaml
-
-import silk.lib
-
-def run_til_you_die(args, kill_signal, cwd=os.getcwd(), env={}):
-    env.update(os.environ)
-    proc = subprocess.Popen(args, cwd=cwd, env=env)
-    try:
-        proc.wait()
-    except KeyboardInterrupt as e:
-        print "KeyboardInterrupt"
-        proc.send_signal(kill_signal)
-    except Exception as e:
-        print e
-        proc.send_signal(kill_signal)
-
-def run_fab(args):
-    args[0] = 'fab'
-    run_til_you_die(args, SIGTERM)
-
-def run_devserver(config, root):
-    # Overwrite the wsgi_app config var to point to our internal app that will
-    # also mount the static dirs.
-    config['wsgi_app'] = 'silk.devserver:app'
-    
-    cmd = silk.lib.get_gunicorn_cmd(config)
-    
-    subproc_env = {
-        'SILK_ROOT': root,
-        'SILK_ROLE': silk.lib.get_role(),
-    }
-
-    # By adding our current subproc_environment to that used for the subprocess, we
-    # ensure that the same paths will be used (such as those set by virtualsubproc_env)
-    subproc_env.update(os.environ)
-
-    run_til_you_die(cmd.split(), SIGINT, cwd=root, env=subproc_env)
-
-    # This 1 second sleep lets the gunicorn workers exit before we show the
-    # prompt again.
-    time.sleep(1)
-
-def install_skel(sitename):
-    """Copies the contents of site_templates into the named directory (within cwd)"""
-    #get the dir from pkg_resources
-    src = pkg_resources.resource_filename('silk', 'site_templates')
-    try:
-        shutil.copytree(src, os.path.join(os.getcwd(), sitename))
-    except OSError, e:
-        print e
-    
-def freeze_2_yaml():
-    """Read lines of text from stdin and print a python_packages yaml list"""
-    lines = sys.stdin.read().split('\n')#split only on newlines, not spaces
-    lines = [line for line in lines if line]#remove empty lines
-    print yaml.safe_dump({'python_packages':lines}, default_flow_style=False)
-
-def yaml_2_freeze():
-    """Read lines of deps.yaml from stdin and print requirements.txt contents"""
-    txt = sys.stdin.read()
-    deps = yaml.safe_load(txt)
-    print '\n'.join(deps['python_packages'])
-
-def local_python_deps(config, root):
-    """Write a requirements.txt from deps.yaml file for pip, then run pip on it."""
-    depfile = os.path.join(silk.lib.get_site_root(os.getcwd()), 'deps.yaml')
-    txt = open(depfile).read()
-    deps = yaml.safe_load(txt)
-    reqs = '\n'.join(deps['python_packages'])
-
-    tmpfile = tempfile.NamedTemporaryFile()
-    tmpfile.write(reqs)
-    tmpfile.seek(0)
-
-    pip_cmd = silk.lib.get_pip_cmd(config) 
-    cmd = '%s -r %s' % (pip_cmd, tmpfile.name)
-    run_til_you_die(cmd.split(), SIGTERM)
-    tmpfile.close()
-
-cmd_map = {
-    'run': run_devserver,
-    'skel': install_skel,
-    'deps': local_python_deps
-}
-
-config_required = (
-    run_devserver,
-    local_python_deps,
-)
-
-def cmd_dispatcher():
-    """wraps 'fab', handles 'silk run'"""
-    args = sys.argv
-    try:
-        cmd = args[1]
-    except IndexError:
-        run_fab(*args)
-
-    if cmd in cmd_map:
-        cmd_func = cmd_map[cmd]
-        if cmd_func in config_required:
-            role = silk.lib.get_role()
-            root = silk.lib.get_site_root(os.getcwd())
-            config = silk.lib.get_config(root, role)
-            cmd_func(config, root)
-        else:
-            cmd_func(*args[2:])
-    else:
-        run_fab(args)