Source

timestamp / rebranch.py

# rebranch.py - node recoloring similar to global tags
#
# Copyright 2012 Friedrich Kastner-Masilko <face@snoopie.at>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.
#
'''node recoloring similar to global tags

This extension basically lets you rename named branches as well as create new named branches after commits already happened.
'''

from mercurial import cmdutil, localrepo, context, encoding, util, error, errno
from mercurial.node import nullid, bin, hex, short
from mercurial.i18n import _
propertycache = util.propertycache

cmdtable = {}
command = cmdutil.command(cmdtable)

#def override_pull(orig, ui, repo, source=None, **opts):
#    result = orig(ui, repo, source, **opts)
#    pull(ui, databasepath(ui), readconfig(ui)[0])
#    return result

def findglobalrebranches(ui, repo, allrebranches):
    '''Collect global rebranch rules for repo.

    Reads .hgrebranch from every head that carries a distinct version of
    that file, consulting the rules cache first to avoid redundant work.
    Extends allrebranches in place with ((node, node), name) entries as
    produced by _readrules().'''
    # Callers must hand us an empty list; _writerulescache() below relies
    # on the list containing global rules only.
    assert not allrebranches, \
           "findglobalrebranches() should be called first"

    heads, rulefnode, cacherules, shouldwrite = _readrulescache(ui, repo)
    if cacherules is not None:
        # Fully cached: nothing changed, so nothing needs rewriting.
        assert not shouldwrite
        allrebranches.extend(cacherules)
        return

    seenfnodes = set()              # .hgrebranch filenodes already parsed
    fctx = None
    for head in reversed(heads):    # walk oldest to newest
        assert head in repo.changelog.nodemap, \
               "rule cache returned bogus head %s" % short(head)

        fnode = rulefnode.get(head)
        if not fnode or fnode in seenfnodes:
            continue
        seenfnodes.add(fnode)
        # Chain off the previous filectx when possible; cheaper than a
        # fresh manifest lookup for every head.
        if fctx:
            fctx = fctx.filectx(fnode)
        else:
            fctx = repo.filectx('.hgrebranch', fileid=fnode)
        allrebranches.extend(
            _readrules(ui, repo, fctx.data().splitlines(), fctx))

    # Refresh the on-disk cache if _readrulescache() asked us to.
    if shouldwrite:
        _writerulescache(ui, repo, heads, rulefnode, allrebranches)

def readlocalrebranches(ui, repo, allrebranches):
    '''Read local rebranches in repo.  Update allrebranches.

    Local rules live in .hg/localrebranch and are never exchanged with
    other repositories.  A missing file is silently ignored; any other
    I/O error propagates.'''
    # Import the standard-library errno module explicitly: the
    # module-level "from mercurial import errno" does not name a real
    # mercurial submodule, so inst.errno handling would break when that
    # lazy (demandimport) binding is first touched.
    import errno
    try:
        data = repo.opener.read("localrebranch")
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return

    # localrebranch is in the local encoding; re-encode to UTF-8 on
    # input for consistency with the rest of this module.
    allrebranches.extend(_readrules(
        ui, repo, data.splitlines(), "localrebranch",
        recode=encoding.fromlocal))

def _readrules(ui, repo, lines, fn, recode=None):
    '''Read rebranch definitions from a file (or any source of lines).
    Return a mapping from (node, node) to branch name: nodes are the range
    specifiers for a new branch name.  All node ids are binary, not hex.'''

    filerules = []               # map (node, node) to branch name
    count = 0

    def warn(msg):
        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

    for line in lines:
        count += 1
        if not line:
            continue
        try:
            (nodehex1, nodehex2, name) = line.split(" ", 2)
        except ValueError:
            warn(_("cannot parse entry"))
            continue
        name = name.strip()
        if recode:
            name = recode(name)
        try:
            nodebin1 = bin(nodehex1)            
        except TypeError:
            warn(_("start node '%s' is not well formed") % nodehex1)
            continue
        try:
            nodebin2 = bin(nodehex2)            
        except TypeError:
            warn(_("end node '%s' is not well formed") % nodehex2)
            continue

        # update filerules
        pair = (nodebin1, nodebin2)
        filerules.append(((nodebin1, nodebin2), name))
    return filerules

# The rebranch cache only stores info about heads, not the rebranch
# contents from each head.  I.e. it doesn't try to squeeze out the
# maximum performance, but is simpler has a better chance of actually
# working correctly.  And this gives the biggest performance win: it
# avoids looking up .hgrebranch in the manifest for every head, and it
# can avoid calling heads() at all if there have been no changes to
# the repo.

def _readrulescache(ui, repo):
    '''Read the rebranch cache and return a tuple (heads, fnodes, cacherules,
    shouldwrite).  If the cache is completely up-to-date, cacherules is a
    list of the form returned by _readrules(); otherwise, it is None and
    heads and fnodes are set.  In that case, heads is the list of all
    heads currently in the repository (ordered from tip to oldest) and
    fnodes is a mapping from head to .hgrebranch filenode.  If those two are
    set, caller is responsible for reading rebranch info from each head.'''

    try:
        cachefile = repo.opener('cache/rebranch', 'r')
        # force reading the file for static-http
        cachelines = iter(cachefile)
    except IOError:
        # No cache yet (or it is unreadable): leave cachefile falsy so
        # the recompute path below runs; cachelines is then never used.
        cachefile = None

    # The cache file consists of lines like
    #   <headrev> <headnode> [<rulenode>]
    # where <headrev> and <headnode> redundantly identify a repository
    # head from the time the cache was written, and <rulenode> is the
    # filenode of .hgrebranch on that head.  Heads with no .hgrebranch file
    # will have no <rulenode>.  The cache is ordered from tip to oldest
    # (which is part of why <headrev> is there: a quick visual check is all
    # that's required to ensure correct order).
    #
    # This information is enough to let us avoid the most expensive part
    # of finding global rebranches, which is looking up <rulenode> in the
    # manifest for each head.
    cacherevs = []                      # list of headrev
    cacheheads = []                     # list of headnode
    cachefnode = {}                     # map headnode to filenode
    if cachefile:
        try:
            for line in cachelines:
                if line == "\n":
                    # Blank line separates the head list from the cached
                    # rules; stop here and leave cachelines positioned at
                    # the first rule line (consumed in case 1 below).
                    break
                line = line.rstrip().split()
                cacherevs.append(int(line[0]))
                headnode = bin(line[1])
                cacheheads.append(headnode)
                if len(line) == 3:
                    fnode = bin(line[2])
                    cachefnode[headnode] = fnode
        except Exception:
            # corruption of the rebranch cache, just recompute it
            ui.warn(_('.hg/cache/rebranch is corrupt, rebuilding it\n'))
            cacheheads = []
            cacherevs = []
            cachefnode = {}

    tipnode = repo.changelog.tip()
    tiprev = len(repo.changelog) - 1

    # Case 1 (common): tip is the same, so nothing has changed.
    # (Unchanged tip trivially means no changesets have been added.
    # But, thanks to localrepository.destroyed(), it also means none
    # have been destroyed by strip or rollback.)
    if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
        # cachelines is still positioned just past the separator, so the
        # remaining lines are exactly the cached rules.
        rules = _readrules(ui, repo, cachelines, cachefile.name)
        cachefile.close()
        return (None, None, rules, False)
    if cachefile:
        cachefile.close()               # ignore rest of file

    repoheads = repo.heads()
    # Case 2 (uncommon): empty repo; get out quickly and don't bother
    # writing an empty cache.
    if repoheads == [nullid]:
        return ([], {}, {}, False)

    # Case 3 (uncommon): cache file missing or empty.

    # Case 4 (uncommon): tip rev decreased.  This should only happen
    # when we're called from localrepository.destroyed().  Refresh the
    # cache so future invocations will not see disappeared heads in the
    # cache.

    # Case 5 (common): tip has changed, so we've added/replaced heads.

    # As it happens, the code to handle cases 3, 4, 5 is the same.

    # N.B. in case 4 (nodes destroyed), "new head" really means "newly
    # exposed".
    newheads = [head
                for head in repoheads
                if head not in set(cacheheads)]

    # Now we have to lookup the .hgrebranch filenode for every new head.
    # This is the most expensive part of finding rebranches, so performance
    # depends primarily on the size of newheads.  Worst case: no cache
    # file, so newheads == repoheads.
    for head in newheads:
        cctx = repo[head]
        try:
            fnode = cctx.filenode('.hgrebranch')
            cachefnode[head] = fnode
        except error.LookupError:
            # no .hgrebranch file on this head
            pass

    # Caller has to iterate over all heads, but can use the filenodes in
    # cachefnode to get to each .hgrebranch revision quickly.
    return (repoheads, cachefnode, None, True)

def _writerulescache(ui, repo, heads, rulefnode, cacherules):

    try:
        cachefile = repo.opener('cache/rebranch', 'w', atomictemp=True)
    except (OSError, IOError):
        return

    realheads = repo.heads()            # for sanity checks below
    for head in heads:
        # temporary sanity checks; these can probably be removed
        # once this code has been in crew for a few weeks
        assert head in repo.changelog.nodemap, \
               'trying to write non-existent node %s to rebranch cache' % short(head)
        assert head in realheads, \
               'trying to write non-head %s to rebranch cache' % short(head)
        assert head != nullid, \
               'trying to write nullid to rebranch cache'

        # This can't fail because of the first assert above.  When/if we
        # remove that assert, we might want to catch LookupError here
        # and downgrade it to a warning.
        rev = repo.changelog.rev(head)

        fnode = rulefnode.get(head)
        if fnode:
            cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
        else:
            cachefile.write('%d %s\n' % (rev, hex(head)))

    # Branch names in the cache are in UTF-8 -- which is the whole reason
    # we keep them in UTF-8 throughout this module.  If we converted
    # them local encoding on input, we would lose info writing them to
    # the cache.
    cachefile.write('\n')
    for ((node1, node2), name) in cacherules:
        cachefile.write("%s %s %s\n" % (hex(node1), hex(node2), name))
    try:
        cachefile.close()
    except (OSError, IOError):
        pass

@command('^rebranch',
         [('r', 'rev', None, _('revision set by range specification'), _('REV')),
          ('b', 'branch', None, _('revision set by branch name'), _('BRANCH')),
          ('d', 'delete', None, _('deletes specified branch')),
         ],
         _('hg rebranch [-r REV | -b BRANCH | -d] BRANCH'))
def rebranch(ui, repo, newbranch=None, **opts):
    '''rebranches a given revision set to the specified branch name

    If no option is specified, all revisions in the current branch will
    get the new branch name. If -r is given, the revsets determines the
    revisions for the new branch, with -b, the given branch name addresses
    them. With the -d option, all nodes addressed with the branch name will
    get the branch of their ancestors, thus effectively deleting the branch
    name.

    This command will affect the current working copy's branch name, if a
    parent on the same branch is affected by the operation. I.e. if your
    current working copy is "mybranch", and the second parent is "mybranch",
    and you change "mybranch" to "newbranch", the working copy will be also
    marked as "newbranch".

    Like with the tag command, this command commits the changes to the
    .hgrebranch file.
    '''
    # TODO(review): the command body is not implemented yet -- the
    # registration above reserves the CLI syntax and help text, but the
    # documented behavior (editing .hgrebranch and committing) is missing.
    return

def uisetup(ui):
    # Install new functions in localrepo class

    def nodebranch(self, node, default):
        '''return the branch name associated with a node

        default is the branch name recorded in the changeset itself; it
        is used (and cached) when no rebranch rule covers the node.'''
        # Lazily build the rev -> branch-index cache on first use.
        if not self._rebranchescache.nodebranchcache:
            nodebranchcache = {}        # rev -> index into branchcache
            branchcache = []            # branch names, indexed by bindex

            c = self.changelog

            for rule, name in self._rebranchescache.rebranches:
                # rule is a (startnode, endnode) pair: every rev on a
                # path from end back to start (exclusive of revs below
                # start) is recolored with this rule's name.
                if name not in branchcache:
                    branchcache.append(name)
                bindex=branchcache.index(name)
                start = self[rule[0]].rev()
                visit = [self[rule[1]].rev()]
                # reachable maps each walked rev to the list of its
                # children seen so far within the rule's range.
                reachable = {visit[0]:[]}
                while visit:
                    n = visit.pop(0)
                    if n == start:
                        continue
                    if n < 0:
                        # nullrev: walked past the root
                        continue
                    for p in c.parentrevs(n):
                        if p < start:
                            # parent lies outside the rule's range
                            continue
                        if p not in reachable:
                            reachable[p]=[n]
                            visit.append(p)
                        else:
                            reachable[p].append(n)
                nodebranchcache[start]=bindex
                if start in reachable:
                    # Flood forward from start through the recorded
                    # child links, tagging every rev actually between
                    # start and end.
                    visit = reachable[start]
                    while visit:
                        n = visit.pop(0)
                        if n in reachable and n not in nodebranchcache:
                            nodebranchcache[n]=bindex
                            visit.extend(reachable[n])

            self._rebranchescache.nodebranchcache = nodebranchcache
            self._rebranchescache.branchcache = branchcache

        rev = self[node].rev()
        if rev in self._rebranchescache.nodebranchcache:
            return encoding.tolocal(self._rebranchescache.branchcache[self._rebranchescache.nodebranchcache[rev]])

        # No rule covers this rev: fall back to (and memoize) the
        # changeset's own branch name.
        if default not in self._rebranchescache.branchcache:
            self._rebranchescache.branchcache.append(default)
        self._rebranchescache.nodebranchcache[rev]=self._rebranchescache.branchcache.index(default)
        return encoding.tolocal(default)
    localrepo.localrepository.nodebranch=nodebranch

    @propertycache
    def _rebranchescache(self):
        '''Returns a rebranchescache object that contains various rebranches related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class rebranchescache(object):
            def __init__(self):
                # These two define the list of rebranches for this repository.
                # rebranches lists rule-to-name tuples;
                # nodebranchcache/branchcache are populated lazily by
                # nodebranch() on first lookup.
                self.rebranches = None
                self.nodebranchcache = None
                self.branchcache = None

        cache = rebranchescache()
        cache.rebranches = self._findrebranches()

        return cache
    localrepo.localrepository._rebranchescache=_rebranchescache

    def _findrebranches(self):
        '''Do the hard work of finding rebranches. Return a list of
           rule/name tuples, with rule being a pair of binary nodes'''

        allrebranches = []

        findglobalrebranches(self.ui, self, allrebranches)
        readlocalrebranches(self.ui, self, allrebranches)

        return allrebranches
    localrepo.localrepository._findrebranches=_findrebranches

    # Overwrite changectx.branch(self) to call nodebranch function
    def branch(self):
        # NOTE(review): _changeset[5] appears to be the changeset's
        # "extra" dict (where hg records the branch) -- confirm against
        # the changelog.read() tuple layout of the targeted hg version.
        return self._repo.nodebranch(self._node, self._changeset[5].get("branch"))
    context.changectx.branch=branch
    #extensions.wrapcommand(commands.table, 'pull', override_pull)
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.