"""
Implementation of the multi-task feature learning algorithm described in

.. [Argyriou_etal_2008] Argyriou, A., Evgeniou, T., Pontil, M. (2008).
   Convex multi-task feature learning. Machine Learning, 73(3), 243-272.
"""
import numpy as np
from numpy import dot, sqrt, array, diag, zeros, eye, ix_
from scipy.spatial.distance import pdist

import Orange
import _multitask as multitask

def get_weights(c):
    """Extract the linear weight vector from a trained classifier.

    Supports SVM classifiers with a linear kernel, LIBLINEAR classifiers
    and any regression classifier exposing a ``coefficients`` attribute.
    """
    if isinstance(c, Orange.classification.svm.SVMClassifier):
        # get_linear_svm_weights returns a dict keyed by feature;
        # read it out in the order of the domain's features.
        wd = Orange.classification.svm.get_linear_svm_weights(c)
        weights = [wd[f] for f in c.domain.features]
    elif isinstance(c, Orange.classification.svm.LinearClassifier):
        # intercept is the last weight (check by Ales)!
        weights = c.weights[0][:-1]
        assert len(c.domain.features) == len(weights)
    else:
        # e.g. linear regression classifiers
        weights = array(c.coefficients)
    return weights

def duplicate(data, groups):
    """Return a copy of `data` whose features are the concatenation of the
    given feature-index groups (features shared by several groups are
    duplicated, so each group maps to a distinct block of columns)."""
    domain = Orange.data.Domain(
        [data.domain.features[i] for g in groups for i in g],
        data.domain.class_var, class_vars=data.domain.class_vars)
    return Orange.data.Table(domain, data)

def transform_domain(domain, transformation):
    """Construct a new domain from a transformation matrix.

    Each row of `transformation` defines one new continuous feature as a
    linear combination of the features of `domain`.
    """
    features = []
    for i, row in enumerate(transformation):
        f = Orange.feature.Continuous('f%i' % (i + 1))
        # Bind `row` and `domain` as default arguments to avoid the
        # late-binding closure pitfall inside the loop.
        def tfun(ins, ret, weights=row, domain=domain):
            ins = Orange.data.Instance(domain, ins)
            # native()[-1] is the class value; use only the features
            return dot(weights, ins.native()[:-1])
        f.get_value_from = tfun
        features.append(f)
    dom = Orange.data.Domain(features, domain.class_var)
    return dom

class MTFeatLearner(Orange.regression.base.BaseRegressionLearner):
    """Multi-task feature learning algorithm from [Argyriou_etal_2008]_.

    Alternately solves the per-task regularization problems for a fixed
    feature covariance D and updates D for the fixed task weights W.
    """
    def __new__(cls, data=None, weights=0, **kwargs):
        self = Orange.regression.base.BaseRegressionLearner.__new__(
            cls, **kwargs)
        if data:
            # Called with data: initialize and train immediately
            # (standard Orange learner idiom).
            self.__init__(**kwargs)
            return self.__call__(data, weights)
        else:
            return self

    def __init__(self, learner=None, selection=False, gamma=1,
                 intercept=False, groups=None, max_iter=50, tol=1e-5,
                 norm_covered=0.99, cb=None, name='MTFeat', **kwargs):
        """
        :param learner: base learner for the per-task problems; when None,
            a closed-form ridge solution with parameter `gamma` is used.
        :param selection: if True, restrict D to diagonal matrices
            (feature selection instead of feature learning).
        :param gamma: regularization parameter of the closed-form solver.
        :param intercept: if True, center class values per task and keep
            the task means as intercepts.
        :param groups: optional list of feature-index groups; features in
            several groups are duplicated so D becomes block-structured.
        :param max_iter: maximum number of alternating iterations.
        :param tol: convergence threshold on the mean change of W.
        :param norm_covered: fraction of the spectrum the classifier's
            reduced domain must cover.
        :param cb: optional per-iteration callback.
        """
        super(MTFeatLearner, self).__init__()
        self.learner = learner
        self.selection = selection
        self.gamma = gamma
        self.intercept = intercept
        # Avoid the shared-mutable-default-argument pitfall.
        self.groups = groups if groups is not None else []
        self.max_iter = max_iter
        self.tol = tol
        self.norm_covered = norm_covered
        # No-op callback by default so __call__ can invoke it blindly.
        self.cb = cb if cb else lambda *args, **kwargs: None
        self.name = name

    def __call__(self, data, weights=0):
        groups = self.groups
        if groups and (sum(len(g) for g in self.groups) >
                       len(set(x for g in self.groups for x in g))):
            # Some features appear in more than one group: duplicate them
            # so each group indexes a distinct block of columns.
            data = duplicate(data, groups)
            groups, i = [], 0
            for g in self.groups:
                groups.append(range(i, i + len(g)))
                i += len(g)
        datas = multitask.split_by_task(data)
        tasks = sorted(datas.keys())
        training = [datas[t].to_numpy() for t in tasks]
        intercepts = []
        if self.intercept:
            # Center class values per task; the means become intercepts.
            intercepts = [y.mean() for _, y, _ in training]
            training = [(X, y - y.mean(), w) for X, y, w in training]
        dim = len(data.domain.features)
        domain = Orange.data.Domain([Orange.feature.Continuous(
            'f%i' % (i + 1)) for i in range(dim)], data.domain.class_var)
        W = zeros((dim, len(tasks)))
        U = eye(dim)
        D = U / dim
        s = diag(D)

        for i in range(self.max_iter):
            Wold = W.copy()

            # Compute D^{1/2} (in-place square root of the spectrum)
            sqrt(s, s)
            D_sqrt = diag(s) if self.selection else dot(U, dot(diag(s), U.T))

            # Solve the regularization problem for fixed D
            for t, (X, y, _) in enumerate(training):
                fX = dot(D_sqrt, X.T)
                if self.learner:
                    dt = Orange.data.Table(
                        domain, np.column_stack((fX.T, y)))
                    c = self.learner(dt)
                    w = get_weights(c)
                else:
                    # Closed-form ridge solution via the dual.
                    K = dot(fX.T, fX)
                    a = dot(np.linalg.inv(K + self.gamma * eye(K.shape[0])),
                            y)
                    w = dot(fX, a)
                W[:, t] = w
            W = dot(D_sqrt, W)

            # Update D (solve for fixed W)
            if self.selection:
                s = sqrt(np.sum(W**2, 1))
            elif groups:
                # Block-wise SVD, one block per feature group.
                U = zeros((dim, dim))
                s = zeros(dim)
                for g in groups:
                    Uj, sj, _ = np.linalg.svd(W[g, :])
                    if len(g) > len(tasks):
                        sj = np.hstack((sj, zeros(len(g) - len(tasks))))
                    U[ix_(g, g)] = Uj
                    s[g] = sj
            else:
                U, s, _ = np.linalg.svd(W)
                if dim > len(tasks):
                    s = np.hstack((s, zeros(dim - len(tasks))))
            s /= s.sum()
            s[s < 1e-10] = 0  # trim numerically-zero directions
            D = diag(s) if self.selection else dot(U, dot(diag(s), U.T))

            self.cb(iter=i, max_iter=self.max_iter, **locals())
            if np.linalg.norm(W - Wold) / W.size < self.tol:
                break

        return MTFeatClassifier(W, U, s, tasks, i, self.selection,
                                data.domain, intercepts, self.norm_covered,
                                name=self.name)

class MTFeatClassifier(Orange.classification.Classifier):
    """Classifier produced by :class:`MTFeatLearner`.

    Predicts with the per-task linear models in W and exposes the learned
    (reduced) feature representation via :obj:`new_domain`.
    """
    def __init__(self, W, U, s, tasks, iter, selection, domain, intercepts,
                 norm_covered, **kwargs):
        self.W = W
        self.U = U
        self.s = s
        self.tasks = tasks
        self.iter = iter
        self.selection = selection
        self.domain = domain
        self.intercepts = intercepts
        self.class_var = domain.class_var
        self.norm_covered = norm_covered
        # Store extra keyword attributes (e.g. `name` from the learner)
        # instead of silently discarding them.
        for k, v in kwargs.items():
            setattr(self, k, v)
        # Keep just enough directions to cover the requested fraction of
        # the (normalized) spectrum.
        ind = np.argsort(s)[::-1]
        s = np.sort(s)[::-1] if selection else s
        nrelevant = np.searchsorted(np.cumsum(s), norm_covered) + 1
        if selection:
            self.new_domain = Orange.data.Domain([domain.features[i]
                for i in ind[:nrelevant]], domain.class_var)
        else:
            self.transform = U.T[:nrelevant, :]
            self.new_domain = transform_domain(domain, self.transform)

    def __call__(self, instance, return_type=Orange.core.GetValue):
        ins = Orange.data.Instance(self.domain, instance)
        t = int(ins['task'])
        # Linear response of task t; native()[-1] (class value) excluded.
        f = dot(self.W[:, t], ins.native()[:-1])
        if self.intercepts:
            f += self.intercepts[t]
        if isinstance(self.domain.class_var, Orange.feature.Continuous):
            val = self.domain.class_var(f)
        else:
            # Discrete class: sign of the linear response.
            val = self.domain.class_var(int(f > 0))
        dist = Orange.statistics.distribution.Distribution(val.variable)
        dist[val] = 1. / (1. + np.exp(-f))
        if return_type == Orange.core.GetValue:
            return val
        elif return_type == Orange.core.GetBoth:
            return val, dist
        else:
            return dist

    def transform_data(self, data):
        """Transform data to the new (reduced) domain."""
        if self.selection:
            return Orange.data.Table(self.new_domain, data)
        else:  # transform in numpy for speed
            data = Orange.data.Table(self.domain, data)
            X, y, _ = data.to_numpy()
            mat = np.column_stack((dot(X, self.transform.T), y))
            fdata = Orange.data.Table(self.new_domain, mat)
            # The numpy round-trip drops metas; copy them back.
            for fins, ins in zip(fdata, data):
                for m in ins.get_metas():
                    fins[m] = ins[m]
            return fdata

    def task_distances(self, metric='euclidean', **kwargs):
        """Compute the task distance matrix.

        See scipy.spatial.distance.pdist for metrics and additional
        parameters.
        """
        dm = pdist(self.W.T, metric, **kwargs)
        t = len(self.tasks)
        dsm = Orange.misc.SymMatrix(t)
        i = 0
        # pdist returns a condensed matrix; unpack its upper triangle.
        for ti in range(t - 1):
            for tj in range(ti + 1, t):
                dsm[ti, tj] = dm[i]
                i += 1
        items = Orange.data.Table(Orange.data.Domain([Orange.feature.String(
            'task')], False), [[str(task)] for task in self.tasks])
        dsm.items = items
        return dsm

if __name__ == '__main__':
    school ='../datasets/school')
    train ='../datasets/train1')
    test ='../datasets/test1')
    mean = Orange.regression.mean.MeanLearner(name='Mean')
    ridge = Orange.regression.linear.LinearRegressionLearner(
        intercept=False, ridge_lambda=1, name='Ridge')
    mt_ridge = multitask.MultiTaskLearner(learner=ridge, name='Independent')
    mtf = MTFeatLearner(learner=ridge, max_iterations=30, name='MTFeat')
    res = Orange.evaluation.testing.learn_and_test_on_test_data(
        learners=[mean, ridge, mt_ridge, mtf], learn_set=train, test_set=test)
    print zip(res.classifier_names, Orange.evaluation.scoring.RMSE(res))