%Author: Zoltan Szabo
%Toolbox: ITE
%File: ITE / code / estimators / quick_tests / quick_test_KPP.m

%function [] = quick_test_KPP()
%Quick test for probability product kernel estimators: analytical expression vs estimated value as a function of the sample number. In the test, normal variables are considered. Note: specially, for rho=1/2 we get the Bhattacharyya kernel.

%Copyright (C) 2013 Zoltan Szabo ("http://www.gatsby.ucl.ac.uk/~szabo/", "zoltan (dot) szabo (at) gatsby (dot) ucl (dot) ac (dot) uk")
%This file is part of the ITE (Information Theoretical Estimators) Matlab/Octave toolbox.
%ITE is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
%the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
%This software is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
%MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
%You should have received a copy of the GNU General Public License along with ITE. If not, see <http://www.gnu.org/licenses/>.

%clear start:
    clear all; close all;

%parameters:
    distr = 'normal'; %fixed; the analytical kernel value below is only known for the normal case
    d = 1; %dimension of the distribution
    num_of_samples_v = [1000:1000:50*1000]; %sample numbers used for estimation
    %estimator, base:
        cost_name = 'PP_kNN_k';
    rho = 0.9; %parameter of the probability product kernel, >0 (rho=1/2: Bhattacharyya kernel)

%initialization:
    L = length(num_of_samples_v);
    co = K_initialization(cost_name,1,{'rho',rho,'a',rho-1,'b',rho}); %{'rho',rho,'a',rho-1,'b',rho}: set the 'rho', 'a' and 'b' fields
    num_of_samples_max = num_of_samples_v(end);
    K_hat_v = zeros(L,1); %vector of estimated probability product kernel values

%distr, d -> samples (Y1,Y2), analytical formula for the probability product kernel (K):
    switch distr
        case 'normal'
            %expectations:
                e2 = rand(d,1);
                e1 = e2;
            %(random) linear transformation applied to the data:
                A2 = rand(d);
                A1 = rand * A2; %(e2,A2) => (e1,A1) choice guarantees Y1<<Y2 (in practise, too)
            %covariance matrices:
                cov1 = A1 * A1.';
                cov2 = A2 * A2.';
            %generate samples:
                Y1 = A1 * randn(d,num_of_samples_max) + repmat(e1,1,num_of_samples_max); %A1xN(0,I)+e1
                Y2 = A2 * randn(d,num_of_samples_max) + repmat(e2,1,num_of_samples_max); %A2xN(0,I)+e2
            %analytical value of the probability product kernel (Ref.: Tony Jebara, Risi Kondor, and Andrew Howard. Probability product kernels. Journal of Machine Learning Research, 5:819-844, 2004):
                inv1 = inv(cov1);
                inv2 = inv(cov2);
                inv12 = inv(inv1+inv2);
                e12 = inv1 * e1 + inv2 * e2;
                K = (2*pi)^((1-2*rho)*d/2) * rho^(-d/2) * abs(det(inv12))^(1/2) * abs(det(cov1))^(-rho/2) * abs(det(cov2))^(-rho/2) * exp(-rho/2*(e1.'*inv1*e1 + e2.'*inv2*e2 - e12.'*inv12*e12));
        otherwise
            error('Distribution=?'); %guard against an unsupported distribution name
    end

%estimation:
    Tk = 0; %index of the sample number examined
    for num_of_samples = num_of_samples_v
        Tk = Tk + 1;
        K_hat_v(Tk) = K_estimation(Y1(:,1:num_of_samples),Y2(:,1:num_of_samples),co);
        disp(strcat('Tk=',num2str(Tk),'/',num2str(L))); %progress indicator
    end

%plot: estimated vs. analytical kernel value as a function of the sample number:
    plot(num_of_samples_v,K_hat_v,'r',num_of_samples_v,K*ones(L,1),'g');
    legend({'estimation','analytical value'});
    xlabel('Number of samples');
    ylabel('Probability product kernel');