Commits

Jian Zhou committed 4ee3639 Merge

merge

Comments (0)

Files changed (39)

 namespace LIBSVM {
 
 #include "libsvm.h"
+  struct svm_node* CLIBSVM::x_space = NULL;
 
-bool CLIBSVM::initialize() {
+  bool CLIBSVM::initialize() {
 
-	/* set default */
+    /* set default */
 
-        parm.cache_size = 100;
-        parm.C = 0.01;
-	parm.eps = 1e-3;
-        parm.svm_type = C_SVC;
-        parm.p = 0.1;
-        parm.shrinking = 1;
-        parm.nr_weight = 0;
-        parm.weight_label = NULL;
-        parm.weight = NULL;
-        parm.probability = 0;
-        parm.nu = 0.5;
-        parm.coef0 = 0;
-        parm.gamma = 0;
-        parm.degree = 3;
-        parm.kernel_type = LINEAR;
+    parm.cache_size = 100;
+    parm.C = 0.01;
+    parm.eps = 1e-3;
+    parm.svm_type = C_SVC;
+    parm.p = 0.1;
+    parm.shrinking = 1;
+    parm.nr_weight = 0;
+    parm.weight_label = NULL;
+    parm.weight = NULL;
+    parm.probability = 0;
+    parm.nu = 0.5;
+    parm.coef0 = 0;
+    parm.gamma = 0;
+    parm.degree = 3;
+    parm.kernel_type = LINEAR;
 
-	return true;
-}
+    model = NULL;
 
-bool CLIBSVM::parms_check() {
-	if (parm.C < 0) {
-		fprintf(
-				stderr,
-				"\nTrade-off between training error and margin is not set (C<0)!\nC value will be set to default value. Clight = Cpef * 100 / n \n");
-		fprintf(stderr, "be less than 1.0 !!!\n\n");
-		return false;
-	}
-	if (parm.eps <= 0) {
-		fprintf(stderr,
-				"\nThe epsilon parameter must be greater than zero!\n\n");
-		return false;
-	}
+    balance = 0; //off by default
 
-        //TODO: add more parameter checks 
+    return true;
+  }
 
-	return true;
-}
+  bool CLIBSVM::parms_check() {
+    if (parm.C < 0) {
+      fprintf(stderr,
+          "\nThe trade-off between training error and margin is not set (C < 0)!\n"
+          "Please supply a non-negative value for C.\n\n");
+      return false;
+    }
+    if (parm.eps <= 0) {
+      fprintf(stderr,
+          "\nThe epsilon parameter must be greater than zero!\n\n");
+      return false;
+    }
 
-SAMPLE * CLIBSVM::CreateSample(Sleipnir::CPCL& PCL, vector<SVMLabel> SVMLabels) {
-	size_t i, j, k, iGene, iProblem, numFeatures, numLabels, max_index;
-        float d;
+    if (parm.nu < 0 || parm.nu > 1) {
+      fprintf(stderr, "nu parameter must be between 0 and 1\n");
+      return false;
+    }
 
-        struct svm_problem* prob;
-        struct svm_node* x_space;
+    //TODO: add more parameter checks 
 
-        prob = Malloc(struct svm_problem,1);
+    return true;
+  }
 
-//        prob->l = 0;//number of labels in PCL
-        numFeatures = PCL.GetExperiments();
-        numLabels = 0;
+  void CLIBSVM::SetXSpace(Sleipnir::CPCL& PCL) {
+    size_t j, k, iGene, numFeatures, numLabels;
+    std::vector<std::string> vecGeneNames;
+    string GeneName;
+    float d;
 
-        cout << "number of features: " << numFeatures << endl;
-	
-        iProblem = 0;
+    numFeatures = PCL.GetExperiments();
+    numLabels = PCL.GetGenes();
+    vecGeneNames = PCL.GetGeneNames();
 
-	for (i = 0; i < SVMLabels.size(); i++) {
-                if (!SVMLabels[i].hasIndex){
-                  SVMLabels[i].SetIndex(PCL.GetGene(SVMLabels[i].GeneName));
-                }
-		iGene = SVMLabels[i].index;
-		if (iGene != -1) {
-                  numLabels++;
-		}
-	}
- 
-        prob->l = numLabels;
-        prob->y = Malloc(double,numLabels);
-        prob->x = Malloc(struct svm_node *, numLabels);
-        x_space = Malloc(struct svm_node, (1+numFeatures) * numLabels);
+    cerr << "total data" << endl;
+    cerr << "number of features (columns) used: " << numFeatures << endl;
+    cerr << "number of labels in data: " << numLabels << endl;
+    cerr << "number of gene (rows) names: " << vecGeneNames.size() << endl;
 
+    x_space = Malloc(struct svm_node, (1+numFeatures) * numLabels);
 
-        max_index = numFeatures;
+    j = 0;//element index
 
-        j = 0;//element index
+    for ( std::vector<std::string>::iterator it = vecGeneNames.begin(); it != vecGeneNames.end(); ++it) {
+      GeneName = *it;
+      iGene = PCL.GetGene(GeneName); 
 
-        for (i = 0; i < SVMLabels.size(); i++) {
-            iGene = SVMLabels[i].index;
+      for ( k = 0; k < numFeatures; k++){
+        x_space[j].index = k;
+        if (!Sleipnir::CMeta::IsNaN(d = PCL.Get(iGene, k))) {
+          x_space[j].value = d;
+        }else{
+          // impute 0 for missing values
+          x_space[j].value = 0;
+        }
+        j++;
+      }
 
-            if (iGene != -1){
-              (prob->x)[i] = &x_space[j];
-              (prob->y)[i] = SVMLabels[i].Target;
+      x_space[j].index = -1;
+      j++;
+    }
 
-              for(k = 0; k < numFeatures; k++){
-                x_space[j].index = k;
-                if (!Sleipnir::CMeta::IsNaN(d = PCL.Get(iGene, k))) {
-                  x_space[j].value = d;
-                }else{
-                  x_space[j].value = 1; //TODO: make this a flag!!!
-                  //if missing value??? SVMPerf imputes 0 ... what about gene i to gene i ? should impute 1?
-                }
-                j++;
-              }         
-              x_space[j].index = -1;
-              j++;
 
-            }
-        }
+  }
 
-        SAMPLE* pSample = new SAMPLE;
-//        pSample = Malloc(SAMPLE,1);
+  SAMPLE * CLIBSVM::CreateSample(Sleipnir::CPCL& PCL, vector<SVMLabel> SVMLabels) {
+    size_t i, j, k, s, iGene, iProblem, numFeatures, numLabels, max_index;
+    float d;
 
-        pSample->n = prob->l;//number of labels
-        pSample->problems = prob;
-        pSample->numFeatures = numFeatures;
-        
-        cout << ((pSample->problems)->y)[0] << endl;
-        cout << ((pSample->problems)->y)[1] << endl;
-PrintSample(*pSample);
-	return pSample;
-}
+    struct svm_problem* prob;
 
+    prob = Malloc(struct svm_problem,1);
 
-/*
-SAMPLE * CLIBSVM::CreateSample(Sleipnir::CDat& Dat, vector<SVMLabel> SVMLabels) {
-	size_t i, j, k, iGene, iProblem, numFeatures, max_index;
-        float d;
+    numFeatures = PCL.GetExperiments();
+    numLabels = 0;
 
-        struct svm_problem* prob;
-        struct svm_node *x_space;
+    iProblem = 0;
 
-        prob->l = 0;//number of labels in Dat
-        numFeatures = Dat.GetGenes();
-        iProblem = 0;
+    for (i = 0; i < SVMLabels.size(); i++) {
+      if (!SVMLabels[i].hasIndex){
+        SVMLabels[i].SetIndex(PCL.GetGene(SVMLabels[i].GeneName));
+      }
+      iGene = SVMLabels[i].index;
+      if (iGene != -1) {
+        numLabels++;
+      }
+    }
 
-	for (i = 0; i < SVMLabels.size(); i++) {
-          iGene = Dat.GetGene(SVMLabels[i].GeneName);
-          if (iGene != -1) {
-            (prob->l)++;
-          }
-	}
+    cerr << "sampled data" << endl;
+    cerr << "number of features used: " << numFeatures << endl;
+    cerr << "number of labels given: " << SVMLabels.size() << endl;
+    cerr << "number of labels in data: " << numLabels << endl;
 
- 
-        prob->y = Malloc(double,prob->l);
-        prob->x = Malloc(struct svm_node *, prob->l);
-        x_space = Malloc(struct svm_node, numFeatures * (prob->l));
+    prob->l = numLabels;
+    prob->y = Malloc(double,numLabels);
+    prob->x = Malloc(struct svm_node *, numLabels);
 
-        max_index = numFeatures;
-        j = 0;
+    if(x_space == NULL) {
+      SetXSpace(PCL);
+    }
 
-        for (i = 0; i < SVMLabels.size(); i++) {
-            iGene = Dat.GetGene(SVMLabels[i].GeneName);
-            if (iGene != -1){
-              (prob->x)[i] = &x_space[j];
-              (prob->y)[i] = SVMLabels[i].Target;
-              for(k = 0; k < numFeatures; k++){
-                x_space[j].index = k;
-                if (!Sleipnir::CMeta::IsNaN(d = Dat.Get(iGene, k))) {
-                  x_space[j].value = d;
-                }else{
-                  x_space[j].value = 1; //TODO: make this a flag!!!
-                  //if missing value??? SVMPerf imputes 0 ... what about gene i to gene i ? should impute 1?
-                }
-              }
-              j++;
-            }
-        }
+    max_index = numFeatures;
 
-        SAMPLE* pSample;
-        pSample = Malloc(SAMPLE,1);
-        
-        pSample->n = prob->l;
-        pSample->problems = prob;
+    s = 0;//sample index
+    for (i = 0; i < SVMLabels.size(); i++) {
+      iGene = SVMLabels[i].index;
 
-	return pSample;
-}*/
+      if (iGene != -1){
+        (prob->x)[s] = &x_space[iGene*(1+numFeatures)];
+        (prob->y)[s] = SVMLabels[i].Target;
+        s++;
+      }
+    }
 
+    SAMPLE* pSample = new SAMPLE;
 
-vector<Result> CLIBSVM::Classify(Sleipnir::CPCL &PCL,
-        vector<SVMLabel> SVMLabels) {
+    pSample->n = prob->l;//number of labels
+    pSample->problems = prob;
+    pSample->numFeatures = numFeatures;
+    return pSample;
+  }
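A note on the layout used above: SetXSpace lays out one row of svm_node per gene, numFeatures feature nodes followed by a terminator node with index -1, so gene g's row starts at offset g*(1+numFeatures), which is exactly the offset CreateSample stores into prob->x. A minimal sketch of that arithmetic (the helper name is hypothetical, not part of this commit):

    // Hypothetical helper, not in this commit: start of gene g's row in the shared
    // x_space (numFeatures feature nodes + 1 terminator node with index == -1).
    static struct svm_node* GeneRow(struct svm_node* x_space, size_t g, size_t numFeatures) {
      return &x_space[g * (1 + numFeatures)];
    }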
+
+  //TODO: create sample for dab/dat files
+  //
+
+  vector<Result> CLIBSVM::Classify(Sleipnir::CPCL &PCL,
+      vector<SVMLabel> SVMLabels) {
     size_t i, j, iGene;
     double predict_label;
     double* dec_values;
     int svm_type = svm_get_svm_type(model);
     int nr_class = svm_get_nr_class(model);
 
-cerr << nr_class << endl;
-cerr << pSample->n << endl;;
-
     dec_values = Malloc(double, nr_class*(nr_class-1)/2);
     vecResult.resize(pSample->n);
 
-cerr << "number of samples: " << vecResult.size() << endl;
-cerr << "length of svm labels: " << SVMLabels.size() << endl;
-
     j= 0; //pSample index
     for(i = 0 ; i < SVMLabels.size() ; i++){
       if(!SVMLabels[i].hasIndex){//assume createSample sequentially added to pSample TODO: currently hacky
       if (iGene != -1 ) {    
         x = pSample->problems->x[j];
         predict_label = svm_predict_values(model,x, dec_values);
-        dec_value = dec_values[0]; //assume that positive class is the first class TODO: currently hackly
+        dec_value = dec_values[0]; //assume that positive class is the first class TODO: currently hacky
 
         vecResult[j].GeneName = SVMLabels[i].GeneName;
         vecResult[j].Target = SVMLabels[i].Target;
         vecResult[j].Value = dec_value;
-        
+
         j++;
 
       }
     }
+
+    delete pSample; //CreateSample allocates with new; delete runs the SAMPLE destructor
     free(dec_values);
+    return vecResult;
+  }
 
-    return vecResult;
-}
 
-    
-//TODO: classify for dab/dat files
-//
+  //TODO: classify for dab/dat files
+  //
 
 }
 
 namespace LIBSVM {
 
-extern "C" {
+
+  extern "C" {
 #define class Class2
 #include <libsvm/svm.h>
 #undef class
 
-}
-
-typedef struct sample { /* a sample is a set of examples */
-   size_t     n;            /* n is the total number of examples */
-   size_t  numFeatures; 
-   struct svm_problem *problems;
-} SAMPLE;
- 
-
-class SVMLabel {
-public:
-	string GeneName;
-	double Target;
-	size_t index;
-	bool hasIndex;
-	SVMLabel(std::string name, double target) {
-		GeneName = name;
-		Target = target;
-		hasIndex = false;
-		index = -1;
-	}
-
-	SVMLabel() {
-		GeneName = "";
-		Target = 0;
-	}
-	void SetIndex(size_t i) {
-		index = i;
-		hasIndex = true;
-	}
-};
-
-class Result {
-public:
-	std::string GeneName;
-	double Target;
-	double Value;
-	int CVround;
-	int Rank;
-	Result() {
-		GeneName = "";
-		Target = 0;
-		Value = Sleipnir::CMeta::GetNaN();
-	}
-
-	Result(std::string name, int cv = -1) {
-		GeneName = name;
-		Target = 0;
-		Value = 0;
-		CVround = cv;
-		Rank = -1;
-	}
-	string toString() {
-		stringstream ss;
-		ss << GeneName << '\t' << Target << '\t' << Value << '\t' << "CV"
-				<< CVround;
-		if (Rank != -1) {
-			ss << '\t' << Rank;
-		}
-		return ss.str();
-	}
-
-};
-
-enum EFilter {
-	EFilterInclude = 0, EFilterExclude = EFilterInclude + 1,
-};
-
-//this class encapsulates the model and parameters and has no associated data
-
-class CLIBSVM {
-public:
-  //struct svm_parameter parm;
-  struct svm_model* model;
-  struct svm_parameter parm;
-  
-
-  CLIBSVM() {
-    initialize();
   }
 
-  void SetSVMType(int type) {
-    parm.svm_type = type;
-  }
-  //void SetLossFunction(size_t loss_f) {
-    //libsvm has only one loss function
-  //}
-
-  void SetTradeoff(double tradeoff) {
-    parm.C = tradeoff; //TODO: only applicable for vanilla svm
-  }
-
-  void SetKernel(int K) {
-    parm.kernel_type = K;
-  }
-
-  void SetPolyD(int D) {
-    parm.degree = D;
-  }
-
-  //void UseCPSP() { // unavailabe for libsvm
-  //}
-  //
-
-  void SetRBFGamma(double g) {
-    parm.gamma = g;
-    //UseCPSP not available for libsvm
-  }
-
-  //void UseSlackRescaling(){  }
-  //void UseMarginRescaling() { }
-  //void SetPrecisionFraction(double frac) { }
-
-  void ReadModel(char* model_file) {
-    FreeModel();
-    model = svm_load_model(model_file); 
-  }
-
-  void FreeModel() {
-    svm_free_and_destroy_model(&model);
-  }
-
-  void WriteModel(char* model_file) {
-    svm_save_model(model_file, model);
-  }
-  
-
-  //static members process data
-  //single gene predictions
-
-  //TODO: add functions to handle PCL files
-  //creates a svm_problem for a given gene index in a microarray set
-  //static svm_problem* CreateProblem(Sleipnir::CPCLSet &PCLSet, size_t iGene, size_t iProblem);
-
-  //creates a svm_problem for a given gene in a Dat file using all other genes as features
-  //static svm_problem* CreateProblem(Sleipnir::CDat& Dat, size_t iGene, size_t iProblem);
-
-  //Creates a sample using a PCLset and SVMlabels Looks up genes by name.
-  //static SAMPLE* CreateSample(Sleipnir::CPCLSet &PCLSet,
-  //			vector<SVMLabel> SVMLabels);
-
-  //Creates a sample of svm_problems using a single PCL and SVMlabels Looks up genes by name.
-  static SAMPLE* CreateSample(Sleipnir::CPCL &PCL, vector<SVMLabel> SVMLabels);
-
-  //Same as above except creates bootstrap samples and does not duplicate data
-  //static SAMPLE** CreateSampleBootStrap(Sleipnir::CPCL &PCL,
-  //	vector<SVMLabel>& SVMLabels, 
-  //      vector<vector<size_t> > vecvecIndex);
-
-  //Creates a sample using a Dat and SVMlabels. Looks up genes by name
-  static SAMPLE* CreateSample(Sleipnir::CDat& CDat,
-			vector<SVMLabel> SMVLabels);
-
-  //Classify single genes
-  vector<Result> Classify(Sleipnir::CPCL& PCL, vector<SVMLabel> SVMLabels);
-  //vector<Result> Classify(Sleipnir::CPCLSet& PCLSet,
-  //			vector<SVMLabel> SVMLabels);
-  vector<Result> Classify(Sleipnir::CDat& Dat, vector<SVMLabel> SVMLabels);
-
-  //MEMBER functions wraps learning
-  void Learn(SAMPLE &sample) {
-    //only L2 for LibSVM
-    //cerr << "SLACK NORM =" << struct_parm.slack_norm << endl;
-    //slack_norm = type of regularization
-
-    //Take care of the labels here
-    size_t i;
-    size_t numn, nump;
-
-
-    struct svm_problem* prob = sample.problems;
-
-    cout << "alsdjfaslkfjd" << endl;
-
-    numn = nump = 0;
-
-    for(i = 0; i < sample.n; i++){
-      if (((*prob).y)[i] > 0){
-        nump ++;
-      }else{
-        numn ++;
-      }
+  typedef struct sample { /* a sample is a set of examples */
+    size_t     n;            /* n is the total number of examples */
+    size_t  numFeatures; 
+    struct svm_problem *problems;
+    sample() {
+      n = 0;
+      numFeatures = 0;
+      problems = NULL;
     }
 
-cout << "number of positives: " << nump << endl;
-cout << "number of negatives: " << numn << endl;
-cout << "cache size: " << parm.cache_size << endl;
-cout << "kernel_type: " << parm.kernel_type << endl;
-cout << "svm_type: " << parm.svm_type << endl;
-const char* output = svm_check_parameter( prob, &parm ) ;//returns null if no issues..
-printf("%s | ", output);
-cout << "an svm_node index: " << (((*prob).x)[0][0]).index << endl;
-cout << "an svm_node value: " << (((*prob).x)[0][0]).value << endl;
-cout.flush();
-//cout <<      svm_check_parameter(prob,&parm) << endl;
-    //no need for rescaling labels because only one loss function for libsvm\
+    ~sample(){
+      //svm_problem has no destructor of its own; release its arrays, then the struct
+      if (problems != NULL) {
+        free(problems->y);
+        free(problems->x);
+        free(problems);
+      }
+      problems = NULL;
+    }
+  } SAMPLE;
 
-    if(parms_check()){
-//cout <<      svm_check_parameter(prob,&parm) << endl;
-//      struct svm_problem* prob = sample.problems;        //sample.problem
-//      cout << "here: " << (*prob).l << endl;
-      model = svm_train(prob,&parm);
-      cerr << "done learning model" << endl;
-cout << svm_get_svm_type(model) << endl;
-    }else{
-      cerr << "invalid parms" << endl;
-    }
-  }
 
-  static void FreeSample(SAMPLE s){
-cerr << "free sample: " << endl;
-PrintSample(s);
+  class SVMLabel {
+    public:
+      string GeneName;
+      double Target;
+      size_t index;
+      bool hasIndex;
 
-    FreeProblem(s.problems, s.numFeatures);
-    //free(&s);    
-  }
+      SVMLabel(std::string name, double target) {
+        GeneName = name;
+        Target = target;
+        hasIndex = false;
+        index = -1;
+      }
 
-  static void FreeProblem(svm_problem *prob, size_t numFeatures){
-    //int i = prob->l; //number of examples
-    size_t i, j ;
-    i = j = 0;
+      SVMLabel() {
+        GeneName = "";
+        Target = 0;
+      }
+      void SetIndex(size_t i) {
+        index = i;
+        hasIndex = true;
+      }
+  };
 
-//PrintProblem(prob);
+  class Result {
+    public:
+      std::string GeneName;
+      double Target;
+      double Value;
+      int CVround;
+      int Rank;
+      Result() {
+        GeneName = "";
+        Target = 0;
+        Value = Sleipnir::CMeta::GetNaN();
+      }
 
-    free(prob->y);
+      Result(std::string name, int cv = -1) {
+        GeneName = name;
+        Target = 0;
+        Value = 0;
+        CVround = cv;
+        Rank = -1;
+      }
+      string toString() {
+        stringstream ss;
+        ss << GeneName << '\t' << Target << '\t' << Value << '\t' << "CV"
+          << CVround;
+        if (Rank != -1) {
+          ss << '\t' << Rank;
+        }
+        return ss.str();
+      }
 
-    free(prob->x);
+  };
 
-//    for(i = 0 ; i < prob->l ; i++){
-//      for(j = 0 ; j < numFeatures ; j ++){
+  enum EFilter {
+    EFilterInclude = 0, EFilterExclude = EFilterInclude + 1,
+  };
 
-//PrintNode((prob->x)[i][j]);
+  //this class encapsulates the model and parameters and has no associated data
 
-//        free((prob->x)[i]);
-//      }
-//    }
+  class CLIBSVM {
+    public:
+      struct svm_model* model;
+      struct svm_parameter parm;
+      int balance;
 
-//    free(prob); ??? why can't i free?
-    return;
-  }
+      static struct svm_node *x_space;
 
-  static void PrintSample(SAMPLE s){
-    PrintProblem(s.problems);
-cerr << "number of labels: " << s.n << endl;
-  }
+      CLIBSVM() {
+        initialize();
+      }
 
-  static void PrintProblem(svm_problem *prob){
-    size_t i, j ;
-    i = j = 0;
+      ~CLIBSVM() {
+        svm_free_and_destroy_model( &model );
+        model = NULL;
+      }
 
-//    free(prob->y);
+      void SetBalance(int bal){
+        balance = bal;
+      }
 
-    for(i = 0 ; i < 3 ; i++){
-cerr << "label: " << (prob->y)[i] << endl;
-      for(j = 0 ; j < 2 ; j ++){
+      void SetSVMType(int type) {
+        parm.svm_type = type;
+      }
 
-PrintNode((prob->x)[i][j]);
+      void SetTradeoff(double tradeoff) {
+        parm.C = tradeoff; //TODO: only applicable for vanilla svm
+      }
 
-//        free(&((prob->x)[i][j]));
+      void SetKernel(int K) {
+        parm.kernel_type = K;
       }
-    }
 
-//    free(prob);
-    return;
-  }
+      void SetPolyD(int D) {
+        parm.degree = D;
+      }
 
-  static void PrintNode(svm_node node){
-    cerr << "index: " << node.index << endl;
-    cerr << "value: " << node.value << endl;
-  }
+      void SetRBFGamma(double g) {
+        parm.gamma = g;
+      }
 
+      void SetNu(double nu) {
+        parm.nu = nu;
+      }
 
-  //no pairwise learning for libSVM wrapper
+      void ReadModel(char* model_file) {
+        FreeModel();
+        model = svm_load_model(model_file); 
+      }
 
-  bool parms_check();
-  bool initialize();
-	
-  // functions to convert probablity
-  //void sigmoid_train(Sleipnir::CDat& Results, vector<SVMLabelPair*>& SVMLabels, float& A, float& B);
-  //void sigmoid_predict(Sleipnir::CDat& Results, vector<SVMLabelPair*>& SVMLabels, float A, float B);
-        
-        //not sure exactly what this does in svmperf compare to just ReadModel
-	// read in a SVM model file that's only has the w vector written out for linear kernel
-/*	void ReadModelLinear(char* model_file) {
-	  FreeModel();
-	  structmodel = read_struct_model_w_linear(model_file, &struct_parm);
-	}*/
-	
-	//STRUCTMODEL read_struct_model_w_linear(char *file, STRUCT_LEARN_PARM *sparm);
-};
+      void FreeModel() {
+        svm_free_and_destroy_model(&model);
+      }
+
+      void WriteModel(char* model_file) {
+        svm_save_model(model_file, model);
+      }
+
+
+      //static members process data
+      //
+
+      static void SetXSpace(Sleipnir::CPCL& PCL);
+
+      //single gene predictions
+
+      //TODO: add functions to handle PCL files
+
+      //Creates a sample of svm_problems using a single PCL and SVMlabels Looks up genes by name.
+      static SAMPLE* CreateSample(Sleipnir::CPCL &PCL, vector<SVMLabel> SVMLabels);
+
+      //TODO: Same as above except creates bootstrap samples and does not duplicate data
+
+      //Creates a sample using a Dat and SVMlabels. Looks up genes by name
+      static SAMPLE* CreateSample(Sleipnir::CDat& CDat,
+          vector<SVMLabel> SMVLabels);
+
+      //Classify single genes
+      vector<Result> Classify(Sleipnir::CPCL& PCL, vector<SVMLabel> SVMLabels);
+
+
+      //MEMBER functions wraps learning
+      void Learn(SAMPLE &sample) {
+        //only L2 for LibSVM
+        //cerr << "SLACK NORM =" << struct_parm.slack_norm << endl;
+        //slack_norm = type of regularization
+
+        //Take care of the labels here
+        size_t i;
+        size_t numn, nump;
+
+        struct svm_problem* prob = sample.problems;
+
+        numn = nump = 0;
+
+        for(i = 0; i < sample.n; i++){
+          if (((*prob).y)[i] > 0){
+            nump ++;
+          }else{
+            numn ++;
+          }
+        }
+
+        if (balance) {
+          cerr << "balancing the weights between postivies and negatives. " << endl;
+          parm.nr_weight = 2;
+          parm.weight_label = (int *) realloc(parm.weight_label, sizeof(int)*parm.nr_weight);
+          parm.weight = (double *) realloc(parm.weight, sizeof(double)*parm.nr_weight);
+          parm.weight_label[0] = 1;
+          parm.weight[0] = numn;
+          parm.weight_label[1] = -1;
+          parm.weight[1] = nump;
+        }
+
+        if(parms_check()){
+          model = svm_train(prob,&parm);
+        }else{
+          cerr << "invalid parameters; model not trained" << endl;
+        }
+        prob = NULL;
+
+      }
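On the balancing branch above: libsvm multiplies C by weight[i] for the class named in weight_label[i], so setting the weight of class +1 to numn and of class -1 to nump scales each class's penalty by the size of the other class. For example, with C = 0.01, 20 positives, and 980 negatives, the effective C is 0.01*980 = 9.8 for positives and 0.01*20 = 0.2 for negatives.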
+
+      static void PrintSample(SAMPLE s){
+        PrintProblem(s.problems);
+      }
+
+      static void PrintProblem(svm_problem *prob){
+        size_t i, j ;
+        i = j = 0;
+
+        for(i = 0 ; i < 3 ; i++){
+          for(j = 0 ; j < 2 ; j ++){
+            PrintNode((prob->x)[i][j]);
+          }
+        }
+
+        return;
+      }
+
+      static void PrintNode(svm_node node){
+        cerr << "index: " << node.index << endl;
+        cerr << "value: " << node.value << endl;
+      }
+
+
+      //no pairwise learning for libSVM wrapper
+
+      bool parms_check();
+      bool initialize();
+
+      //TODO: functions to convert probability
+
+  };
 }
 
 #endif // NO_SVM_LIBSVM
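For orientation, a minimal usage sketch of the wrapper as it stands after this change. This is an illustration, not code from this commit: it assumes the Sleipnir headers are included, that PCL is an already-loaded Sleipnir::CPCL, that labels was populated elsewhere, and that C_SVC/LINEAR are visible from the libsvm header included above.

    #include <iostream>
    #include <vector>
    #include "libsvm.h"   // the wrapper declared above
    using namespace LIBSVM;

    void ExampleTrainAndClassify(Sleipnir::CPCL& PCL, std::vector<SVMLabel>& labels) {
      CLIBSVM svm;                  // initialize(): default parm, model = NULL, balance off
      svm.SetSVMType(C_SVC);
      svm.SetKernel(LINEAR);
      svm.SetTradeoff(1.0);
      svm.SetBalance(1);            // Learn() will weight the classes by numn/nump

      SAMPLE* pSample = CLIBSVM::CreateSample(PCL, labels); // builds the static x_space on first use
      svm.Learn(*pSample);

      std::vector<Result> results = svm.Classify(PCL, labels);
      for (size_t i = 0; i < results.size(); i++)
        std::cerr << results[i].toString() << std::endl;

      delete pSample;               // SAMPLE destructor releases the svm_problem arrays
    }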
 				for (j = (i + 1); j < Dat.GetGenes(); ++j)
 					if (!CMeta::IsNaN(d = Dat.Get(i, j)) && (d < dCutoff))
 						Dat.Set(i, j, CMeta::GetNaN());
+
+		if(pMeasure->GetName()=="pearson" || pMeasure->GetName()=="pearnorm"){
+			vector<unsigned long> bins;
+			bins.resize(55);
+			float upper = 0;
+			float lower = 0;
+			if(pMeasure->GetName()=="pearson"){
+				upper = 1.0;
+				lower = -1.0;
+			}else if(pMeasure->GetName()=="pearnorm"){
+				upper = 5.0;
+				lower = -5.0;
+			}
+			float bin_size = (upper - lower) / 50;
+			for(i=0; i<55; i++)
+				bins[i] = 0;
+			for(i=0; i<Dat.GetGenes(); i++){
+				for(j=i+1; j<Dat.GetGenes(); j++){
+					d = Dat.Get(i,j);
+					if(CMeta::IsNaN(d)) continue;
+					int b = (int) ((d - lower) / bin_size);
+					if(b<0){
+						bins[0]++;
+						continue;
+					}
+					if(b>=55){
+						bins[54]++;
+						continue;
+					}
+					bins[b]++;
+				}
+			}
+			g_CatSleipnir().info(
+			"Distribution of distances: bin size: %.5f, number of bins: %d, min bin value: %.5f, max bin value: %.5f",
+			bin_size, 55, lower, upper);
+			for(i=0; i<55; i++){
+				g_CatSleipnir().info("%lu\t%lu", i, bins[i]);
+			}
+		}
+
 	}
 
 	return 0;
 				for (j = (i + 1); j < Dat.GetGenes(); ++j)
 					if (!CMeta::IsNaN(d = Dat.Get(i, j)) && (d < dCutoff))
 						Dat.Set(i, j, CMeta::GetNaN());
+
+		if(pMeasure->GetName()=="pearson" || pMeasure->GetName()=="pearnorm"){
+			vector<unsigned long> bins;
+			bins.resize(55);
+			float upper = 0;
+			float lower = 0;
+			if(pMeasure->GetName()=="pearson"){
+				upper = 1.0;
+				lower = -1.0;
+			}else if(pMeasure->GetName()=="pearnorm"){
+				upper = 5.0;
+				lower = -5.0;
+			}
+			float bin_size = (upper - lower) / 50;
+			for(i=0; i<55; i++)
+				bins[i] = 0;
+			for(i=0; i<Dat.GetGenes(); i++){
+				for(j=i+1; j<Dat.GetGenes(); j++){
+					d = Dat.Get(i,j);
+					if(CMeta::IsNaN(d)) continue;
+					int b = (int) ((d - lower) / bin_size);
+					if(b<0){
+						bins[0]++;
+						continue;
+					}
+					if(b>=55){
+						bins[54]++;
+						continue;
+					}
+					bins[b]++;
+				}
+			}
+			g_CatSleipnir().info(
+			"Distribution of distances: bin size: %.5f, number of bins: %d, min bin value: %.5f, max bin value: %.5f",
+			bin_size, 55, lower, upper);
+			for(i=0; i<55; i++){
+				g_CatSleipnir().info("%lu\t%lu", i, bins[i]);
+			}
+		}
 	}
-
 	return 0;
 }
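A quick check of the binning arithmetic added in the two hunks above: bin_size = (upper - lower) / 50, and each value d maps to

    b = clamp( (int)((d - lower) / bin_size), 0, 54 )

so for pearnorm (lower = -5, upper = 5, bin_size = 0.2) a value of 0.73 lands in bin 28, anything below -5 is counted in bin 0, and anything at or above 5.8 ends up in bin 54.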
 
 #include <gsl/gsl_permute_vector_float.h>
 
 using namespace std;
-typedef unsigned short ushort;
+//typedef unsigned short ushort;
+typedef unsigned int utype;
+//#define MAX_UTYPE 65535
+const utype MAX_UTYPE = 4294967295;
 #endif
 
 

src/seekcentral.cpp

 	m_mapstrintDataset.clear();
 	m_mapstrintGene.clear();
 
-	ushort i, j;
+	utype i, j;
 	for(i=0; i<m_vc.size(); i++){
 		if(m_vc[i]==NULL) continue;
 		delete m_vc[i];
 bool CSeekCentral::CalculateRestart(){
 	set<string> ss;
 	m_mapLoadTime.clear();
-	ushort i, prev;
+	utype i, prev;
 	prev = 0;
 	for(i=0; i<m_vecstrAllQuery.size(); i++){
 		if(m_vecstrAllQuery[i].size()>m_maxNumDB){
 	ss.clear();
 	//check
 	vector<char> vc;
-	ushort tot = 0;
+	utype tot = 0;
 	CSeekTools::InitVector(vc, m_vecstrAllQuery.size(), (char) 0);
-	map<ushort, vector< vector<string> > >::const_iterator ci =
+	map<utype, vector< vector<string> > >::const_iterator ci =
 		m_mapLoadTime.begin();
 	for(; ci!=m_mapLoadTime.end(); ci++) tot+=(ci->second).size();
 	vc.clear();
 	copy(src->m_vecstrDP.begin(), src->m_vecstrDP.end(), m_vecstrDP.begin());
 
 	m_quant = src->m_quant;
-	ushort i, j;
+	utype i, j;
 	omp_set_num_threads(m_numThreads);
 
 	m_iDatasets = m_vecstrDatasets.size();
 //Checks how many datasets contain the query
 //requires the queries and searchdatasets to be loaded!
 bool CSeekCentral::CheckDatasets(const bool &replace){
-	ushort dd, j;
-	ushort l;
+	utype dd, j;
+	utype l;
 	stringstream ss; //search dataset (new!)
 	stringstream sq; //query availability
 	stringstream aq; //query (new!)
 
 	for(l=0; l<m_searchdsetMap.size(); l++){
-		ushort iUserDatasets = m_searchdsetMap[l]->GetNumSet();
-		const vector<ushort> &allRDatasets = m_searchdsetMap[l]->GetAllReverse();	
+		utype iUserDatasets = m_searchdsetMap[l]->GetNumSet();
+		const vector<utype> &allRDatasets = m_searchdsetMap[l]->GetAllReverse();	
 		vector<int> count;
 		CSeekTools::InitVector(count, m_vecstrAllQuery[l].size(), (int) 0);
 		bool isFirst = true;
 
 		for(dd=0; dd<iUserDatasets; dd++){
-			ushort i = allRDatasets[dd];
+			utype i = allRDatasets[dd];
 			CSeekIntIntMap *si = m_vc[i]->GetGeneMap();
-			ushort present = 0;
+			utype present = 0;
 			for(j=0, present=0; j<m_vecstrAllQuery[l].size(); j++){
 				if(m_mapstrintGene.find(m_vecstrAllQuery[l][j])==
 					m_mapstrintGene.end()) continue;
 
 	if(replace){
 		vector<string> qq;
-		ushort i;
+		utype i;
 		CMeta::Tokenize(refinedQuery.c_str(), qq, "|", true);
 		m_vecstrAllQuery.resize(qq.size());
 		for(i=0; i<qq.size(); i++){
 
 //load everything except query, search datasets, output directory
 bool CSeekCentral::Initialize(const vector<CSeekDBSetting*> &vecDBSetting,
-	const ushort buffer, const bool to_output_text,
+	const utype buffer, const bool to_output_text,
 	const bool bOutputWeightComponent, const bool bSimulateWeight,
 	const enum CSeekDataset::DistanceMeasure dist_measure,
 	const bool bSubtractAvg, const bool bNormPlatform,
 		m_randRandom = NULL;
 	}
 
-	ushort i, j;
+	utype i, j;
 
 	omp_set_num_threads(m_numThreads);
 
 		}
 
 		vector<string> vecstrPlatforms;
-		map<string,ushort> mapstriPlatform;
+		map<string,utype> mapstriPlatform;
 		vector<CSeekPlatform> vp;
 		CSeekTools::ReadPlatforms(vecDBSetting[i]->GetValue("platform"), vp,
 			vecstrPlatforms, mapstriPlatform);
-		for(map<string,ushort>::iterator it=mapstriPlatform.begin();
+		
+		int cur = m_vp.size();
+		for(map<string,utype>::iterator it=mapstriPlatform.begin();
 			it!=mapstriPlatform.end(); it++){
-			m_mapstriPlatform[it->first] = it->second;
+			m_mapstriPlatform[it->first] = it->second + cur;
 		}
 
-		int cur = m_vp.size();
 		m_vp.resize(cur+vp.size());
 		for(j=0; j<vp.size(); j++)
 			m_vp[cur+j].Copy(vp[j]);
 bool CSeekCentral::Initialize(
 	const vector<CSeekDBSetting*> &vecDBSetting,
 	const char *search_dset, const char *query,
-	const char *output_dir, const ushort buffer, const bool to_output_text,
+	const char *output_dir, const utype buffer, const bool to_output_text,
 	const bool bOutputWeightComponent, const bool bSimulateWeight,
 	const enum CSeekDataset::DistanceMeasure dist_measure,
 	const bool bSubtractAvg, const bool bNormPlatform,
 		return false;
 	}
 
-	ushort i, j;
+	utype i, j;
 	omp_set_num_threads(m_numThreads);
 	m_output_dir = output_dir;
 
 
 bool CSeekCentral::PrepareQuery(const vector<string> &vecstrQuery,
 	CSeekQuery &query){
-	vector<ushort> queryGenes;
-	ushort j;
+	vector<utype> queryGenes;
+	utype j;
 	for(j=0; j<vecstrQuery.size(); j++){
 		if(m_mapstrintGene.find(vecstrQuery[j])==
 			m_mapstrintGene.end()) continue;
 	assert(m_rank_normal_threads==NULL && m_rank_threads==NULL);
 	assert(m_rData==NULL);
 
-	ushort j;
-	const vector<ushort> &queryGenes = query.GetQuery();
-	const vector<ushort> &allRDatasets = dMap.GetAllReverse();
-	ushort iSearchDatasets = dMap.GetNumSet();
-	ushort iQuery = queryGenes.size();
+	utype j;
+	const vector<utype> &queryGenes = query.GetQuery();
+	const vector<utype> &allRDatasets = dMap.GetAllReverse();
+	utype iSearchDatasets = dMap.GetNumSet();
+	utype iQuery = queryGenes.size();
 
 	for(j=0; j<iSearchDatasets; j++)
 		m_vc[allRDatasets[j]]->InitializeQuery(queryGenes);
 
-	m_rData = new ushort**[m_numThreads];
+	m_rData = new utype**[m_numThreads];
 	for(j=0; j<m_numThreads; j++)
-		m_rData[j] = CSeekTools::Init2DArray(m_iGenes, iQuery, (ushort)0);
+		m_rData[j] = CSeekTools::Init2DArray(m_iGenes, iQuery, (utype)0);
 	
 	m_master_rank_threads =
 		CSeekTools::Init2DArray(m_numThreads, m_iGenes, (float)0);
 	m_sum_weight_threads =
 		CSeekTools::Init2DArray(m_numThreads, m_iGenes, (float)0);
 	m_counts_threads =
-		CSeekTools::Init2DArray(m_numThreads, m_iGenes, (ushort)0);
+		CSeekTools::Init2DArray(m_numThreads, m_iGenes, (utype)0);
 	
-	m_rank_normal_threads = new vector<ushort>[m_numThreads];
-	m_rank_threads = new vector<ushort>[m_numThreads];
+	m_rank_normal_threads = new vector<utype>[m_numThreads];
+	m_rank_threads = new vector<utype>[m_numThreads];
 
 	for(j=0; j<m_numThreads; j++){
 		m_rank_normal_threads[j].resize(m_iGenes);
 		m_rank_threads[j].resize(m_iGenes);
-		//CSeekTools::InitVector(m_rank_normal_threads[j], m_iGenes, (ushort) 255);
-		//CSeekTools::InitVector(m_rank_threads[j], m_iGenes, (ushort) 255);
+		//CSeekTools::InitVector(m_rank_normal_threads[j], m_iGenes, (utype) 255);
+		//CSeekTools::InitVector(m_rank_threads[j], m_iGenes, (utype) 255);
 	}
 	
 	CSeekTools::InitVector(m_master_rank, m_iGenes, (float) 0);
 	CSeekTools::InitVector(m_sum_weight, m_iGenes, (float) 0);
-	CSeekTools::InitVector(m_counts, m_iGenes, (ushort) 0);
+	CSeekTools::InitVector(m_counts, m_iGenes, (utype) 0);
 	CSeekTools::InitVector(weight, m_iDatasets, (float)0);
 	
 	return true;
 	assert(m_rank_normal_threads!=NULL && m_rank_threads!=NULL);
 
 	//Aggregate into three vectors: m_master_rank, m_counts, m_sum_weight
-	ushort j, k;
+	utype j, k;
 	for(j=0; j<m_numThreads; j++){
 		for(k=0; k<m_iGenes; k++){
 			m_master_rank[k] += m_master_rank_threads[j][k];
 	return true;
 }
 
-bool CSeekCentral::FilterResults(const ushort &iSearchDatasets){
-	ushort j, k;
+bool CSeekCentral::FilterResults(const utype &iSearchDatasets){
+	utype j, k;
 	bool DEBUG = false;
 	if(DEBUG) fprintf(stderr, "Aggregating genes\n");
 
 bool CSeekCentral::Sort(vector<AResultFloat> &final){
 	if(DEBUG) fprintf(stderr, "Sorting genes\n");
 	final.resize(m_iGenes);
-	ushort j;
+	utype j;
 	for(j=0; j<m_iGenes; j++){
 		//fprintf(stderr, "%d %s\n", j, DB.GetGene((size_t) j).c_str());
 		final[j].i = j;
 
 bool CSeekCentral::Display(CSeekQuery &query, vector<AResultFloat> &final){
 	if(DEBUG) fprintf(stderr, "Results:\n");
-	ushort jj, ii;
+	utype jj, ii;
 	const vector<char> &cQuery = query.GetQueryPresence();
 	for(ii=0, jj=0; jj<500; ii++){
 		if(cQuery[final[ii].i]==1) continue;
 	return true;
 }
 
-bool CSeekCentral::Write(const ushort &i){
+bool CSeekCentral::Write(const utype &i){
 	//assume m_bRandom = false
 	char acBuffer[1024];
 	sprintf(acBuffer, "%s/%d.query", m_output_dir.c_str(), i);
 	}
 
 	if(!m_bRandom && m_bOutputText){
-		const vector<ushort> &allRDatasets =
+		const vector<utype> &allRDatasets =
 			m_searchdsetMap[i]->GetAllReverse();
-		ushort iSearchDatasets = m_searchdsetMap[i]->GetNumSet();
+		utype iSearchDatasets = m_searchdsetMap[i]->GetNumSet();
 		vector<vector<string> > vecOutput;
 		vecOutput.resize(2);
 		vecOutput[0] = vector<string>();
 		sprintf(acBuffer, "%s/%d.results.txt", m_output_dir.c_str(), i);
 		vector<AResultFloat> w;
 		w.resize(m_iDatasets);
-		ushort j;
+		utype j;
 		for(j=0; j<m_iDatasets; j++){
 			w[j].i = j;
 			w[j].f = m_weight[i][j];
 
 bool CSeekCentral::Common(CSeekCentral::SearchMode &sm,
 	gsl_rng *rnd, const CSeekQuery::PartitionMode *PART_M,
-	const ushort *FOLD, const float *RATE,
+	const utype *FOLD, const float *RATE,
 	const vector< vector<float> > *providedWeight,
 	const vector< vector<string> > *newGoldStd){
 
-	ushort i, j, d, dd;
+	utype i, j, d, dd;
 	int k; //keeps track of genes (for random case)
-	ushort l; //keeps track of random repetition (for random case)
+	utype l; //keeps track of random repetition (for random case)
 	char acBuffer[1024];
 	
 	m_Query.resize(m_vecstrAllQuery.size());
 			}
 		}
 
-		const vector<ushort> &allRDatasets =
+		const vector<utype> &allRDatasets =
 			m_searchdsetMap[i]->GetAllReverse();
-		ushort iSearchDatasets = m_searchdsetMap[i]->GetNumSet();
+		utype iSearchDatasets = m_searchdsetMap[i]->GetNumSet();
 
 		//fprintf(stderr, "1 %lu\n", CMeta::GetMemoryUsage());
 
 
 		PrepareQuery(m_vecstrAllQuery[i], query);
 		PrepareOneQuery(query, *(m_searchdsetMap[i]), weight);
-		ushort iQuery = query.GetQuery().size();
+		utype iQuery = query.GetQuery().size();
 
 		//fprintf(stderr, "1b %lu\n", CMeta::GetMemoryUsage());
 		
 
 		if(current_sm==ORDER_STATISTICS)
 			m_rank_d = CSeekTools::Init2DArray(iSearchDatasets, m_iGenes,
-				(ushort) 0);
+				(utype) 0);
 
 		//For outputing component weights!
 		vector<float> wc;
 		schedule(dynamic)
 		for(dd=0; dd<iSearchDatasets; dd++){
 			d = allRDatasets[dd];
-			ushort tid = omp_get_thread_num();
+			utype tid = omp_get_thread_num();
 			if(DEBUG) fprintf(stderr, "Dataset %d, %s\n",
 				d, m_vecstrDatasets[d].c_str());
 
 				continue;
 			}
 
-			vector<ushort> this_q;
+			vector<utype> this_q;
 			for(j=0; j<mapQ->GetNumSet(); j++)
 				this_q.push_back(mapQ->GetReverse(j));
 
 				}
 
 				if(weightComponent && current_sm==CV){
-					ushort numFold = query.GetNumFold();
+					utype numFold = query.GetNumFold();
 					float ww;
 					for(j=0; j<numFold; j++){
 						//if((ww=m_vc[d]->GetCVWeight(j))==-1) continue;
 
 			if(DEBUG) fprintf(stderr, "Doing linear combination\n");
 
-			const ushort MIN_REQUIRED = max((ushort) 1, (ushort) (
+			const utype MIN_REQUIRED = max((utype) 1, (utype) (
 				m_fPercentQueryAfterScoreCutOff * this_q.size()));
 			CSeekWeighter::LinearCombine(m_rank_normal_threads[tid], this_q,
 				*m_vc[d], MIN_REQUIRED, m_bSquareZ);
 			if(DEBUG) fprintf(stderr,
 				"Adding contribution of dataset %d to master ranking: %.5f\n", d, w);
 
-			ushort iGeneSet = mapG->GetNumSet();
-			const vector<ushort> &allRGenes = mapG->GetAllReverse();
-			vector<ushort>::const_iterator iterR = allRGenes.begin();
-			vector<ushort>::const_iterator endR = allRGenes.begin() + iGeneSet;
-			vector<ushort> &Rank_Normal = m_rank_normal_threads[tid];
+			utype iGeneSet = mapG->GetNumSet();
+			const vector<utype> &allRGenes = mapG->GetAllReverse();
+			vector<utype>::const_iterator iterR = allRGenes.begin();
+			vector<utype>::const_iterator endR = allRGenes.begin() + iGeneSet;
+			vector<utype> &Rank_Normal = m_rank_normal_threads[tid];
 			float* Master_Rank = &m_master_rank_threads[tid][0];
 			float* Sum_Weight = &m_sum_weight_threads[tid][0];
-			ushort* Counts = &m_counts_threads[tid][0];
+			utype* Counts = &m_counts_threads[tid][0];
 
 			if(current_sm==ORDER_STATISTICS)
 				for(; iterR!=endR; iterR++){
 		int ret; //for system calls
 
 		if(m_bRandom){
-			/*ushort z, cz;
+			/*utype z, cz;
 			for(cz=0, z=0; z<m_iDatasets; z++)
 				if(m_weight[i][z]!=0) 
 					cz++;
 				CSeekTools::WriteArray(acBuffer, wc);
 				vector<vector<string> > vecParts;
 				vecParts.resize(query.GetNumFold());
-				ushort kk;
+				utype kk;
 				string strParts = "";
 				for(j=0; j<query.GetNumFold(); j++){
 					vecParts[j] = vector<string>();
-					const vector<ushort> &vu = query.GetCVQuery(j);
+					const vector<utype> &vu = query.GetCVQuery(j);
 					string s = "";
 					for(kk=0; kk<vu.size(); kk++){
 						vecParts[j].push_back(m_vecstrGenes[vu[kk]]);
 	return true;
 }
 
-bool CSeekCentral::CheckWeight(const ushort &i){
-	ushort j = 0;
+bool CSeekCentral::CheckWeight(const utype &i){
+	utype j = 0;
 	bool valid = false;
 	for(j=0; j<m_iDatasets; j++){
 		if(m_weight[i][j]!=0){
 }
 
 bool CSeekCentral::CopyTopGenes(CSeekQuery &csq, 
-	const vector<AResultFloat> &src, const ushort top){
-	ushort i, j;
-	vector<ushort> topGenes;
+	const vector<AResultFloat> &src, const utype top){
+	utype i, j;
+	vector<utype> topGenes;
 	for(i=0; i<top; i++){
 		if(src[i].f==-320) continue;
 		topGenes.push_back(src[i].i);
 }
 
 bool CSeekCentral::SetQueryScoreNull(const CSeekQuery &csq){
-	ushort j;
-	const vector<ushort> &query = csq.GetQuery();
+	utype j;
+	const vector<utype> &query = csq.GetQuery();
 	for(j=0; j<query.size(); j++){
 		m_master_rank[query[j]] = -320;
 	}
 }
 
 bool CSeekCentral::CVSearch(gsl_rng *rnd, const CSeekQuery::PartitionMode &PART_M,
-	const ushort &FOLD, const float &RATE){
+	const utype &FOLD, const float &RATE){
 	CSeekCentral::SearchMode sm = CV;
 	return CSeekCentral::Common(sm, rnd, &PART_M, &FOLD, &RATE);
 }
 	standard gene-set */
 bool CSeekCentral::CVCustomSearch(const vector< vector<string> > &newGoldStd,
 	gsl_rng *rnd, const CSeekQuery::PartitionMode &PART_M,
-	const ushort &FOLD, const float &RATE){
+	const utype &FOLD, const float &RATE){
 	CSeekCentral::SearchMode sm = CV_CUSTOM;
 	return CSeekCentral::Common(sm, rnd, &PART_M, &FOLD, &RATE,
 		NULL, &newGoldStd);
 bool CSeekCentral::VarianceWeightSearch(){
 	vector<vector<float> > weights;
 	weights.resize(m_vecstrAllQuery.size());
-	ushort i, j, k;
+	utype i, j, k;
 	for(i=0; i<m_vecstrAllQuery.size(); i++){
 		weights[i] = vector<float>();
 		CSeekTools::InitVector(weights[i], m_iDatasets, (float)0);
-		const vector<ushort> &allRDatasets =
+		const vector<utype> &allRDatasets =
 			m_searchdsetMap[i]->GetAllReverse();
-		ushort iSearchDatasets = m_searchdsetMap[i]->GetNumSet();
+		utype iSearchDatasets = m_searchdsetMap[i]->GetNumSet();
 
 		CSeekQuery q;
 		PrepareQuery(m_vecstrAllQuery[i], q);
-		const vector<ushort> &qGenes = q.GetQuery();
+		const vector<utype> &qGenes = q.GetQuery();
 
 		for(j=0; j<iSearchDatasets; j++){
-			ushort d = allRDatasets[j];
+			utype d = allRDatasets[j];
 			for(k=0; k<qGenes.size(); k++){
 				float gv = m_vc[d]->GetGeneVariance(qGenes[k]);
 				if(CMeta::IsNaN(gv)) continue;
 }
 
 bool CSeekCentral::Destruct(){
-	ushort j;
+	utype j;
 	//for(j=0; j<m_iDatasets; j++) m_vc[j]->DeleteQueryBlock();
 	for(j=0; j<m_iDatasets; j++){
 		if(m_vc[j]!=NULL) continue;
 	return m_Query;
 }
 
-ushort CSeekCentral::GetGene(const string &strGene) const{
+utype CSeekCentral::GetGene(const string &strGene) const{
 	if(m_mapstrintGene.find(strGene)==m_mapstrintGene.end())
 		return CSeekTools::GetNaN();
 	return m_mapstrintGene.find(strGene)->second;
 }
-string CSeekCentral::GetGene(const ushort &geneID) const{
+string CSeekCentral::GetGene(const utype &geneID) const{
 	return m_vecstrGenes[(size_t) geneID];
 }
 

src/seekcentral.h

 		const vector<CSeekDBSetting*> &vecDBSetting,
 		const char *search_dset, const char *query,
 		const char* output_dir,
-		const ushort buffer = 20, const bool to_output_text = false,
+		const utype buffer = 20, const bool to_output_text = false,
 		const bool bOutputWeightComponent = false, const bool bSimulateWeight = false,
 		const enum CSeekDataset::DistanceMeasure dist_measure = CSeekDataset::Z_SCORE,
 		const bool bSubtractAvg = true, const bool bNormPlatform = false,
      */
 	bool Initialize(
 		const vector<CSeekDBSetting*> &vecDBSetting,
-		const ushort buffer = 20, const bool to_output_text = false,
+		const utype buffer = 20, const bool to_output_text = false,
 		const bool bOutputWeightComponent = false, const bool bSimulateWeight = false,
 		const enum CSeekDataset::DistanceMeasure dist_measure = CSeekDataset::Z_SCORE,
 		const bool bSubtractAvg = true, const bool bNormPlatform = false,
 	 * \remark The random number generator is used for partitioning the query.
 	 * \remark Assumes that the CSeekCentral::Initialize() has been called.
 	 */
-	bool CVSearch(gsl_rng*, const CSeekQuery::PartitionMode&, const ushort&, const float&);
+	bool CVSearch(gsl_rng*, const CSeekQuery::PartitionMode&, const utype&, const float&);
 
 	/*!
 	 * \brief Run Seek with the custom dataset weighting
 	 * \remark Assumes that the CSeekCentral::Initialize() has been called.
 	 */
 	bool CVCustomSearch(const vector< vector<string> > &, gsl_rng*,
-		const CSeekQuery::PartitionMode&, const ushort&, const float&);
+		const CSeekQuery::PartitionMode&, const utype&, const float&);
 
 	/*!
 	 * \brief Run Seek with the equal dataset weighting
 	 * \param strGene The \a gene-name as a \c string
 	 * \return The gene-map ID
 	 */
-	ushort GetGene(const string &strGene) const;
+	utype GetGene(const string &strGene) const;
 
 	/*!
 	 * \brief Get the \a gene-name for a given gene-map ID
 	 * \param geneID The gene-map ID
 	 * \return The \a gene-name as a \c string
 	 */
-	string GetGene(const ushort &geneID) const;
+	string GetGene(const utype &geneID) const;
 
 	/*!
 	 * \brief Destruct this search instance
 	/* Central search function */
 	bool Common(CSeekCentral::SearchMode&, gsl_rng* = NULL,
 		const CSeekQuery::PartitionMode* = NULL,
-		const ushort* = NULL, const float* = NULL,
+		const utype* = NULL, const float* = NULL,
 		const vector< vector<float> >* = NULL,
 		const vector< vector<string> >* = NULL);
 
-	bool CheckWeight(const ushort &i);
+	bool CheckWeight(const utype &i);
 	bool CopyTopGenes(CSeekQuery&, const vector<AResultFloat>&, 
-		const ushort);
+		const utype);
 	bool SetQueryScoreNull(const CSeekQuery&);
 	bool PrepareQuery(const vector<string>&, CSeekQuery&);
 	bool CalculateRestart();
 	bool PrepareOneQuery(CSeekQuery &, CSeekIntIntMap &, vector<float>&);
 	bool AggregateThreads();
-	bool FilterResults(const ushort &);
+	bool FilterResults(const utype &);
 	bool Sort(vector<AResultFloat> &);
-	bool Write(const ushort &);
+	bool Write(const utype &);
 	bool Display(CSeekQuery &, vector<AResultFloat>&);
 
 	/* Gene, Dataset, and Platform Mapping*/
 	vector<string> m_vecstrDatasets;
 	vector<string> m_vecstrDP;
 	map<string, string> m_mapstrstrDatasetPlatform;
-	map<string, ushort> m_mapstrintDataset;
-	map<string, ushort> m_mapstrintGene;
+	map<string, utype> m_mapstrintDataset;
+	map<string, utype> m_mapstrintGene;
 	vector<vector<string> > m_vecstrSearchDatasets;
 	vector<CSeekIntIntMap*> m_searchdsetMap;
 
 
 	/* Gene-gene correlation matrix for all datasets
 	 Organized per thread */
-	ushort ***m_rData;
+	utype ***m_rData;
 
 	/* Correlation discretization */
 	vector<float> m_quant;
 	/* multi-threaded programming */
 	float **m_master_rank_threads;
 	float **m_sum_weight_threads;
-	ushort **m_counts_threads;
-	vector<ushort> *m_rank_normal_threads;
-	vector<ushort> *m_rank_threads;
+	utype **m_counts_threads;
+	vector<utype> *m_rank_normal_threads;
+	vector<utype> *m_rank_threads;
 
 	/* Essential search results */
 	vector<float> m_master_rank;
 	vector<float> m_sum_weight;
-	vector<ushort> m_counts;
+	vector<utype> m_counts;
 
 	/* Holds results for all queries */
 	vector< vector<float> > m_weight;
 
 	/* Platform */
 	vector<CSeekPlatform> m_vp;
-	map<string, ushort> m_mapstriPlatform;
+	map<string, utype> m_mapstriPlatform;
 	vector<string> m_vecstrPlatform;
 
 	//CDatabase reference
 
 	size_t m_iDatasets;
 	size_t m_iGenes;
-	ushort m_numThreads;
+	utype m_numThreads;
 
-	ushort m_maxNumDB;
-	map<ushort, vector< vector<string> > > m_mapLoadTime;
+	utype m_maxNumDB;
+	map<utype, vector< vector<string> > > m_mapLoadTime;
 	bool DEBUG;
 
 	bool m_bOutputWeightComponent;
 	float m_fPercentQueryAfterScoreCutOff;
 
 	/* for order statistics, a datasets-by-genes matrix */
-	ushort **m_rank_d;
+	utype **m_rank_d;
 
 	/* for network mode */
 	int m_iClient;

src/seekdataset.cpp

 	if(src->geneAverage.size()>0){
 		//fprintf(stderr, "Great a!\n");
 		geneAverage.resize(src->geneAverage.size());
-		ushort i;
+		utype i;
 		for(i=0; i<src->geneAverage.size(); i++){
 			geneAverage[i] = src->geneAverage[i];
 		}
 	if(src->genePresence.size()>0){
 		//fprintf(stderr, "Great b!\n");
 		genePresence.resize(src->genePresence.size());
-		ushort i;
+		utype i;
 		for(i=0; i<src->genePresence.size(); i++){
 			genePresence[i] = src->genePresence[i];
 		}
 	}
 	if(src->geneVariance.size()>0){
 		geneVariance.resize(src->geneVariance.size());
-		ushort i;
+		utype i;
 		for(i=0; i<src->geneVariance.size(); i++){
 			geneVariance[i] = src->geneVariance[i];
 		}
 		iNumGenes = 0;
 	}
 
-	ushort iSize = genePresence.size();
+	utype iSize = genePresence.size();
 	iNumGenes = iSize;
 	geneMap = new CSeekIntIntMap(src->geneMap);
 
 		cerr << "Gene average or gene presence unread" << endl;
 		return false;
 	}
-	ushort i;
-	ushort iSize = genePresence.size();
+	utype i;
+	utype iSize = genePresence.size();
 	iNumGenes = iSize;
 	geneMap = new CSeekIntIntMap(iSize);
 	vector<char>::const_iterator iterGenePresence = genePresence.begin();
 }
 
 
-bool CSeekDataset::InitializeQueryBlock(const vector<ushort> &queryBlock){
+bool CSeekDataset::InitializeQueryBlock(const vector<utype> &queryBlock){
 
 	DeleteQueryBlock();
 
 	dbMap = new CSeekIntIntMap(iNumGenes);
 
-	vector<ushort>::const_iterator iterQ = queryBlock.begin();
+	vector<utype>::const_iterator iterQ = queryBlock.begin();
 	for(; iterQ!=queryBlock.end(); iterQ++){
 		if(CSeekTools::IsNaN(geneMap->GetForward(*iterQ))){
 			//this query gene is not present in the dataset
 	//fprintf(stderr, "0x1 %lu %d %d\n", CMeta::GetMemoryUsage(), iDBSize, iNumGenes);
 	r = CSeekTools::Init2DArray(iDBSize, iNumGenes, (unsigned char) 255);
 	//fprintf(stderr, "0x2 %lu\n", CMeta::GetMemoryUsage());
-	//CSeekTools::Free2DArray((unsigned short**)r);
 	//free(r[0]);
 	//free(r);
 	//fprintf(stderr, "0x3 %lu\n", CMeta::GetMemoryUsage());
 }
 
 
-bool CSeekDataset::InitializeQuery(const vector<ushort> &query){
+bool CSeekDataset::InitializeQuery(const vector<utype> &query){
 	DeleteQuery();
 
 	if(iDBSize==0 || dbMap==NULL) return true;
 
 	queryMap = new CSeekIntIntMap(iNumGenes);
 
-	ushort i;
-	vector<ushort>::const_iterator iterQ = query.begin();
+	utype i;
+	vector<utype>::const_iterator iterQ = query.begin();
 
 	vector<AResult> a;
 	a.resize(query.size());
 	return true;
 }
 
-const vector<ushort>& CSeekDataset::GetQuery() const{
+const vector<utype>& CSeekDataset::GetQuery() const{
 	return this->query;
 }
 
-const vector<ushort>& CSeekDataset::GetQueryIndex() const{
+const vector<utype>& CSeekDataset::GetQueryIndex() const{
 	return this->queryIndex;
 }
 
-ushort** CSeekDataset::GetDataMatrix(){
+utype** CSeekDataset::GetDataMatrix(){
 	return rData;
 }
 
-bool CSeekDataset::InitializeDataMatrix(ushort **rD,
-	const vector<float> &quant, const ushort &iRows,
-	const ushort &iColumns, const bool bSubtractAvg,
+bool CSeekDataset::InitializeDataMatrix(utype **rD,
+	const vector<float> &quant, const utype &iRows,
+	const utype &iColumns, const bool bSubtractAvg,
 	const bool bNormPlatform, const bool logit,
 	const enum CSeekDataset::DistanceMeasure dist_measure,
 	const float cutoff, 
 		return false;
 	}
 
-	ushort i, j;
+	utype i, j;
 	rData = rD;
-	ushort ii;
-	ushort iNumGenes = geneMap->GetNumSet();
-	ushort iNumQueries = iQuerySize;
+	utype ii;
+	utype iNumGenes = geneMap->GetNumSet();
+	utype iNumQueries = iQuerySize;
 
 	//fprintf(stderr, "iNumQueries is %d\n", iNumQueries);
 	//iRows is the gene id, iColumns is the query id
 	//default value for all rData entries is 0
-	memset(&rData[0][0], 0, sizeof(ushort)*iRows*iColumns);
+	memset(&rData[0][0], 0, sizeof(utype)*iRows*iColumns);
 
 	//assume queryIndex is already sorted
-	vector<ushort> offset;
+	vector<utype> offset;
 	offset.push_back(0);
 	for(i=1; i<queryIndex.size(); i++)
 		offset.push_back(queryIndex[i] - queryIndex[i-1]);
 
 			//GetColumns() is numQuery
 			for(j=0; j<iNumQueries; j++){
-				ushort jj = this->query[j];
+				utype jj = this->query[j];
 				platform_avg[j] = platform->GetPlatformAvg(jj);
 				platform_stdev[j] = platform->GetPlatformStdev(jj);
 			}
 
-			const vector<ushort> &allRGenes = geneMap->GetAllReverse();
+			const vector<utype> &allRGenes = geneMap->GetAllReverse();
 			float a = 0;
 			float vv = 0;
 			unsigned char x = 0;
-			ushort iGeneMapSize = geneMap->GetNumSet();
+			utype iGeneMapSize = geneMap->GetNumSet();
 			if(logit){ //NOT checked
 				for(ii=0; ii<iGeneMapSize; ii++){
 					for(j=0, i = allRGenes[ii], a=GetGeneAverage(i);
 						vv = max((float) min(vv, (float)3.2), (float)-3.2);
 						//By default, cutoff = -nan (i.e., always true)
 						if(vv>cutoff){
-							rData[i][j]= (ushort) (vv*100.0) + 320;
+							rData[i][j]= (utype) (vv*100.0) + 320;
 							//fprintf(stderr, "%.5f %.5f %.5f %.5f\n", quant[x], vv, a, platform_avg[j]);
 						}else{
 							rData[i][j] = 0;
 							/ platform_stdev[j];
 						vv = max((float) min(vv, (float)3.2), (float)-3.2);
 						if(vv>cutoff){
-							rData[i][j]= (ushort) (vv*100.0) + 320;
+							rData[i][j]= (utype) (vv*100.0) + 320;
 							//fprintf(stderr, "r %.2f\n", quant[x]);
 						}else{
 							rData[i][j]= 0;
 						vv = max((float)-3.2, (float)min((float) 3.2, (float) vv));
 						//fprintf(stderr, "%.5f %.5f %.5f\n", quant[x], vv, a);
 						if(vv>cutoff){
-							rData[i][j]= (ushort) (vv*100.0) + 320;
+							rData[i][j]= (utype) (vv*100.0) + 320;
 						}else{
 							rData[i][j] = 0;
 						}
 						vv = quant[x] - a;
 						vv = max((float) min((float)vv, (float)3.2), (float)-3.2);
 						if(vv>cutoff){
-							rData[i][j]= (ushort) (vv*100.0) + 320;
+							rData[i][j]= (utype) (vv*100.0) + 320;
 						}else{
 							rData[i][j] = 0;
 						}
 				float vv = log(quant[x]) - log((float) 1.0 - quant[x]);
 				vv = max((float) min(vv, (float)3.2), (float)-3.2);
 				if(vv>cutoff){
-					rData[i][j] = (ushort) (vv*100.0) + 320;
+					rData[i][j] = (utype) (vv*100.0) + 320;
 				}else{
 					rData[i][j] = 0;
 				}
 								   //precision, should put value to range 
 								   //(-3.0 to 3.0)
 					vv = max((float) min(vv, (float)3.2), (float)-3.2);
-					rData[i][j] = (ushort) (vv*100.0) + 320;
+					rData[i][j] = (utype) (vv*100.0) + 320;
 				}else{
-					rData[i][j] = (ushort) 0; 	//default value for 
+					rData[i][j] = (utype) 0; 	//default value for 
 												//not meeting cutoff
 				}
 			}
 
 				vv = max((float) min(vv, (float)3.2), (float)-3.2);
 				if(vv>cutoff){
-					rData[i][j] = (ushort) (vv*100.0) + 320;
+					rData[i][j] = (utype) (vv*100.0) + 320;
 				}else{
 					rData[i][j] = 0;
 				}
 	}
 
 	if(bRandom){
-		vector<vector<ushort> > allRandom;
+		vector<vector<utype> > allRandom;
 		allRandom.resize(iNumQueries);
 		for(i=0; i<iNumQueries; i++){
-			allRandom[i] = vector<ushort>();
+			allRandom[i] = vector<utype>();
 		}
 
 		for(ii=0; ii<iNumGenes; ii++){
 			}
 		}
 
-		ushort **a = CSeekTools::Init2DArray(iNumQueries, max_size, (ushort)0);
+		utype **a = CSeekTools::Init2DArray(iNumQueries, max_size, (utype)0);
 
 		for(i=0; i<iNumQueries; i++){
 			for(j=0; j<allRandom[i].size(); j++){
 				a[i][j] = allRandom[i][j];
 			}
-			gsl_ran_shuffle(rand, a[i], allRandom[i].size(), sizeof(ushort));
+			gsl_ran_shuffle(rand, a[i], allRandom[i].size(), sizeof(utype));
 		}
 
 		vector<int> k;
 	return m_fDsetStdev;
 }
 
-float CSeekDataset::GetGeneVariance(const ushort &i) const{
+float CSeekDataset::GetGeneVariance(const utype &i) const{
 	return geneVariance[i];
 }
 
-float CSeekDataset::GetGeneAverage(const ushort &i) const{
+float CSeekDataset::GetGeneAverage(const utype &i) const{
 	return geneAverage[i];
 }
 
-ushort CSeekDataset::GetNumGenes() const{
+utype CSeekDataset::GetNumGenes() const{
 	return iNumGenes;
 }
 
-bool CSeekDataset::InitializeCVWeight(const ushort &i){
+bool CSeekDataset::InitializeCVWeight(const utype &i){
 	weight.clear();
 	weight.resize(i);
 	sum_weight = -1;
 	return true;
 }
 
-bool CSeekDataset::SetCVWeight(const ushort &i, const float &f){
+bool CSeekDataset::SetCVWeight(const utype &i, const float &f){
 	weight[i] = f;
 	return true;
 }
 
-float CSeekDataset::GetCVWeight(const ushort &i){
+float CSeekDataset::GetCVWeight(const utype &i){
 	return weight[i];
 }
 
 }
 
 float CSeekDataset::GetDatasetSumWeight(){
-	ushort i;
-	ushort num = 0;
+	utype i;
+	utype num = 0;
 	if(sum_weight==-1){
 		for(sum_weight=0, i=0; i<weight.size(); i++){
 			if(weight[i]==-1) continue;

src/seekdataset.h

 		const string &sinfo, const string &plat,
 		const string &prep, const string &db,
 		const string &gene, const string &quant,
-		const string &dset, const ushort &numDB){
+		const string &dset, const utype &numDB){
 		m_gvarDirectory = gvar;
 		m_sinfoDirectory = sinfo;
 		m_platformDirectory = plat;
 		const char* sinfo, const char* plat,
 		const char* prep, const char* db,
 		const char* gene, const char* quant,
-		const char* dset, const ushort &numDB){
+		const char* dset, const utype &numDB){
 		m_gvarDirectory = gvar;
 		m_sinfoDirectory = sinfo;
 		m_platformDirectory = plat;
 			return "NULL";
 	}
 
-	ushort GetNumDB(){
+	utype GetNumDB(){
 		return m_numDB;
 	}
 
 	string m_geneMapFile;
 	string m_quantFile;
 	string m_dsetFile;
-	ushort m_numDB;
+	utype m_numDB;
 };
 
 
 	 *
 	 * Indicates which query genes are present in the dataset.
 	 */
-	bool InitializeQuery(const vector<ushort> &);
+	bool InitializeQuery(const vector<utype> &);
 
 	/*!
 	 * \brief
 	 * Flattens all the queries into one vector that contains only the unique query genes, then
 	 * constructs a presence map based on this vector.
 	 */
-	bool InitializeQueryBlock(const vector<ushort> &);
+	bool InitializeQueryBlock(const vector<utype> &);
 
 	/*!
 	 * \brief
 	 * \endcode
 	 * Then if a \a correlation is 2.5, the discretized value would be 2.
 	 */
-	bool InitializeDataMatrix(ushort**, const vector<float> &,
-		const ushort&, const ushort&, const bool=true, 
+	bool InitializeDataMatrix(utype**, const vector<float> &,
+		const utype&, const utype&, const bool=true, 
 		const bool=false, const bool=false,
 		const enum DistanceMeasure=Z_SCORE,
 		const float cutoff=-1.0*CMeta::GetNaN(), 
 	 * \brief
 	 * Get the gene-gene \a correlation matrix
 	 *
-	 * \return A two-dimensional array of type \c ushort. Note that the
+	 * \return A two-dimensional array of type \c utype. Note that the
 	 * \a correlation has been scaled to a integer range from 0 to 640.
 	 * See CSeekDataset::InitializeDataMatrix.
 	 *
 	 */
-	ushort** GetDataMatrix();
+	utype** GetDataMatrix();
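Concretely (from InitializeDataMatrix in src/seekdataset.cpp above): the stored value is rData[i][j] = (utype)(100 * clamp(v, -3.2, 3.2)) + 320, so v = -3.2 maps to 0, v = 0 to 320, and v = +3.2 to 640; entries that do not pass the cutoff are stored as 0.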
 
 	/*!
 	 * \brief
 	 * \brief Get the query genes
 	 * \return A vector of queries
 	 */
-	const vector<ushort>& GetQuery() const;
+	const vector<utype>& GetQuery() const;
 
 	/*!
 	 * \brief Get the query gene indices
 	 * \return A vector of query gene indices
 	 */
-	const vector<ushort>& GetQueryIndex() const;
+	const vector<utype>& GetQueryIndex() const;
 
 	/*!
 	 * \brief Get the gene expression variance vector
 	 * \return The variance vector
 	 */
-	float GetGeneVariance(const ushort&) const;
+	float GetGeneVariance(const utype&) const;
 	/*!
 	 * \brief Get the gene average \a correlation vector
 	 * \return The average \a correlation vector
 	 */
-	float GetGeneAverage(const ushort&) const;
+	float GetGeneAverage(const utype&) const;
 	/*!
 	 * \brief Get the mean of the global gene-gene Pearson distribution
 	 * \return The mean Pearson for the dataset
 	 * \brief Get the genome size
 	 * \return The genome size
 	 */
-	ushort GetNumGenes() const;
+	utype GetNumGenes() const;
 
 	/*!
 	 * \brief Initialize the weight of the dataset
 	 * Initializes the total dataset weight, and the score of the
 	 * individual cross-validation (CV) runs. 
 	 */
-	bool InitializeCVWeight(const ushort&);
+	bool InitializeCVWeight(const utype&);
 
 	/*!
 	 * \brief Set the score for a particular cross-validation
 	 * \param i The index
 	 * \param f The validation score
 	 */
-	bool SetCVWeight(const ushort&, const float&);
+	bool SetCVWeight(const utype&, const float&);
 
 	/*!
 	 * \brief Get the score for a particular cross-validation
 	 * \param i The index
 	 */
-	float GetCVWeight(const ushort&);
+	float GetCVWeight(const utype&);
 
 	/*!
 	 * \brief Get all the cross-validation scores
 
 	CSeekIntIntMap *dbMap;
 	CSeekIntIntMap *queryMap;
-	vector<ushort> query;
-	vector<ushort> queryIndex;
+	vector<utype> query;
+	vector<utype> queryIndex;
 
-	ushort iQuerySize;
-	ushort iNumGenes;
-	ushort iDBSize;
+	utype iQuerySize;
+	utype iNumGenes;
+	utype iDBSize;
 
 	vector<float> weight;
 
-	ushort **rData;
+	utype **rData;
 	unsigned char **r;
 
 	float sum_weight;

src/seekevaluate.cpp

 namespace Sleipnir {
 
 bool CSeekPerformanceMeasure::SortRankVector(
-	const vector<unsigned short> &rank,
+	const vector<utype> &rank,
 	const CSeekIntIntMap &mapG, vector<AResult> &a,
 	//optional argument
-	const ushort top){
+	const utype top){
 
-	ushort numGenesD = mapG.GetNumSet();
-	ushort TOP = 0;
-	ushort numNonZero = 0;
-	ushort i;
+	utype numGenesD = mapG.GetNumSet();
+	utype TOP = 0;
+	utype numNonZero = 0;
+	utype i;
 	bool DEBUG = false;
 
 	//a should be the same size as rank
 	if(top==0) TOP = rank.size();
 	else TOP = top;
 
-	vector<ushort>::const_iterator itRank = rank.begin();
+	vector<utype>::const_iterator itRank = rank.begin();
 	vector<AResult>::iterator itA = a.begin();
 	for(i = 0; itRank!=rank.end(); itRank++, itA++, i++){
 		itA->i = i;
 /* designed specifically for a CSeekDataset */
 /* mask: the query genes which are not included in RBP calcualtion */
 bool CSeekPerformanceMeasure::RankBiasedPrecision(const float &rate,
-	const vector<unsigned short> &rank, float &rbp,
+	const vector<utype> &rank, float &rbp,
 	const vector<char> &mask, const vector<char> &gold,
 	const CSeekIntIntMap &mapG, vector<AResult> *ar,
 	/* optional arguments */
-	const ushort top){
+	const utype top){
 
-	ushort i, ii, j, jj;
+	utype i, ii, j, jj;
 	float x;