Qian Zhu avatar Qian Zhu committed 49b603a Merge

Merge

Comments (0)

Files changed (4)

configure.incendio

 	--with-smile=/Genomics/fgrid/function/sleipnir-extlib/smile_linux_x64_gcc_4_4_5/ \
 	--with-svm-perf=/Genomics/fgrid/function/sleipnir-extlib/svm_perf/ \
 	--with-boost-includes=/Genomics/fgrid/function/sleipnir-extlib/boost/ \
-    --prefix=/home/arjunk/software/sleipnir/ \
+	--with-libsvm=/Genomics/Users/youngl/lib/libsvm-3.17/ \
+    --prefix=/home/youngl/software/sleipnir/ \
 	LDFLAGS=-static
-	# --with-boost-graph-lib=/usr/lib/libboost_graph.a \
-	#  --with-vowpal-wabbit=../extlib/vowpal_wabbit_v4.1/ \
 # CXXFLAGS=-fno-threadsafe-statics
 		}
 	}
 
-	delete ppDoc;
+	delete[] ppDoc;
 	return vecResult;
 }
 

tools/NetworkCombiner/NetworkCombiner.cpp

 /*****************************************************************************
-* This file is provided under the Creative Commons Attribution 3.0 license.
-*
-* You are free to share, copy, distribute, transmit, or adapt this work
-* PROVIDED THAT you attribute the work to the authors listed below.
-* For more information, please see the following web page:
-* http://creativecommons.org/licenses/by/3.0/
-*
-* This file is a component of the Sleipnir library for functional genomics,
-* authored by:
-* Curtis Huttenhower (chuttenh@princeton.edu)
-* Mark Schroeder
-* Maria D. Chikina
-* Olga G. Troyanskaya (ogt@princeton.edu, primary contact)
-*
-* If you use this library, the included executable tools, or any related
-* code in your work, please cite the following publication:
-* Curtis Huttenhower, Mark Schroeder, Maria D. Chikina, and
-* Olga G. Troyanskaya.
-* "The Sleipnir library for computational functional genomics"
-*****************************************************************************/
+ * This file is provided under the Creative Commons Attribution 3.0 license.
+ *
+ * You are free to share, copy, distribute, transmit, or adapt this work
+ * PROVIDED THAT you attribute the work to the authors listed below.
+ * For more information, please see the following web page:
+ * http://creativecommons.org/licenses/by/3.0/
+ *
+ * This file is a component of the Sleipnir library for functional genomics,
+ * authored by:
+ * Curtis Huttenhower (chuttenh@princeton.edu)
+ * Mark Schroeder
+ * Maria D. Chikina
+ * Olga G. Troyanskaya (ogt@princeton.edu, primary contact)
+ *
+ * If you use this library, the included executable tools, or any related
+ * code in your work, please cite the following publication:
+ * Curtis Huttenhower, Mark Schroeder, Maria D. Chikina, and
+ * Olga G. Troyanskaya.
+ * "The Sleipnir library for computational functional genomics"
+ *****************************************************************************/
 #include "stdafx.h"
 #include <iostream>
 #include <fstream>
 
 float logit(float x)
 {
-     if(x == 0.0)//min float number
-	return -15;
-	//return -std::numeric_limits<float>::max();
-	//return std::numeric_limits<float>::min();
-     if(x == 1.0)//max float number
-	return 15;
-	//return std::numeric_limits<float>::max();
+  if(x == 0.0)//min float number
+    return -15;
+  //return -std::numeric_limits<float>::max();
+  //return std::numeric_limits<float>::min();
+  if(x == 1.0)//max float number
+    return 15;
+  //return std::numeric_limits<float>::max();
 
-     return log( x / ( 1 - x ) );
+  return log( x / ( 1 - x ) );
 }
 
 float sigmoid(float x)
 {
-     float exp_value;
-     float return_value;
+  float exp_value;
+  float return_value;
 
-     /*** Exponential calculation ***/
-     exp_value = exp((double) -x);
+  /*** Exponential calculation ***/
+  exp_value = exp((double) -x);
 
-     /*** Final sigmoid value ***/
-     return_value = 1 / (1 + exp_value);
+  /*** Final sigmoid value ***/
+  return_value = 1 / (1 + exp_value);
 
-     return return_value;
+  return return_value;
 }
 
 float equal_prior(float x, size_t p, size_t n)
 {
-     float logit_value;
-     float return_value;
+  float logit_value;
+  float return_value;
 
-     logit_value = logit(x) + log( ((float) p) / n); // - log(n/p) ... subtract prior ... in effect give equal prior to 
-     return_value = sigmoid(logit_value);
+  logit_value = logit(x) + log( ((float) p) / n); // - log(n/p) ... subtract prior ... in effect give equal prior to 
+  return_value = sigmoid(logit_value);
 
-     return return_value;
+  return return_value;
+}
+
+inline bool exists (const std::string& name) {
+  struct stat buffer;   
+  return (stat (name.c_str(), &buffer) == 0); 
 }
 
 
 int main( int iArgs, char** aszArgs ) {
 
-//	cout << sigmoid(logit(0.999999)) << endl;
-//	cout << sigmoid(logit(0.9)) << endl;
-//	cout << sigmoid(logit(0.1)) << endl;
-//	cout << sigmoid(logit(1)) << endl;
-//	cout << sigmoid(logit(0)) << endl;
+  //	cout << sigmoid(logit(0.999999)) << endl;
+  //	cout << sigmoid(logit(0.9)) << endl;
+  //	cout << sigmoid(logit(0.1)) << endl;
+  //	cout << sigmoid(logit(1)) << endl;
+  //	cout << sigmoid(logit(0)) << endl;
 
 
-	gengetopt_args_info	sArgs;
-	int					iRet;
-	size_t				i, j, k, l;
-	float d;
-	DIR* dp;
-	struct dirent* ep;
-	CDat					DatOut, DatCur;
-	vector<size_t>				veciGenesCur;	
+  gengetopt_args_info	sArgs;
+  int					iRet;
+  size_t				i, j, k, l;
+  float d;
+  DIR* dp;
+  struct dirent* ep;
+  CDat					DatOut, DatCur;
+  vector<size_t>				veciGenesCur;	
 
-	// store all input filenames
-	vector<string> input_files;
-	string weight_filename;
+  // context weight filename
+  std::string weight_filename;
+  std::vector<string> input_files;
+  std::string dab_dir;
 
-	// store prior information
-	vector<size_t> vpos;
-	vector<size_t> vneg;
+  // store prior information
+  vector<size_t> vpos;
+  vector<size_t> vneg;
 
-        // read context weights
-        std::string line;
-        std::vector<std::string> terms;
-        std::map<std::string,double> term2weight;
-        double weight_sum = 0.0;
+  // read context weights
+  std::string line;
+  std::vector<std::string> terms;
+  std::map<std::string,double> term2weight;
+  double weight_sum = 0.0;
 
 
-	if( cmdline_parser( iArgs, aszArgs, &sArgs ) ) {
-		cmdline_parser_print_help( );
-		return 1; }
-	CMeta Meta( sArgs.verbosity_arg );
-	
-	//int type = sArgs.type_arg ; // type of combiner
-	
-	// now collect the data files from directory if given
-	if(sArgs.directory_arg){
-	  dp = opendir (sArgs.directory_arg);
-	  if (dp != NULL){
-	    while (ep = readdir (dp)){
-	      // skip . .. files and temp files with ~
-	      if (ep->d_name[0] == '.' || ep->d_name[strlen(ep->d_name)-1] == '~') 
-		continue;
-	      
-	      // currently opens all files. Add filter here if want pick file extensions
-	      input_files.push_back((string)sArgs.directory_arg + "/" + ep->d_name);	      
-	    }
-	    (void) closedir (dp);	    
-	  }
-	  else{
-	    cerr << "Couldn't open the directory: " << sArgs.directory_arg << '\n';
-	    return 1;
-	  }
-	}
+  if( cmdline_parser( iArgs, aszArgs, &sArgs ) ) {
+    cmdline_parser_print_help( );
+    return 1; }
+    CMeta Meta( sArgs.verbosity_arg );
 
-	if(sArgs.prior_arg){// only subtract prior only if count directory given
-	    std::string term;
-	    std::string count_fn;
-            unsigned found;
-            std::string line;
-	    size_t posnum;
-	    size_t negnum;
-	    for( i = 0 ; i < input_files.size( ) ; i++ ){
-		found = input_files[i].rfind("/");
-		term = input_files[i].substr(found+1);
-		found = term.rfind(".");
-		term = term.replace(found+1,3,"txt");
-		count_fn = std::string(sArgs.prior_arg) + term;
-		ifstream file (count_fn.c_str());
-		if (file.is_open()){
-		    getline (file,line);//dummy line
-		    getline (file,line);
-		}
-//		std::cout <<  << endl;
-		sscanf(line.c_str(),"%d\t%d",&posnum,&negnum);
-//cout << term << endl;
-//cout << posnum << endl;
-//cout << negnum << endl;
-		vpos.push_back( posnum );
-		vneg.push_back( negnum );
-	    }
-	}
+    //int type = sArgs.type_arg ; // type of combiner
 
+    // check if directory valid
+    if(sArgs.directory_arg){
+      dp = opendir (sArgs.directory_arg);
+      if (dp != NULL){
+        (void) closedir (dp);	    
+        dab_dir = sArgs.directory_arg;
+      }
+      else{
+        cerr << "Couldn't open the directory: " << sArgs.directory_arg << '\n';
+        return 1;
+      }
+    }
 
+    if(sArgs.weights_arg){ // only if weight file given..
+      weight_filename = sArgs.weights_arg;
+      std::ifstream weight_file(weight_filename.c_str());
+      cout << "Using weight file: " << weight_filename << endl;
+      // read context weights
+      if(weight_file.is_open())
+      {
+        while(weight_file.good())
+        {
+          std::string term;
+          double weight;
 
-	if(sArgs.weights_arg){ // only if weight file given..
-	weight_filename = sArgs.weights_arg;
-        std::ifstream weight_file(weight_filename.c_str());
-	cout << "Using weight file: " << weight_filename << endl;
-	// read context weights
-        if(weight_file.is_open())
-        {
-          while(weight_file.good())
-          {
-	    std::string term;
-	    double weight;
-	
-	    weight_file >> term >> weight;
-	    //weight = abs(weight);
-	    //cout << term << "\t" << weight << endl;
-	    if(term.length() < 1){
-              continue;
-	    }
-	    if(weight <= 0.1){
-	      continue;
-	    }
-	cout << term << endl;
-	    terms.push_back(term);
-	    term2weight[term] = weight;
-	    weight_sum += weight;
+          weight_file >> term >> weight;
+          //weight = abs(weight);
+          //cout << term << "\t" << weight << endl;
+          if(term.length() < 1){
+            continue;
           }
-          weight_file.close();
+          if(weight <  0.0){//ignore weights less than zero
+            continue;
+          }
+          cout << term << endl;
+          terms.push_back(term);
+          term2weight[term] = weight;
+          weight_sum += weight;
         }
-	
-	cout << "The number of contexts with weight: " << terms.size() << endl;	
+        weight_file.close();
+      }
 
-	// read only networks/contexts with weights
-	std::vector<string> subset_dab;
-	std::vector<string> subset_terms;
-	for( i = 0; i < input_files.size( ); ++i) {
-	  for( j = 0; j < terms.size( ); ++j) {
-	    unsigned int pos = input_files[i].find(terms[j]);
-	    if(pos < input_files[i].length()){
-		//cout << pos << endl;
-		subset_dab.push_back(input_files[i]);
-		subset_terms.push_back(terms[j]);
-		break;
-	    }
-	    //cout << pos << endl;
-	  }
-	}
-	input_files = subset_dab;
-	terms = subset_terms;
-	}else{
-	  //TODO: temporary hacking based on implementation ... should improve
-          // sort so that input file and term array are corresponding with index
-	  std::sort(input_files.begin(),input_files.end());
-	  for( i = 0; i < input_files.size( ); ++i){
-            terms.push_back(input_files[i]);
-            term2weight[input_files[i]] = 1;
+      cout << "The number of contexts with weight: " << terms.size() << endl;	
+
+      // read only networks/contexts with weights
+      std::vector<string> subset_terms;
+      std::string dabname;
+      for( j = 0; j < terms.size( ); ++j) {
+        dabname = dab_dir + "/" + terms[j] + ".dab";
+        if(exists(dabname)){
+          cout << dabname;
+          input_files.push_back(dabname);
+          subset_terms.push_back(terms[j]);
+        }
+        //cout << pos << endl;
+      }
+      terms = subset_terms;
+    }else{
+
+      dp = opendir (dab_dir.c_str());
+      if (dp != NULL){
+        while (ep = readdir (dp)){
+          // skip . .. files and temp files with ~
+          if (ep->d_name[0] == '.' || ep->d_name[strlen(ep->d_name)-1] == '~') 
+            continue;
+          if (std::string(ep->d_name).substr(strlen(ep->d_name)-4,4).compare(string(".dab")) == 0)
+            input_files.push_back((string)sArgs.directory_arg + "/" + ep->d_name);          
+        }
+        (void) closedir (dp);           
+      }
+
+      //TODO: temporary hacking based on implementation ... should improve
+      // sort so that input file and term array are corresponding with index
+
+      std::sort(input_files.begin(),input_files.end());
+      for( i = 0; i < input_files.size( ); ++i){
+        terms.push_back(input_files[i]);
+        term2weight[input_files[i]] = 1;
+      }
+
+      //std::sort(terms.begin(),terms.end());
+      weight_sum = input_files.size( );
+    }
+
+
+    // now iterate dat/dab networks
+    for( i = 0; i < input_files.size( ); ++i ) {
+
+      // open dat/dab network
+      if( !DatCur.Open( input_files[ i ].c_str() ) ) {
+        cerr << "Couldn't open: " << input_files[ i ] << endl;
+        return 1; }	    
+        cerr << "opened: " << input_files[ i ] << endl;
+
+
+        if( sArgs.prior_arg ){
+          for( j = 0; j < DatCur.GetGenes( ); ++j)
+            for( k = ( j + 1 ); k < DatCur.GetGenes( ); ++k)
+              DatCur.Set( j, k, equal_prior( DatCur.Get( j, k ), vpos[ i ], vneg[ i ] ) );
+
+        }
+
+        if( sArgs.logit_flag ){
+          for( j = 0; j < DatCur.GetGenes( ); ++j)
+            for( k = ( j + 1 ); k < DatCur.GetGenes( ); ++k)
+              DatCur.Set( j, k, logit( DatCur.Get( j, k ) ) );
+
+        }else{
+        }
+
+        if( sArgs.znormalize_flag ){
+          DatCur.Normalize( CDat::ENormalizeZScore );
+        }else{
+        }
+
+        cerr << term2weight[terms[i]] << endl;
+
+
+        // if open first network, we will just add edge weights to this CDat
+        if( i == 0 ){
+          DatOut.Open( DatCur );  
+          for( j = 0; j < DatOut.GetGenes( ); ++j)
+            //cerr << "set " << j << endl;
+
+            for( k = ( j + 1 ); k < DatOut.GetGenes( ); ++k){
+              //                DatOut.Set( j, k, DatOut.Get( j, k ) );
+              DatOut.Set( j, k, DatOut.Get( j, k ) * term2weight[terms[i]] );
+            }
+          continue;	 
+        }
+
+        //cerr << "map flag" << endl;	  
+        if( sArgs.map_flag ){
+          // Get gene index match	  
+          veciGenesCur.clear();
+          veciGenesCur.resize(DatOut.GetGenes());
+          for( l = 0; l < DatOut.GetGenes(); l++){
+            veciGenesCur[ l ] = DatCur.GetGene( DatOut.GetGene(l) );
+            if( veciGenesCur[ l ] == -1 ){
+              cerr << "ERROR: missing gene" << input_files[ l ] << endl;
+              return 1;	      
+            }
           }
+        }
 
-	  //std::sort(terms.begin(),terms.end());
-	  weight_sum = input_files.size( );
-	}
+        cerr << "add a network" << endl;
 
+        // now add edges to Dat
+        for( j = 0; j < DatOut.GetGenes( ); ++j )
+          for( k = ( j + 1 ); k < DatOut.GetGenes( ); ++k ) {
 
-	// now iterate dat/dab networks
-	for( i = 0; i < input_files.size( ); ++i ) {
-
-	  // open dat/dab network
-	  if( !DatCur.Open( input_files[ i ].c_str() ) ) {
-	    cerr << "Couldn't open: " << input_files[ i ] << endl;
-	    return 1; }	    
-	  cerr << "opened: " << input_files[ i ] << endl;
-	   
-
-	  if( sArgs.prior_arg ){
-            for( j = 0; j < DatCur.GetGenes( ); ++j)
-              for( k = ( j + 1 ); k < DatCur.GetGenes( ); ++k)
-                  DatCur.Set( j, k, equal_prior( DatCur.Get( j, k ), vpos[ i ], vneg[ i ] ) );
-
-	  }
-
-          if( sArgs.logit_flag ){
-            for( j = 0; j < DatCur.GetGenes( ); ++j)
-              for( k = ( j + 1 ); k < DatCur.GetGenes( ); ++k)
-                  DatCur.Set( j, k, logit( DatCur.Get( j, k ) ) );
-                
-          }else{
-	  }
-            
-	  if( sArgs.znormalize_flag ){
-            DatCur.Normalize( CDat::ENormalizeZScore );
-          }else{
-	  }
-
- cerr << term2weight[terms[i]] << endl;
-
-
-          // if open first network, we will just add edge weights to this CDat
-          if( i == 0 ){
-            DatOut.Open( DatCur );  
-            for( j = 0; j < DatOut.GetGenes( ); ++j)
-//cerr << "set " << j << endl;
-
-              for( k = ( j + 1 ); k < DatOut.GetGenes( ); ++k){
-//                DatOut.Set( j, k, DatOut.Get( j, k ) );
-		DatOut.Set( j, k, DatOut.Get( j, k ) * term2weight[terms[i]] );
-		}
-	    continue;	 
-	  }
-	  
-//cerr << "map flag" << endl;	  
-	  if( sArgs.map_flag ){
-	    // Get gene index match	  
-	    veciGenesCur.clear();
-	    veciGenesCur.resize(DatOut.GetGenes());
-	    for( l = 0; l < DatOut.GetGenes(); l++){
-	      veciGenesCur[ l ] = DatCur.GetGene( DatOut.GetGene(l) );
-	      if( veciGenesCur[ l ] == -1 ){
-		cerr << "ERROR: missing gene" << input_files[ l ] << endl;
-		return 1;	      
-	      }
-	    }
-	  }
-
-cerr << "add a network" << endl;
-	  
-	  // now add edges to Dat
-	  for( j = 0; j < DatOut.GetGenes( ); ++j )
-	    for( k = ( j + 1 ); k < DatOut.GetGenes( ); ++k ) {
-	      
-	      if( sArgs.map_flag ){
-		// we are assuming a fully connected network
-		if( CMeta::IsNaN( d = DatCur.Get( veciGenesCur[ j ], veciGenesCur[ k ] ) ) ){
-		  cerr << d << endl;
-		  cerr << veciGenesCur[ j ] << endl;
-		  cerr << veciGenesCur[ k ] << endl;
-		  cerr << "ERROR: missing values" << input_files[ i ] << endl;
-		  return 1;
-		}
-	      }
-	      else{
-		if( CMeta::IsNaN( d = DatCur.Get(  j, k ) ) ){
-		  cerr << "ERROR: missing values" << input_files[ i ] << endl;
-		  return 1;
-		}
-	      }
-
-//cerr << d << endl;
-//cerr << terms.size() << endl;
-//for(int q = 0 ; q < terms.size() ; q++)
-//  cerr << terms[q] << endl;
-//cerr << terms[i] << endl;
-//cerr << term2weight[terms[i]] << endl;	      
-
-	      DatOut.Set( j, k, DatOut.Get( j, k ) + d * term2weight[terms[i]]) ; //weight edge based on context weight TODO: right now this implementation depends on the order of terms vector and niput_file vector to be correspondingi
-//                DatOut.Set( j, k, DatOut.Get( j, k ) +  d ) ;//
-
-              
-	    }
-	}
-	
-	// now convert sum to mean
-	for( j = 0; j < DatOut.GetGenes( ); ++j )
-	  for( k = ( j + 1 ); k < DatOut.GetGenes( ); ++k ){
-	    DatOut.Set( j, k, DatOut.Get( j, k ) / weight_sum );
-
-	    if( sArgs.logit_flag ){// transform back to probability values
-              DatOut.Set( j, k, sigmoid( DatOut.Get( j, k ) ) );
+            if( sArgs.map_flag ){
+              // we are assuming a fully connected network
+              if( CMeta::IsNaN( d = DatCur.Get( veciGenesCur[ j ], veciGenesCur[ k ] ) ) ){
+                cerr << d << endl;
+                cerr << veciGenesCur[ j ] << endl;
+                cerr << veciGenesCur[ k ] << endl;
+                cerr << "ERROR: missing values" << input_files[ i ] << endl;
+                return 1;
+              }
+            }
+            else{
+              if( CMeta::IsNaN( d = DatCur.Get(  j, k ) ) ){
+                cerr << "ERROR: missing values" << input_files[ i ] << endl;
+                return 1;
+              }
             }
 
+            //cerr << d << endl;
+            //cerr << terms.size() << endl;
+            //for(int q = 0 ; q < terms.size() ; q++)
+            //  cerr << terms[q] << endl;
+            //cerr << terms[i] << endl;
+            //cerr << term2weight[terms[i]] << endl;	      
+
+            DatOut.Set( j, k, DatOut.Get( j, k ) + d * term2weight[terms[i]]) ; //weight edge based on context weight TODO: right now this implementation depends on the order of the terms vector and the input_files vector to be corresponding
+            //                DatOut.Set( j, k, DatOut.Get( j, k ) +  d ) ;//
+
+
           }
-	//DatOut.Set( j, k, DatOut.Get( j, k ) / input_files.size( ) );
-        
-	DatOut.Save( sArgs.output_arg );
-	return iRet; 
+    }
+
+    // now convert sum to mean
+    for( j = 0; j < DatOut.GetGenes( ); ++j )
+      for( k = ( j + 1 ); k < DatOut.GetGenes( ); ++k ){
+        DatOut.Set( j, k, DatOut.Get( j, k ) / weight_sum );
+
+        if( sArgs.logit_flag ){// transform back to probability values
+          DatOut.Set( j, k, sigmoid( DatOut.Get( j, k ) ) );
+        }
+
+      }
+    //DatOut.Set( j, k, DatOut.Get( j, k ) / input_files.size( ) );
+
+    DatOut.Save( sArgs.output_arg );
+    return iRet; 
 }

tools/SVMperfer/SVMperfer.cpp

 using namespace SVMLight;
 //#include "../../extlib/svm_light/svm_light/kernel.h"
 
+inline bool file_exists (const std::string& name) {
+    struct stat buffer;   
+    return (stat (name.c_str(), &buffer) == 0); 
+}
+
+vector< pair< string, string > > ReadLabelList(ifstream & ifsm, string output_prefix) {
+  static const size_t c_iBuffer = 1024;
+  char acBuffer[c_iBuffer];
+  vector<string> vecstrTokens;
+  vector< pair < string, string > > inout;
+  while (!ifsm.eof()) {
+    ifsm.getline(acBuffer, c_iBuffer - 1);
+    acBuffer[c_iBuffer - 1] = 0;
+    vecstrTokens.clear();
+    CMeta::Tokenize(acBuffer, vecstrTokens);
+    if (vecstrTokens.empty())
+      continue;
+    if (vecstrTokens.size() != 2) {
+      cerr << "Illegal inout line (" << vecstrTokens.size() << "): "
+        << acBuffer << endl;
+      continue;
+    }
+    
+    if( file_exists( output_prefix + "/" + vecstrTokens[1] ) ){
+      continue;
+    }
+    
+
+    //cout << file_exists( vecstrTokens[1] ) << endl;
+
+    inout.push_back( make_pair( vecstrTokens[0], vecstrTokens[1] ) );
+  }
+  cout << inout.size() << " number of label files." << endl;
+  return inout;
+
+}
+
 vector<SVMLight::SVMLabel> ReadLabels(ifstream & ifsm) {
 
-	static const size_t c_iBuffer = 1024;
-	char acBuffer[c_iBuffer];
-	vector<string> vecstrTokens;
-	vector<SVMLight::SVMLabel> vecLabels;
-	size_t numPositives, numNegatives;
-	numPositives = numNegatives = 0;
-	while (!ifsm.eof()) {
-		ifsm.getline(acBuffer, c_iBuffer - 1);
-		acBuffer[c_iBuffer - 1] = 0;
-		vecstrTokens.clear();
-		CMeta::Tokenize(acBuffer, vecstrTokens);
-		if (vecstrTokens.empty())
-			continue;
-		if (vecstrTokens.size() != 2) {
-			cerr << "Illegal label line (" << vecstrTokens.size() << "): "
-					<< acBuffer << endl;
-			continue;
-		}
-		vecLabels.push_back(SVMLight::SVMLabel(vecstrTokens[0], atof(
-				vecstrTokens[1].c_str())));
-		if (vecLabels.back().Target > 0)
-			numPositives++;
-		else
-			numNegatives++;
-	}
-	return vecLabels;
+  static const size_t c_iBuffer = 1024;
+  char acBuffer[c_iBuffer];
+  vector<string> vecstrTokens;
+  vector<SVMLight::SVMLabel> vecLabels;
+  size_t numPositives, numNegatives;
+  numPositives = numNegatives = 0;
+  while (!ifsm.eof()) {
+    ifsm.getline(acBuffer, c_iBuffer - 1);
+    acBuffer[c_iBuffer - 1] = 0;
+    vecstrTokens.clear();
+    CMeta::Tokenize(acBuffer, vecstrTokens);
+    if (vecstrTokens.empty())
+      continue;
+    if (vecstrTokens.size() != 2) {
+      cerr << "Illegal label line (" << vecstrTokens.size() << "): "
+        << acBuffer << endl;
+      continue;
+    }
+    //cout << vecstrTokens[0] << endl;
+    //cout << vecstrTokens[1] << endl;
+
+
+    vecLabels.push_back(SVMLight::SVMLabel(vecstrTokens[0], atof(
+            vecstrTokens[1].c_str())));
+    if (vecLabels.back().Target > 0)
+      numPositives++;
+    else
+      numNegatives++;
+  }
+
+  cout << numPositives << endl;
+  cout << numNegatives << endl;
+
+  return vecLabels;
 }
 
 struct SortResults {
 
-	bool operator()(const SVMLight::Result& rOne, const SVMLight::Result & rTwo) const {
-		return (rOne.Value > rTwo.Value);
-	}
+  bool operator()(const SVMLight::Result& rOne, const SVMLight::Result & rTwo) const {
+    return (rOne.Value > rTwo.Value);
+  }
 };
 
 size_t PrintResults(vector<SVMLight::Result> vecResults, ofstream & ofsm) {
-	sort(vecResults.begin(), vecResults.end(), SortResults());
-	int LabelVal;
-	for (size_t i = 0; i < vecResults.size(); i++) {
-		ofsm << vecResults[i].GeneName << '\t' << vecResults[i].Target << '\t'
-				<< vecResults[i].Value << endl;
-	}
+  sort(vecResults.begin(), vecResults.end(), SortResults());
+  int LabelVal;
+  for (size_t i = 0; i < vecResults.size(); i++) {
+    ofsm << vecResults[i].GeneName << '\t' << vecResults[i].Target << '\t'
+      << vecResults[i].Value << endl;
+  }
 }
 ;
 
 struct ParamStruct {
-	vector<float> vecK, vecTradeoff;
-	vector<size_t> vecLoss;
-	vector<char*> vecNames;
+  vector<float> vecK, vecTradeoff;
+  vector<size_t> vecLoss;
+  vector<char*> vecNames;
 };
 
 ParamStruct ReadParamsFromFile(ifstream& ifsm, string outFile) {
-	static const size_t c_iBuffer = 1024;
-	char acBuffer[c_iBuffer];
-	char* nameBuffer;
-	vector<string> vecstrTokens;
-	size_t extPlace;
-	string Ext, FileName;
-	if ((extPlace = outFile.find_first_of(".")) != string::npos) {
-		FileName = outFile.substr(0, extPlace);
-		Ext = outFile.substr(extPlace, outFile.size());
-	} else {
-		FileName = outFile;
-		Ext = "";
-	}
-	ParamStruct PStruct;
-	size_t index = 0;
-	while (!ifsm.eof()) {
-		ifsm.getline(acBuffer, c_iBuffer - 1);
-		acBuffer[c_iBuffer - 1] = 0;
-		vecstrTokens.clear();
-		CMeta::Tokenize(acBuffer, vecstrTokens);
-		if (vecstrTokens.empty())
-			continue;
-		if (vecstrTokens.size() != 3) {
-			cerr << "Illegal params line (" << vecstrTokens.size() << "): "
-					<< acBuffer << endl;
-			continue;
-		}
-		if (acBuffer[0] == '#') {
-			cerr << "skipping " << acBuffer << endl;
-		} else {
-			PStruct.vecLoss.push_back(atoi(vecstrTokens[0].c_str()));
-			PStruct.vecTradeoff.push_back(atof(vecstrTokens[1].c_str()));
-			PStruct.vecK.push_back(atof(vecstrTokens[2].c_str()));
-			PStruct.vecNames.push_back(new char[c_iBuffer]);
-			if (PStruct.vecLoss[index] == 4 || PStruct.vecLoss[index] == 5)
-				sprintf(PStruct.vecNames[index], "%s_l%d_c%4.6f_k%4.3f%s",
-						FileName.c_str(), PStruct.vecLoss[index],
-						PStruct.vecTradeoff[index], PStruct.vecK[index],
-						Ext.c_str());
-			else
-				sprintf(PStruct.vecNames[index], "%s_l%d_c%4.6f%s",
-						FileName.c_str(), PStruct.vecLoss[index],
-						PStruct.vecTradeoff[index], Ext.c_str());
-			index++;
-		}
+  static const size_t c_iBuffer = 1024;
+  char acBuffer[c_iBuffer];
+  char* nameBuffer;
+  vector<string> vecstrTokens;
+  size_t extPlace;
+  string Ext, FileName;
+  if ((extPlace = outFile.find_first_of(".")) != string::npos) {
+    FileName = outFile.substr(0, extPlace);
+    Ext = outFile.substr(extPlace, outFile.size());
+  } else {
+    FileName = outFile;
+    Ext = "";
+  }
+  ParamStruct PStruct;
+  size_t index = 0;
+  while (!ifsm.eof()) {
+    ifsm.getline(acBuffer, c_iBuffer - 1);
+    acBuffer[c_iBuffer - 1] = 0;
+    vecstrTokens.clear();
+    CMeta::Tokenize(acBuffer, vecstrTokens);
+    if (vecstrTokens.empty())
+      continue;
+    if (vecstrTokens.size() != 3) {
+      cerr << "Illegal params line (" << vecstrTokens.size() << "): "
+        << acBuffer << endl;
+      continue;
+    }
+    if (acBuffer[0] == '#') {
+      cerr << "skipping " << acBuffer << endl;
+    } else {
+      PStruct.vecLoss.push_back(atoi(vecstrTokens[0].c_str()));
+      PStruct.vecTradeoff.push_back(atof(vecstrTokens[1].c_str()));
+      PStruct.vecK.push_back(atof(vecstrTokens[2].c_str()));
+      PStruct.vecNames.push_back(new char[c_iBuffer]);
+      if (PStruct.vecLoss[index] == 4 || PStruct.vecLoss[index] == 5)
+        sprintf(PStruct.vecNames[index], "%s_l%d_c%4.6f_k%4.3f%s",
+            FileName.c_str(), PStruct.vecLoss[index],
+            PStruct.vecTradeoff[index], PStruct.vecK[index],
+            Ext.c_str());
+      else
+        sprintf(PStruct.vecNames[index], "%s_l%d_c%4.6f%s",
+            FileName.c_str(), PStruct.vecLoss[index],
+            PStruct.vecTradeoff[index], Ext.c_str());
+      index++;
+    }
 
-	}
-	return PStruct;
+  }
+  return PStruct;
 }
 
 int main(int iArgs, char** aszArgs) {
-	gengetopt_args_info sArgs;
+  gengetopt_args_info sArgs;
 
-	CPCL PCL;
-	SVMLight::CSVMPERF SVM;
+  CPCL PCL;
+  SVMLight::CSVMPERF SVM;
 
-	size_t i, j, iGene, jGene;
-	ifstream ifsm;
-	if (cmdline_parser(iArgs, aszArgs, &sArgs)) {
-		cmdline_parser_print_help();
-		return 1;
-	}
-	SVM.SetVerbosity(sArgs.verbosity_arg);
-	SVM.SetLossFunction(sArgs.error_function_arg);
-	if (sArgs.k_value_arg > 1) {
-		cerr << "k_value is >1. Setting default 0.5" << endl;
-		SVM.SetPrecisionFraction(0.5);
-	} else if (sArgs.k_value_arg <= 0) {
-		cerr << "k_value is <=0. Setting default 0.5" << endl;
-		SVM.SetPrecisionFraction(0.5);
-	} else {
-		SVM.SetPrecisionFraction(sArgs.k_value_arg);
-	}
+  size_t i, j, iGene, jGene;
+  ifstream ifsm, iifsm;
 
-	
-	if (sArgs.cross_validation_arg < 1){
-	  cerr << "cross_valid is <1. Must be set at least 1" << endl;
-	  return 1;
-	}
-	else if(sArgs.cross_validation_arg < 2){
-	  cerr << "cross_valid is set to 1. No cross validation holdouts will be run." << endl;
-	}
-	
-	SVM.SetTradeoff(sArgs.tradeoff_arg);
-	if (sArgs.slack_flag)
-		SVM.UseSlackRescaling();
-	else
-		SVM.UseMarginRescaling();
+  if (cmdline_parser(iArgs, aszArgs, &sArgs)) {
+    cmdline_parser_print_help();
+    return 1;
+  }
+  SVM.SetVerbosity(sArgs.verbosity_arg);
+  SVM.SetLossFunction(sArgs.error_function_arg);
+  if (sArgs.k_value_arg > 1) {
+    cerr << "k_value is >1. Setting default 0.5" << endl;
+    SVM.SetPrecisionFraction(0.5);
+  } else if (sArgs.k_value_arg <= 0) {
+    cerr << "k_value is <=0. Setting default 0.5" << endl;
+    SVM.SetPrecisionFraction(0.5);
+  } else {
+    SVM.SetPrecisionFraction(sArgs.k_value_arg);
+  }
 
 
-	if (!SVM.parms_check()) {
-		cerr << "Sanity check failed, see above errors" << endl;
-		return 1;
-	}
+  if (sArgs.cross_validation_arg < 1){
+    cerr << "cross_valid is <1. Must be set at least 1" << endl;
+    return 1;
+  }
+  else if(sArgs.cross_validation_arg < 2){
+    cerr << "cross_valid is set to 1. No cross validation holdouts will be run." << endl;
+  }
 
-	//  cout << "there are " << vecLabels.size() << " labels processed" << endl;
-	size_t iFile;
-	vector<string> PCLs;
-	if (sArgs.input_given) {
-		if (!PCL.Open(sArgs.input_arg, sArgs.skip_arg, sArgs.mmap_flag)) {
-			cerr << "Could not open input PCL" << endl;
-			return 1;
-		}
-	}
+  SVM.SetTradeoff(sArgs.tradeoff_arg);
+  if (sArgs.slack_flag)
+    SVM.UseSlackRescaling();
+  else
+    SVM.UseMarginRescaling();
 
-	vector<SVMLight::SVMLabel> vecLabels;
-	set<string> setLabeledGenes;
-	if (sArgs.labels_given) {
-		ifsm.clear();
-		ifsm.open(sArgs.labels_arg);
-		if (ifsm.is_open())
-			vecLabels = ReadLabels(ifsm);
-		else {
-			cerr << "Could not read label file" << endl;
-			return 1;
-		}
-		for (i = 0; i < vecLabels.size(); i++)
-			setLabeledGenes.insert(vecLabels[i].GeneName);
-	}
 
-	SVMLight::SAMPLE* pTrainSample;
-	vector<SVMLight::SVMLabel> pTrainVector[sArgs.cross_validation_arg];
-	vector<SVMLight::SVMLabel> pTestVector[sArgs.cross_validation_arg];
-	vector<SVMLight::Result> AllResults;
-	vector<SVMLight::Result> tmpAllResults;
+  if (!SVM.parms_check()) {
+    cerr << "Sanity check failed, see above errors" << endl;
+    return 1;
+  }
 
-	if (sArgs.model_given && sArgs.labels_given) { //learn once and write to file
-		pTrainSample = CSVMPERF::CreateSample(PCL, vecLabels);
-		SVM.Learn(*pTrainSample);
-		SVM.WriteModel(sArgs.model_arg,sArgs.simple_model_flag);
-	} else if (sArgs.model_given && sArgs.output_given) { //read model and classify all
+  if (!sArgs.output_given){
+    cerr << "output prefix not provided" << endl;
+    return 1;
+  }
+  
+  string output_prefix(sArgs.output_arg);
 
-		if(sArgs.test_labels_given && !sArgs.all_flag){
-		vector<SVMLight::SVMLabel> vecTestLabels;
-			ifsm.clear();
-			ifsm.open(sArgs.test_labels_arg);
-			if (ifsm.is_open())
-				vecTestLabels = ReadLabels(ifsm);
+  //  cout << "there are " << vecLabels.size() << " labels processed" << endl;
+  size_t iFile;
+  vector<string> PCLs;
+  if (sArgs.input_given) {
+    if (!PCL.Open(sArgs.input_arg, sArgs.skip_arg, sArgs.mmap_flag)) {
+      cerr << "Could not open input PCL" << endl;
+      return 1;
+    }
+  }
 
-			else {
-				cerr << "Could not read label file" << endl;
-				exit(1);
-			}
 
+  vector< pair < string, string > > vecLabelLists;
+  if (sArgs.labels_given) {
+    ifsm.clear();
+    ifsm.open(sArgs.labels_arg);
+    if (ifsm.is_open())
+      vecLabelLists = ReadLabelList(ifsm, output_prefix);
+    else {
+      cerr << "Could not read label list" << endl;
+      return 1;
+    }
+    ifsm.close();
+  }else{
+    cerr << "list of labels not given" << endl;
+    return 1;
+    //  if (sArgs.labels_given) {
+    //    vecLabelLists.push_back(pair(sArgs.labels_arg,sArgs.output_arg))
+    //  }
+  }
+  size_t k;
+  string labels_fn;
+  string output_fn;
 
-			cerr << "Loading Model" << endl;
-			SVM.ReadModel(sArgs.model_arg);
-			cerr << "Model Loaded" << endl;
+  
+    SVMLight::SAMPLE* pTrainSample;
+    vector<SVMLight::Result> AllResults;
+    vector<SVMLight::Result> tmpAllResults;
+    vector<SVMLight::SVMLabel> pTrainVector[sArgs.cross_validation_arg];
+    vector<SVMLight::SVMLabel> pTestVector[sArgs.cross_validation_arg];
+    vector<SVMLight::SVMLabel> vecLabels;
+ 
+    string out_fn;
 
-			pTestVector[0].reserve((size_t) vecTestLabels.size()+1 );
-			for (j = 0; j < vecTestLabels.size(); j++) {
-				pTestVector[0].push_back(vecTestLabels[j]);		      
-			}
+  for(k = 0; k < vecLabelLists.size(); k ++){
+    labels_fn = vecLabelLists[k].first;
+    output_fn = vecLabelLists[k].second;
 
+    cout << labels_fn << endl;
+    cout << output_fn << endl;
+    
+    vecLabels.clear();
 
-			tmpAllResults = SVM.Classify(PCL,	pTestVector[0]);
-			cerr << "Classified " << tmpAllResults.size() << " examples"<< endl;
-			AllResults.insert(AllResults.end(), tmpAllResults.begin(), tmpAllResults.end());
-			tmpAllResults.resize(0);
-			ofstream ofsm;
-			ofsm.clear();
-			ofsm.open(sArgs.output_arg);
-			PrintResults(AllResults, ofsm);
-			return 0;
-		}else{
-			vector<SVMLabel> vecAllLabels;
+    ifsm.clear();
+    ifsm.open(labels_fn.c_str());
+    if (ifsm.is_open())
+      vecLabels = ReadLabels(ifsm);
+    else {
+      cerr << "Could not read label file" << endl;
+      return 1;
+    }
+    ifsm.close();
 
-			for (size_t i = 0; i < PCL.GetGenes(); i++)
-				vecAllLabels.push_back(SVMLabel(PCL.GetGene(i), 0));
+    cout << "finished reading labels." << endl;
 
-			SVM.ReadModel(sArgs.model_arg);
-			AllResults = SVM.Classify(PCL, vecAllLabels);
-			ofstream ofsm;
-			ofsm.open(sArgs.output_arg);
-			if (ofsm.is_open())
-				PrintResults(AllResults, ofsm);
-			else {
-				cerr << "Could not open output file" << endl;
-			}
-		}
-	} else if (sArgs.output_given && sArgs.labels_given) {
-		//do learning and classifying with cross validation
-	        if( sArgs.cross_validation_arg > 1){	    
-		  for (i = 0; i < sArgs.cross_validation_arg; i++) {
-		    pTestVector[i].reserve((size_t) vecLabels.size()
-					   / sArgs.cross_validation_arg + sArgs.cross_validation_arg);
-		    pTrainVector[i].reserve((size_t) vecLabels.size()
-					    / (sArgs.cross_validation_arg)
-					    * (sArgs.cross_validation_arg - 1)
-					    + sArgs.cross_validation_arg);
-		    for (j = 0; j < vecLabels.size(); j++) {
-		      if (j % sArgs.cross_validation_arg == i) {
-			pTestVector[i].push_back(vecLabels[j]);
-		      } else {
-			pTrainVector[i].push_back((vecLabels[j]));
-		      }
-		    }
-		  }
-		}
-		else{ // if you have less than 2 fold cross, no cross validation is done, all train genes are used. If test_labels are predicted if given, otherwise all genes are predicted.
-		  
-			if(sArgs.test_labels_given){
-					  pTrainVector[0].reserve((size_t) vecLabels.size() + sArgs.cross_validation_arg);
-					  for (j = 0; j < vecLabels.size(); j++) {
-						pTrainVector[0].push_back(vecLabels[j]);		    
-					  }
 
-						ifstream ifsm2;
-						vector<SVMLight::SVMLabel> vecTestLabels;
-						ifsm2.clear();
-						ifsm2.open(sArgs.test_labels_arg);
-						if (ifsm2.is_open())
-							vecTestLabels = ReadLabels(ifsm2);
-						else {
-							cerr << "Could not read label file" << endl;
-							exit(1);
-						}
+    //do learning and classifying with cross validation
+    if( sArgs.cross_validation_arg > 1){	    
+      for (i = 0; i < sArgs.cross_validation_arg; i++) {
 
-						pTestVector[0].reserve((size_t) vecTestLabels.size()+1 );
-						for (j = 0; j < vecTestLabels.size(); j++) {
-							pTestVector[0].push_back(vecTestLabels[j]);		      
-						}
-						
-			}
-			else{// no holdout so train is the same as test gene set
-					  pTestVector[0].reserve((size_t) vecLabels.size() + sArgs.cross_validation_arg);
-					  pTrainVector[0].reserve((size_t) vecLabels.size() + sArgs.cross_validation_arg);
-		  
-					  for (j = 0; j < vecLabels.size(); j++) {
-						pTestVector[0].push_back(vecLabels[j]);		      
-						pTrainVector[0].push_back(vecLabels[j]);		    
-					  }
-			}
-		}
-		
-		
-		vector<SVMLabel> vec_allUnlabeledLabels;
-		vector<Result> vec_allUnlabeledResults;
-		vector<Result> vec_tmpUnlabeledResults;
-		if (sArgs.all_flag) {
-			vec_allUnlabeledLabels.reserve(PCL.GetGenes());
-			vec_allUnlabeledResults.reserve(PCL.GetGenes());
-			for (i = 0; i < PCL.GetGenes(); i++) {
-				if (setLabeledGenes.find(PCL.GetGene(i))
-						== setLabeledGenes.end()) {
-					vec_allUnlabeledLabels.push_back(
-							SVMLabel(PCL.GetGene(i), 0));
-					vec_allUnlabeledResults.push_back(Result(PCL.GetGene(i)));
-				}
-			}
-		}
-		if (sArgs.params_given) { //reading paramters from file
-			ifsm.close();
-			ifsm.clear();
-			ifsm.open(sArgs.params_arg);
-			if (!ifsm.is_open()) {
-				cerr << "Could not open: " << sArgs.params_arg << endl;
-				return 1;
-			}
-			ParamStruct PStruct;
-			string outFile(sArgs.output_arg);
-			PStruct = ReadParamsFromFile(ifsm, outFile);
+        pTestVector[i].clear();
+        pTrainVector[i].clear();
 
-			size_t iParams;
-			ofstream ofsm;
-			SVMLight::SAMPLE * ppTrainSample[sArgs.cross_validation_arg];
-			
-			//build all the samples since they are being reused
-			for (i = 0; i < sArgs.cross_validation_arg; i++)
-				ppTrainSample[i] = SVMLight::CSVMPERF::CreateSample(PCL,
-						pTrainVector[i]);
-			
-			for (iParams = 0; iParams < PStruct.vecTradeoff.size(); iParams++) {
-				SVM.SetLossFunction(PStruct.vecLoss[iParams]);
-				SVM.SetTradeoff(PStruct.vecTradeoff[iParams]);
-				SVM.SetPrecisionFraction(PStruct.vecK[iParams]);
-				for (j = 0; j < vec_allUnlabeledResults.size(); j++)
-					vec_allUnlabeledResults[j].Value = 0;
-				for (i = 0; i < sArgs.cross_validation_arg; i++) {
-					cerr << "Cross Validation Trial " << i << endl;
-					SVM.Learn(*ppTrainSample[i]);
-					
-					cerr << "Learned" << endl;					
-					
-					tmpAllResults = SVM.Classify(PCL, pTestVector[i]);
-					cerr << "Classified " << tmpAllResults.size()
-							<< " examples" << endl;
-					AllResults.insert(AllResults.end(), tmpAllResults.begin(),
-							tmpAllResults.end());
-					tmpAllResults.resize(0);
-					if (sArgs.all_flag && vec_allUnlabeledLabels.size() > 0) {
-						vec_tmpUnlabeledResults = SVM.Classify(PCL,
-								vec_allUnlabeledLabels);
-						for (j = 0; j < vec_tmpUnlabeledResults.size(); j++)
-							vec_allUnlabeledResults[j].Value
-									+= vec_tmpUnlabeledResults[j].Value;
-					}
+        pTestVector[i].reserve((size_t) vecLabels.size()
+            / sArgs.cross_validation_arg + sArgs.cross_validation_arg);
+        pTrainVector[i].reserve((size_t) vecLabels.size()
+            / (sArgs.cross_validation_arg)
+            * (sArgs.cross_validation_arg - 1)
+            + sArgs.cross_validation_arg);
+        for (j = 0; j < vecLabels.size(); j++) {
+          if (j % sArgs.cross_validation_arg == i) {
+            pTestVector[i].push_back(vecLabels[j]);
+          } else {
+            pTrainVector[i].push_back((vecLabels[j]));
+          }
+        }
+      }
+    }
+  else{ // fewer than 2 cross-validation folds: no cross-validation is performed; all training genes are used and predicted
 
-				}
+      // no holdout so train is the same as test gene set
+      pTestVector[0].reserve((size_t) vecLabels.size() + sArgs.cross_validation_arg);
+      pTrainVector[0].reserve((size_t) vecLabels.size() + sArgs.cross_validation_arg);
 
+      for (j = 0; j < vecLabels.size(); j++) {
+        pTestVector[0].push_back(vecLabels[j]);		      
+        pTrainVector[0].push_back(vecLabels[j]);		    
+      }
+    }
 
-				ofsm.open(PStruct.vecNames[iParams]);
-				if (sArgs.all_flag) { //add the unlabeled results
-					for (j = 0; j < vec_tmpUnlabeledResults.size(); j++)
-						vec_allUnlabeledResults[j].Value
-								/= sArgs.cross_validation_arg;
-					AllResults.insert(AllResults.end(),
-							vec_allUnlabeledResults.begin(),
-							vec_allUnlabeledResults.end());
-				}
+    for (i = 0; i < sArgs.cross_validation_arg; i++) {
+      pTrainSample = SVMLight::CSVMPERF::CreateSample(PCL,
+          pTrainVector[i]);
 
-				PrintResults(AllResults, ofsm);
-				ofsm.close();
-				ofsm.clear();
-				if (i > 0 || iParams > 0)
-					SVM.FreeModel();
-				AllResults.resize(0);
-			}
-		} else { //run once
+      cerr << "Cross Validation Trial " << i << endl;
 
-			for (i = 0; i < sArgs.cross_validation_arg; i++) {
-				pTrainSample = SVMLight::CSVMPERF::CreateSample(PCL,
-						pTrainVector[i]);
+      SVM.Learn(*pTrainSample);
+      cerr << "Learned" << endl;
+      tmpAllResults = SVM.Classify(PCL,
+          pTestVector[i]);
+      cerr << "Classified " << tmpAllResults.size() << " examples"
+        << endl;
+      AllResults.insert(AllResults.end(), tmpAllResults.begin(),
+          tmpAllResults.end());
+      tmpAllResults.resize(0);
 
-				cerr << "Cross Validation Trial " << i << endl;
+      if (i > 0) {
+        SVMLight::CSVMPERF::FreeSample(*pTrainSample);
+      }
+    }
 
-				SVM.Learn(*pTrainSample);
-				cerr << "Learned" << endl;
-				tmpAllResults = SVM.Classify(PCL,
-						pTestVector[i]);
+    ofstream ofsm;
+    ofsm.clear();
+    out_fn = output_prefix + "/" + output_fn;
+    ofsm.open(out_fn.c_str());
+    PrintResults(AllResults, ofsm);
+    cout << "printed: " << output_fn << endl;
 
-				cerr << "Classified " << tmpAllResults.size() << " examples"
-						<< endl;
-				AllResults.insert(AllResults.end(), tmpAllResults.begin(),
-						tmpAllResults.end());
-				tmpAllResults.resize(0);
-				if (sArgs.all_flag) {
-					vec_tmpUnlabeledResults = SVM.Classify(
-							PCL, vec_allUnlabeledLabels);
-					for (j = 0; j < vec_tmpUnlabeledResults.size(); j++)
-						vec_allUnlabeledResults[j].Value
-								+= vec_tmpUnlabeledResults[j].Value;
+ 
+    delete[] pTrainSample;
+    AllResults.clear();
+    tmpAllResults.clear();
+    vecLabels.clear();
 
-				}
-				if (i > 0) {
-					SVMLight::CSVMPERF::FreeSample(*pTrainSample);
-				}
-			}
 
-			if (sArgs.all_flag) { //add the unlabeled results
-				for (j = 0; j < vec_allUnlabeledResults.size(); j++)
-					vec_allUnlabeledResults[j].Value
-							/= sArgs.cross_validation_arg;
-				AllResults.insert(AllResults.end(),
-						vec_allUnlabeledResults.begin(),
-						vec_allUnlabeledResults.end());
-			}
 
-			ofstream ofsm;
-			ofsm.clear();
-			ofsm.open(sArgs.output_arg);
-			PrintResults(AllResults, ofsm);
-			return 0;
-		}
-	} else {
-		cerr << "More options are needed" << endl;
-	}
-
+  } 
 }
 
Tip: Filter by directory path e.g. /media app.js to search for public/media/app.js.
Tip: Use camelCasing e.g. ProjME to search for ProjectModifiedEvent.java.
Tip: Filter by extension type e.g. /repo .js to search for all .js files in the /repo directory.
Tip: Separate your search with spaces e.g. /ssh pom.xml to search for src/ssh/pom.xml.
Tip: Use ↑ and ↓ arrow keys to navigate and return to view the file.
Tip: You can also navigate files with Ctrl+j (next) and Ctrl+k (previous) and view the file with Ctrl+o.
Tip: You can also navigate files with Alt+j (next) and Alt+k (previous) and view the file with Alt+o.