author     Pjotr Prins    2017-07-07 06:29:47 +0000
committer  Pjotr Prins    2017-07-07 06:29:47 +0000
commit     dd72b87354d1d3f6d3aa42ed0123a23880e9cb15 (patch)
tree       e08ca195b8ea77956a062a6843cf614e2d453191 /src
parent     450341a7969b1da1d036be2dcdfab919e39d6473 (diff)
download   pangemma-dd72b87354d1d3f6d3aa42ed0123a23880e9cb15.tar.gz
Three compile-time fixes for the GNU GCC 7.1.0 compiler on Linux.
This patch is a mess because the files use mixed line endings. I propose
converting them to standard Unix line endings (as in the original GEMMA code).

To convert a file with vim one can use

  :set fileformat=unix
  :w
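
To convert the whole source tree in one pass, a sketch using vim's argdo
(assuming the affected files live under src/):

  :args src/*.cpp src/*.h
  :argdo set fileformat=unix | update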

Do not merge this pull request; we can handle the fixes later.
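
For the record, the three compile-time fixes hidden in the whitespace churn
appear to be the following (a sketch reconstructed from the hunks below, in
the repository's own C++; not an authoritative summary):

  // io.cpp, ReadHeader_io: an identifier accidentally split across lines.
  n_
    error++;   // before: fails to compile
  n_error++;   // after

  // lapack.cpp, lapack_float_cholesky_solve: a stray second "cout" was
  // streamed into the output chain.
  cout << "Matrix needs to be symmetric and same dimension in " <<cout  // before
  cout << "Matrix needs to be symmetric and same dimension in " <<      // after

  // logistic.cpp, logistic_cont_pred: a doubled comma before the
  // gsl_vector *yhat parameter in the argument list.
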
Diffstat (limited to 'src')
-rw-r--r--  src/io.cpp         85
-rw-r--r--  src/lapack.cpp    185
-rw-r--r--  src/logistic.cpp 1449
3 files changed, 859 insertions, 860 deletions
diff --git a/src/io.cpp b/src/io.cpp
index 3a4bc3c..3bf6a9e 100644
--- a/src/io.cpp
+++ b/src/io.cpp
@@ -114,7 +114,7 @@ std::istream& safeGetline(std::istream& is, std::string& t) {
                 sb->sbumpc();
             return is;
         case EOF:
-	  
+
             // Also handle the case when the last line has no line
             // ending.
             if(t.empty())
@@ -312,7 +312,7 @@ bool ReadFile_column (const string &file_pheno, vector<int> &indicator_idv,
 		if (strcmp(ch_ptr, "NA")==0) {
 		  indicator_idv.push_back(0);
 		  pheno.push_back(-9);
-		} 
+		}
 		else {
 
 		  // Pheno is different from pimass2.
@@ -800,7 +800,7 @@ bool ReadFile_bed (const string &file_bed, const set<string> &setSnps,
 	for (size_t t=0; t<ns_total; ++t) {
 
 	  // n_bit, and 3 is the number of magic numbers.
-	  infile.seekg(t*n_bit+3); 
+	  infile.seekg(t*n_bit+3);
 
 		if (setSnps.size()!=0 &&
 		    setSnps.count(snpInfo[t].rs_number) == 0) {
@@ -978,7 +978,7 @@ void Plink_ReadOneSNP (const int pos, const vector<int> &indicator_idv,
   else {n_bit=ni_total/4+1;}
 
   // n_bit, and 3 is the number of magic numbers.
-  infile.seekg(pos*n_bit+3); 
+  infile.seekg(pos*n_bit+3);
 
   // Read genotypes.
   char ch[1];
@@ -993,7 +993,7 @@ void Plink_ReadOneSNP (const int pos, const vector<int> &indicator_idv,
     b=ch[0];
 
     // Minor allele homozygous: 2.0; major: 0.0.
-    for (size_t j=0; j<4; ++j) { 
+    for (size_t j=0; j<4; ++j) {
       if ((i==(n_bit-1)) && c==ni_total) {break;}
       if (indicator_idv[c]==0) {c++; continue;}
       c++;
@@ -1406,7 +1406,7 @@ bool PlinkKin (const string &file_bed, vector<int> &indicator_snp,
 		if (indicator_snp[t]==0) {continue;}
 
 		// n_bit, and 3 is the number of magic numbers.
-		infile.seekg(t*n_bit+3);		
+		infile.seekg(t*n_bit+3);
 
 		// Read genotypes.
 		geno_mean=0.0;	n_miss=0; ci_total=0; geno_var=0.0;
@@ -1415,7 +1415,7 @@ bool PlinkKin (const string &file_bed, vector<int> &indicator_snp,
 			b=ch[0];
 
 			// Minor allele homozygous: 2.0; major: 0.0.
-			for (size_t j=0; j<4; ++j) {                
+			for (size_t j=0; j<4; ++j) {
 				if ((i==(n_bit-1)) && ci_total==ni_total) {
 				  break;
 				}
@@ -1734,7 +1734,7 @@ bool ReadFile_bed (const string &file_bed, vector<int> &indicator_idv,
 		if (indicator_snp[t]==0) {continue;}
 
 		// n_bit, and 3 is the number of magic numbers.
-		infile.seekg(t*n_bit+3);		
+		infile.seekg(t*n_bit+3);
 
 		// Read genotypes.
 		c_idv=0; geno_mean=0.0; n_miss=0; c=0;
@@ -1855,7 +1855,7 @@ bool ReadFile_bed (const string &file_bed, vector<int> &indicator_idv,
 		if (indicator_snp[t]==0) {continue;}
 
 		// n_bit, and 3 is the number of magic numbers.
-		infile.seekg(t*n_bit+3);		
+		infile.seekg(t*n_bit+3);
 
 		// Read genotypes.
 		c_idv=0; geno_mean=0.0; n_miss=0; c=0;
@@ -1864,7 +1864,7 @@ bool ReadFile_bed (const string &file_bed, vector<int> &indicator_idv,
 			b=ch[0];
 
 			// Minor allele homozygous: 2.0; major: 0.0.
-			for (size_t j=0; j<4; ++j) {                
+			for (size_t j=0; j<4; ++j) {
 			  if ((i==(n_bit-1)) && c==ni_total) {break;}
 				if (indicator_idv[c]==0) {c++; continue;}
 				c++;
@@ -2113,7 +2113,7 @@ bool ReadFile_sample (const string &file_sample,
 	vector<map<uint32_t, size_t> > cvt_factor_levels;
 
 	char col_type[num_cols];
-	
+
 	// Read header line2.
 	if(!safeGetline(infile, line).eof()) {
 		ch_ptr=strtok ((char *)line.c_str(), " \t");
@@ -2168,7 +2168,7 @@ bool ReadFile_sample (const string &file_sample,
 			}
 			if(col_type[i]=='D')
 			{
-			  
+
 			  // NOTE THIS DOES NOT CHECK TO BE SURE LEVEL
 			  // IS INTEGRAL i.e for atoi error.
 			  if (strcmp(ch_ptr, "NA")!=0) {
@@ -2189,7 +2189,7 @@ bool ReadFile_sample (const string &file_sample,
 		pheno.push_back(pheno_row);
 
 	}
-	
+
 	// Close and reopen the file.
  	infile.close();
  	infile.clear();
@@ -2202,7 +2202,7 @@ bool ReadFile_sample (const string &file_sample,
 		    file_sample<<endl;
 		  return false;
 		}
-		
+
 		// Skip header.
 		safeGetline(infile2, line);
 		safeGetline(infile2, line);
@@ -2220,16 +2220,16 @@ bool ReadFile_sample (const string &file_sample,
 			size_t fac_cvt_i=0;
 			size_t num_fac_levels;
 			while (i<num_cols) {
-			  
+
 			  if(col_type[i]=='C') {
 			    if (strcmp(ch_ptr, "NA")==0) {flag_na=1; d=-9;}
 			    else {d=atof(ch_ptr);}
-			    
+
 			    v_d.push_back(d);
 			  }
-			  
+
 			  if(col_type[i]=='D') {
-			    
+
 			    // NOTE THIS DOES NOT CHECK TO BE SURE
 			    // LEVEL IS INTEGRAL i.e for atoi error.
 			    num_fac_levels=cvt_factor_levels[fac_cvt_i].size();
@@ -2251,7 +2251,7 @@ bool ReadFile_sample (const string &file_sample,
 			    }
 			    fac_cvt_i++;
 			  }
-			  
+
 			  ch_ptr=strtok (NULL, " \t");
 			  i++;
 			}
@@ -2321,7 +2321,7 @@ bool ReadFile_bgen(const string &file_bgen, const set<string> &setSnps,
 	int sig;
 	LUDecomp (WtW, pmt, &sig);
 	LUInvert (WtW, pmt, WtWi);
-	
+
 	// Read in header.
 	uint32_t bgen_snp_block_offset;
 	uint32_t bgen_header_length;
@@ -2373,7 +2373,7 @@ bool ReadFile_bgen(const string &file_bgen, const set<string> &setSnps,
 	size_t ni_total=indicator_idv.size();
 
 	// Number of samples to use in test.
-	size_t ni_test=0;   
+	size_t ni_test=0;
 
 	uint32_t bgen_N;
 	uint16_t bgen_LS;
@@ -2434,7 +2434,7 @@ bool ReadFile_bgen(const string &file_bgen, const set<string> &setSnps,
 		if (setSnps.size()!=0 && setSnps.count(rs)==0) {
 		  SNPINFO sInfo={"-9", rs, -9, -9, minor, major,
 				 static_cast<size_t>(-9), -9, (long int) -9};
-		  
+
 			snpInfo.push_back(sInfo);
 			indicator_snp.push_back(0);
 			if(CompressedSNPBlocks)
@@ -2476,7 +2476,7 @@ bool ReadFile_bgen(const string &file_bgen, const set<string> &setSnps,
 		c_idv=0;
 		gsl_vector_set_zero (genotype_miss);
 		for (size_t i=0; i<bgen_N; ++i) {
-		  
+
 			// CHECK this set correctly!
 			if (indicator_idv[i]==0) {continue;}
 
@@ -2665,7 +2665,7 @@ bool bgenKin (const string &file_oxford, vector<int> &indicator_snp,
 		infile.read(reinterpret_cast<char*>(&bgen_LB),4);
 		bgen_B_allele.resize(bgen_LB);
 		infile.read(&bgen_B_allele[0], bgen_LB);
-		
+
 		uint16_t unzipped_data[3*bgen_N];
 
 		if (indicator_snp[t]==0) {
@@ -2683,11 +2683,11 @@ bool bgenKin (const string &file_oxford, vector<int> &indicator_snp,
 		{
 		  infile.read(reinterpret_cast<char*>(&bgen_P),4);
 		  uint8_t zipped_data[bgen_P];
-		  
+
 		  unzipped_data_size=6*bgen_N;
-		  
+
 		  infile.read(reinterpret_cast<char*>(zipped_data),bgen_P);
-		  
+
 		  int result=
 		    uncompress(reinterpret_cast<Bytef*>(unzipped_data),
 			       reinterpret_cast<uLongf*>(&unzipped_data_size),
@@ -2698,7 +2698,7 @@ bool bgenKin (const string &file_oxford, vector<int> &indicator_snp,
 		}
 		else
 		{
-		  
+
 		  bgen_P=6*bgen_N;
 		  infile.read(reinterpret_cast<char*>(unzipped_data),bgen_P);
 		}
@@ -2708,7 +2708,7 @@ bool bgenKin (const string &file_oxford, vector<int> &indicator_snp,
 
 		for (size_t i=0; i<bgen_N; ++i) {
 
-		  
+
 		  bgen_geno_prob_AA=
 		    static_cast<double>(unzipped_data[i*3])/32768.0;
 		  bgen_geno_prob_AB=
@@ -2723,13 +2723,13 @@ bool bgenKin (const string &file_oxford, vector<int> &indicator_snp,
 		    n_miss++;
 		  }
 		  else {
-		    
+
 		    bgen_geno_prob_AA/=bgen_geno_prob_non_miss;
 		    bgen_geno_prob_AB/=bgen_geno_prob_non_miss;
 		    bgen_geno_prob_BB/=bgen_geno_prob_non_miss;
-		    
+
 		    genotype=2.0*bgen_geno_prob_BB+bgen_geno_prob_AB;
-		    
+
 		    gsl_vector_set(geno, i, genotype);
 		    gsl_vector_set(geno_miss, i, 1.0);
 		    geno_mean+=genotype;
@@ -2936,8 +2936,7 @@ bool ReadHeader_io (const string &line, HEADER &header)
 	header.n_col=header.coln+1;
       } else {
 	cout<<"error! more than two n_total columns in the file."<<endl;
-	n_
-	  error++;}
+	n_error++;}
     } else if (nmis_set.count(type)!=0) {
       if (header.nmis_col==0) {header.nmis_col=header.coln+1;} else {
 	cout<<"error! more than two n_mis columns in the file."<<endl;
@@ -2988,7 +2987,7 @@ bool ReadHeader_io (const string &line, HEADER &header)
     } else {
       string str = ch_ptr;
       string cat = str.substr(str.size()-2, 2);
-      
+
       if(cat == "_c" || cat =="_C"){
 
         // continuous
@@ -2999,7 +2998,7 @@ bool ReadHeader_io (const string &line, HEADER &header)
 	header.catd_col.insert(header.coln+1);
       }
     }
-    
+
     ch_ptr=strtok (NULL, " , \t");
     header.coln++;
   }
@@ -3396,7 +3395,7 @@ bool PlinkKin (const string &file_bed, const int display_pace,
 		  for (size_t j=0; j<4; ++j) {
 		    if ((i==(n_bit-1)) && ci_total==ni_total) {break;}
 		    if (indicator_idv[ci_total]==0) {ci_total++; continue;}
-		    
+
 		    if (b[2*j]==0) {
 		      if (b[2*j+1]==0) {
 			gsl_vector_set(geno, ci_test, 2.0);
@@ -3412,7 +3411,7 @@ bool PlinkKin (const string &file_bed, const int display_pace,
 		      if (b[2*j+1]==1) {gsl_vector_set(geno, ci_test, 0.0); }
 		      else {gsl_vector_set(geno, ci_test, -9.0); n_miss++; }
 		    }
-		    
+
 		    ci_test++;
 		    ci_total++;
 		  }
@@ -3561,7 +3560,7 @@ bool MFILEKin (const size_t mfile_mode, const string &file_mfile,
     } else {
       BimbamKin (file_name, display_pace, indicator_idv, mindicator_snp[l], mapRS2weight, mapRS2cat, msnpInfo[l], W, kin_tmp, ns_tmp);
     }
-    
+
     // Add ns.
     gsl_vector_add(vector_ns, ns_tmp);
 
@@ -3647,7 +3646,7 @@ bool ReadFile_wsnp (const string &file_wcat, const size_t n_vc,
   }
 
   string line, rs, chr, a1, a0, pos, cm;
-  
+
   // Read header.
   HEADER header;
   !safeGetline(infile, line).eof();
@@ -3978,7 +3977,7 @@ void Calcq (const size_t n_block, const vector<size_t> &vec_cat,
 
   // Compute q and s.
   for (size_t i=0; i<vec_cat.size(); i++) {
-    
+
     // Extract quantities.
     cat=vec_cat[i];
     n_total=vec_ni[i];
@@ -4017,7 +4016,7 @@ void Calcq (const size_t n_block, const vector<size_t> &vec_cat,
 
     // Record values.
     for (size_t i=0; i<vec_cat.size(); i++) {
-      
+
       // Extract quantities.
       cat=vec_cat[i];
       n_total=vec_ni[i];
@@ -4369,7 +4368,7 @@ void ReadFile_mref (const string &file_mref, gsl_matrix *S_mat,
       if (i!=j) {gsl_matrix_set(Svar_mat, j, i, d);}
     }
   }
-  
+
   // Free matrices.
   gsl_matrix_free(S_sub);
   gsl_matrix_free(Svar_sub);
diff --git a/src/lapack.cpp b/src/lapack.cpp
index 01d2039..05b85f4 100644
--- a/src/lapack.cpp
+++ b/src/lapack.cpp
@@ -60,20 +60,20 @@ extern "C" double ddot_(int *N, double *DX, int *INCX, double *DY, int *INCY);
 void lapack_float_cholesky_decomp (gsl_matrix_float *A) {
 	int N=A->size1, LDA=A->size1, INFO;
 	char UPLO='L';
-	
+
 	if (N!=(int)A->size2) {
 	  cout << "Matrix needs to be symmetric and same dimension in " <<
 	    "lapack_cholesky_decomp." << endl;
 	  return;
 	}
-	
+
 	spotrf_(&UPLO, &N, A->data, &LDA, &INFO);
 	if (INFO!=0) {
 	  cout << "Cholesky decomposition unsuccessful in " <<
 	    "lapack_cholesky_decomp." << endl;
 	  return;
-	}	
-	
+	}
+
 	return;
 }
 
@@ -81,19 +81,19 @@ void lapack_float_cholesky_decomp (gsl_matrix_float *A) {
 void lapack_cholesky_decomp (gsl_matrix *A) {
 	int N=A->size1, LDA=A->size1, INFO;
 	char UPLO='L';
-	
+
 	if (N!=(int)A->size2) {
 	  cout << "Matrix needs to be symmetric and same dimension in " <<
 	    "lapack_cholesky_decomp." << endl;
 	  return;
 	}
-	
+
 	dpotrf_(&UPLO, &N, A->data, &LDA, &INFO);
 	if (INFO!=0) {
 	  cout << "Cholesky decomposition unsuccessful in " <<
 	    "lapack_cholesky_decomp."<<endl;
 	  return;
-	} 
+	}
 
 	return;
 }
@@ -104,13 +104,14 @@ void lapack_float_cholesky_solve (gsl_matrix_float *A,
 				  gsl_vector_float *x) {
 	int N=A->size1, NRHS=1, LDA=A->size1, LDB=b->size, INFO;
 	char UPLO='L';
-	
+
+
 	if (N!=(int)A->size2 || N!=LDB) {
-	  cout << "Matrix needs to be symmetric and same dimension in " <<cout
+	  cout << "Matrix needs to be symmetric and same dimension in " <<
 	    "lapack_cholesky_solve." << endl;
 	  return;
 	}
-	
+
 	gsl_vector_float_memcpy (x, b);
 	spotrs_(&UPLO, &N, &NRHS, A->data, &LDA, x->data, &LDB, &INFO);
 	if (INFO!=0) {
@@ -118,7 +119,7 @@ void lapack_float_cholesky_solve (gsl_matrix_float *A,
 	    endl;
 	  return;
 	}
-	
+
 	return;
 }
 
@@ -127,13 +128,13 @@ void lapack_cholesky_solve (gsl_matrix *A, const gsl_vector *b,
 			    gsl_vector *x) {
 	int N=A->size1, NRHS=1, LDA=A->size1, LDB=b->size, INFO;
 	char UPLO='L';
-	
+
 	if (N!=(int)A->size2 || N!=LDB) {
 	  cout << "Matrix needs to be symmetric and same dimension in " <<
 	    "lapack_cholesky_solve." << endl;
 	  return;
 	}
-	
+
 	gsl_vector_memcpy (x, b);
 	dpotrs_(&UPLO, &N, &NRHS, A->data, &LDA, x->data, &LDB, &INFO);
 	if (INFO!=0) {
@@ -141,7 +142,7 @@ void lapack_cholesky_solve (gsl_matrix *A, const gsl_vector *b,
 	    endl;
 	  return;
 	}
-	
+
 	return;
 }
 
@@ -149,15 +150,15 @@ void lapack_sgemm (char *TransA, char *TransB, float alpha,
 		   const gsl_matrix_float *A, const gsl_matrix_float *B,
 		   float beta, gsl_matrix_float *C) {
 	int M, N, K1, K2, LDA=A->size1, LDB=B->size1, LDC=C->size2;
-	
+
 	if (*TransA=='N' || *TransA=='n') {M=A->size1; K1=A->size2;}
 	else if (*TransA=='T' || *TransA=='t') {M=A->size2; K1=A->size1;}
 	else {cout<<"need 'N' or 'T' in lapack_sgemm"<<endl; return;}
-	
+
 	if (*TransB=='N' || *TransB=='n') {N=B->size2; K2=B->size1;}
 	else if (*TransB=='T' || *TransB=='t')  {N=B->size1; K2=B->size2;}
 	else {cout<<"need 'N' or 'T' in lapack_sgemm"<<endl;  return;}
-	
+
 	if (K1!=K2) {
 	  cout<<"A and B not compatible in lapack_sgemm"<<endl;
 	  return;
@@ -166,18 +167,18 @@ void lapack_sgemm (char *TransA, char *TransB, float alpha,
 	  cout<<"C not compatible in lapack_sgemm"<<endl;
 	  return;
 	}
-	
+
 	gsl_matrix_float *A_t=gsl_matrix_float_alloc (A->size2, A->size1);
 	gsl_matrix_float_transpose_memcpy (A_t, A);
 	gsl_matrix_float *B_t=gsl_matrix_float_alloc (B->size2, B->size1);
 	gsl_matrix_float_transpose_memcpy (B_t, B);
 	gsl_matrix_float *C_t=gsl_matrix_float_alloc (C->size2, C->size1);
 	gsl_matrix_float_transpose_memcpy (C_t, C);
-	
+
 	sgemm_(TransA, TransB, &M, &N, &K1, &alpha, A_t->data, &LDA,
 	       B_t->data, &LDB, &beta, C_t->data, &LDC);
 	gsl_matrix_float_transpose_memcpy (C, C_t);
-	
+
 	gsl_matrix_float_free (A_t);
 	gsl_matrix_float_free (B_t);
 	gsl_matrix_float_free (C_t);
@@ -190,15 +191,15 @@ void lapack_dgemm (char *TransA, char *TransB, double alpha,
 		   const gsl_matrix *A, const gsl_matrix *B,
 		   double beta, gsl_matrix *C) {
 	int M, N, K1, K2, LDA=A->size1, LDB=B->size1, LDC=C->size2;
-	
+
 	if (*TransA=='N' || *TransA=='n') {M=A->size1; K1=A->size2;}
 	else if (*TransA=='T' || *TransA=='t') {M=A->size2; K1=A->size1;}
 	else {cout<<"need 'N' or 'T' in lapack_dgemm"<<endl; return;}
-	
+
 	if (*TransB=='N' || *TransB=='n') {N=B->size2; K2=B->size1;}
 	else if (*TransB=='T' || *TransB=='t')  {N=B->size1; K2=B->size2;}
 	else {cout<<"need 'N' or 'T' in lapack_dgemm"<<endl;  return;}
-	
+
 	if (K1!=K2) {
 	  cout << "A and B not compatible in lapack_dgemm"<<endl;
 	  return;
@@ -207,7 +208,7 @@ void lapack_dgemm (char *TransA, char *TransB, double alpha,
 	  cout<<"C not compatible in lapack_dgemm"<<endl;
 	  return;
 	}
-	
+
 	gsl_matrix *A_t=gsl_matrix_alloc (A->size2, A->size1);
 	gsl_matrix_transpose_memcpy (A_t, A);
 	gsl_matrix *B_t=gsl_matrix_alloc (B->size2, B->size1);
@@ -219,7 +220,7 @@ void lapack_dgemm (char *TransA, char *TransB, double alpha,
 	       B_t->data, &LDB, &beta, C_t->data, &LDC);
 
 	gsl_matrix_transpose_memcpy (C, C_t);
-	
+
 	gsl_matrix_free (A_t);
 	gsl_matrix_free (B_t);
 	gsl_matrix_free (C_t);
@@ -234,15 +235,15 @@ void lapack_float_eigen_symmv (gsl_matrix_float *A, gsl_vector_float *eval,
 	if (flag_largematrix==1) {
 		int N=A->size1, LDA=A->size1, INFO, LWORK=-1;
 		char JOBZ='V', UPLO='L';
-				
+
 		if (N!=(int)A->size2 || N!=(int)eval->size) {
 		  cout << "Matrix needs to be symmetric and same " <<
 		    "dimension in lapack_eigen_symmv."<<endl;
 		  return;
 		}
-		
+
 		LWORK=3*N;
-		float *WORK=new float [LWORK];	
+		float *WORK=new float [LWORK];
 		ssyev_(&JOBZ, &UPLO, &N, A->data, &LDA, eval->data, WORK,
 		       &LWORK, &INFO);
 		if (INFO!=0) {
@@ -250,31 +251,31 @@ void lapack_float_eigen_symmv (gsl_matrix_float *A, gsl_vector_float *eval,
 		    "lapack_eigen_symmv."<<endl;
 		  return;
 		}
-		
+
 		gsl_matrix_float_view A_sub =
 		  gsl_matrix_float_submatrix(A, 0, 0, N, N);
 		gsl_matrix_float_memcpy (evec, &A_sub.matrix);
 		gsl_matrix_float_transpose (evec);
-		
+
 		delete [] WORK;
-	} else {	
+	} else {
 		int N=A->size1, LDA=A->size1, LDZ=A->size1, INFO,
 		  LWORK=-1, LIWORK=-1;
 		char JOBZ='V', UPLO='L', RANGE='A';
 		float ABSTOL=1.0E-7;
-		
+
 		// VL, VU, IL, IU are not referenced; M equals N if RANGE='A'.
 		float VL=0.0, VU=0.0;
 		int IL=0, IU=0, M;
-		
+
 		if (N!=(int)A->size2 || N!=(int)eval->size) {
 		  cout << "Matrix needs to be symmetric and same " <<
 		    "dimension in lapack_float_eigen_symmv." << endl;
 		  return;
 		}
-		
+
 		int *ISUPPZ=new int [2*N];
-				
+
 		float WORK_temp[1];
 		int IWORK_temp[1];
 		ssyevr_(&JOBZ, &RANGE, &UPLO, &N, A->data, &LDA, &VL,
@@ -286,11 +287,11 @@ void lapack_float_eigen_symmv (gsl_matrix_float *A, gsl_vector_float *eval,
 		    "lapack_float_eigen_symmv." << endl;
 		  return;
 		}
-		LWORK=(int)WORK_temp[0]; LIWORK=(int)IWORK_temp[0];	
-		 
+		LWORK=(int)WORK_temp[0]; LIWORK=(int)IWORK_temp[0];
+
 		float *WORK=new float [LWORK];
 		int *IWORK=new int [LIWORK];
-		
+
 		ssyevr_(&JOBZ, &RANGE, &UPLO, &N, A->data, &LDA, &VL,
 			&VU, &IL, &IU, &ABSTOL, &M, eval->data, evec->data,
 			&LDZ, ISUPPZ, WORK, &LWORK, IWORK, &LIWORK, &INFO);
@@ -299,15 +300,15 @@ void lapack_float_eigen_symmv (gsl_matrix_float *A, gsl_vector_float *eval,
 		    "lapack_float_eigen_symmv." << endl;
 		  return;
 		}
-		
+
 		gsl_matrix_float_transpose (evec);
-		
+
 		delete [] ISUPPZ;
 		delete [] WORK;
 		delete [] IWORK;
 	}
-	
-	
+
+
 	return;
 }
 
@@ -318,16 +319,16 @@ void lapack_eigen_symmv (gsl_matrix *A, gsl_vector *eval, gsl_matrix *evec,
 			 const size_t flag_largematrix) {
 	if (flag_largematrix==1) {
 		int N=A->size1, LDA=A->size1, INFO, LWORK=-1;
-		char JOBZ='V', UPLO='L';		
-		
+		char JOBZ='V', UPLO='L';
+
 		if (N!=(int)A->size2 || N!=(int)eval->size) {
 		  cout << "Matrix needs to be symmetric and same " <<
 		    "dimension in lapack_eigen_symmv." << endl;
 		  return;
 		}
-		
+
 		LWORK=3*N;
-		double *WORK=new double [LWORK];	
+		double *WORK=new double [LWORK];
 		dsyev_(&JOBZ, &UPLO, &N, A->data, &LDA, eval->data, WORK,
 		       &LWORK, &INFO);
 		if (INFO!=0) {
@@ -335,30 +336,30 @@ void lapack_eigen_symmv (gsl_matrix *A, gsl_vector *eval, gsl_matrix *evec,
 		    "lapack_eigen_symmv." << endl;
 		  return;
 		}
-		
+
 		gsl_matrix_view A_sub=gsl_matrix_submatrix(A, 0, 0, N, N);
 		gsl_matrix_memcpy (evec, &A_sub.matrix);
 		gsl_matrix_transpose (evec);
-		
+
 		delete [] WORK;
-	} else {	
+	} else {
    	        int N=A->size1, LDA=A->size1, LDZ=A->size1, INFO;
 		int LWORK=-1, LIWORK=-1;
 		char JOBZ='V', UPLO='L', RANGE='A';
 		double ABSTOL=1.0E-7;
-		
+
 		// VL, VU, IL, IU are not referenced; M equals N if RANGE='A'.
 		double VL=0.0, VU=0.0;
 		int IL=0, IU=0, M;
-		
+
 		if (N!=(int)A->size2 || N!=(int)eval->size) {
 		  cout << "Matrix needs to be symmetric and same " <<
 		    "dimension in lapack_eigen_symmv." << endl;
 		  return;
 		}
-		
+
 		int *ISUPPZ=new int [2*N];
-		
+
 		double WORK_temp[1];
 		int IWORK_temp[1];
 
@@ -370,12 +371,12 @@ void lapack_eigen_symmv (gsl_matrix *A, gsl_vector *eval, gsl_matrix *evec,
 		  cout << "Work space estimate unsuccessful in " <<
 		    "lapack_eigen_symmv." << endl;
 		  return;
-		}	
-		LWORK=(int)WORK_temp[0]; LIWORK=(int)IWORK_temp[0];	
+		}
+		LWORK=(int)WORK_temp[0]; LIWORK=(int)IWORK_temp[0];
 
 		double *WORK=new double [LWORK];
 		int *IWORK=new int [LIWORK];
-		
+
 		dsyevr_(&JOBZ, &RANGE, &UPLO, &N, A->data, &LDA, &VL, &VU,
 			&IL, &IU, &ABSTOL, &M, eval->data, evec->data,
 			&LDZ, ISUPPZ, WORK, &LWORK, IWORK, &LIWORK, &INFO);
@@ -386,12 +387,12 @@ void lapack_eigen_symmv (gsl_matrix *A, gsl_vector *eval, gsl_matrix *evec,
 		}
 
 		gsl_matrix_transpose (evec);
-		
+
 		delete [] ISUPPZ;
 		delete [] WORK;
 		delete [] IWORK;
 	}
-	
+
 	return;
 }
 
@@ -406,7 +407,7 @@ double EigenDecomp (gsl_matrix *G, gsl_matrix *U, gsl_vector *eval,
 		d+=gsl_vector_get(eval, i);
 	}
 	d/=(double)eval->size;
-	
+
 	return d;
 }
 
@@ -422,21 +423,21 @@ double EigenDecomp (gsl_matrix_float *G, gsl_matrix_float *U,
 		d+=gsl_vector_float_get(eval, i);
 	}
 	d/=(double)eval->size;
-	
+
 	return d;
 }
 
 
 double CholeskySolve(gsl_matrix *Omega, gsl_vector *Xty, gsl_vector *OiXty) {
 	double logdet_O=0.0;
-	
+
 	lapack_cholesky_decomp(Omega);
 	for (size_t i=0; i<Omega->size1; ++i) {
 		logdet_O+=log(gsl_matrix_get (Omega, i, i));
-	}	
-	logdet_O*=2.0;	
-	lapack_cholesky_solve(Omega, Xty, OiXty);	
-	
+	}
+	logdet_O*=2.0;
+	lapack_cholesky_solve(Omega, Xty, OiXty);
+
 	return logdet_O;
 }
 
@@ -444,16 +445,16 @@ double CholeskySolve(gsl_matrix *Omega, gsl_vector *Xty, gsl_vector *OiXty) {
 double CholeskySolve(gsl_matrix_float *Omega, gsl_vector_float *Xty,
 		     gsl_vector_float *OiXty) {
 	double logdet_O=0.0;
-	
+
 	lapack_float_cholesky_decomp(Omega);
 	for (size_t i=0; i<Omega->size1; ++i) {
 		logdet_O+=log(gsl_matrix_float_get (Omega, i, i));
-	}	
-	logdet_O*=2.0;	
-	lapack_float_cholesky_solve(Omega, Xty, OiXty);	
-	
+	}
+	logdet_O*=2.0;
+	lapack_float_cholesky_solve(Omega, Xty, OiXty);
+
 	return logdet_O;
-}	
+}
 
 
 // LU decomposition.
@@ -464,18 +465,18 @@ void LUDecomp (gsl_matrix *LU, gsl_permutation *p, int *signum) {
 
 void LUDecomp (gsl_matrix_float *LU, gsl_permutation *p, int *signum) {
 	gsl_matrix *LU_double=gsl_matrix_alloc (LU->size1, LU->size2);
-	
-	// Copy float matrix to double.	
+
+	// Copy float matrix to double.
 	for (size_t i=0; i<LU->size1; i++) {
 		for (size_t j=0; j<LU->size2; j++) {
 			gsl_matrix_set (LU_double, i, j,
 					gsl_matrix_float_get(LU, i, j));
 		}
 	}
-	
+
 	// LU decomposition.
 	gsl_linalg_LU_decomp (LU_double, p, signum);
-	
+
 	// Copy float matrix to double.
 	for (size_t i=0; i<LU->size1; i++) {
 		for (size_t j=0; j<LU->size2; j++) {
@@ -483,7 +484,7 @@ void LUDecomp (gsl_matrix_float *LU, gsl_permutation *p, int *signum) {
 					      gsl_matrix_get(LU_double, i, j));
 		}
 	}
-	
+
 	// Free matrix.
 	gsl_matrix_free (LU_double);
 	return;
@@ -502,18 +503,18 @@ void LUInvert (const gsl_matrix_float *LU, const gsl_permutation *p,
 	gsl_matrix *LU_double=gsl_matrix_alloc (LU->size1, LU->size2);
 	gsl_matrix *inverse_double=gsl_matrix_alloc (inverse->size1,
 						     inverse->size2);
-	
-	// Copy float matrix to double.	
+
+	// Copy float matrix to double.
 	for (size_t i=0; i<LU->size1; i++) {
 		for (size_t j=0; j<LU->size2; j++) {
 			gsl_matrix_set (LU_double, i, j,
 					gsl_matrix_float_get(LU, i, j));
 		}
 	}
-	
+
 	// LU decomposition.
 	gsl_linalg_LU_invert (LU_double, p, inverse_double);
-	
+
 	// Copy float matrix to double.
 	for (size_t i=0; i<inverse->size1; i++) {
 		for (size_t j=0; j<inverse->size2; j++) {
@@ -522,7 +523,7 @@ void LUInvert (const gsl_matrix_float *LU, const gsl_permutation *p,
 							     i, j));
 		}
 	}
-	
+
 	// Free matrix.
 	gsl_matrix_free (LU_double);
 	gsl_matrix_free (inverse_double);
@@ -539,17 +540,17 @@ double LULndet (gsl_matrix *LU) {
 double LULndet (gsl_matrix_float *LU) {
 	gsl_matrix *LU_double=gsl_matrix_alloc (LU->size1, LU->size2);
 	double d;
-	
-	// Copy float matrix to double.	
+
+	// Copy float matrix to double.
 	for (size_t i=0; i<LU->size1; i++) {
 		for (size_t j=0; j<LU->size2; j++) {
 			gsl_matrix_set (LU_double, i, j, gsl_matrix_float_get(LU, i, j));
 		}
 	}
-	
+
 	// LU decomposition.
 	d=gsl_linalg_LU_lndet (LU_double);
-	
+
 	// Free matrix
 	gsl_matrix_free (LU_double);
 	return d;
@@ -567,8 +568,8 @@ void LUSolve (const gsl_matrix_float *LU, const gsl_permutation *p,
 	      const gsl_vector_float *b, gsl_vector_float *x) {
 	gsl_matrix *LU_double=gsl_matrix_alloc (LU->size1, LU->size2);
 	gsl_vector *b_double=gsl_vector_alloc (b->size);
-	gsl_vector *x_double=gsl_vector_alloc (x->size);	
-	
+	gsl_vector *x_double=gsl_vector_alloc (x->size);
+
 	// Copy float matrix to double.
 	for (size_t i=0; i<LU->size1; i++) {
 		for (size_t j=0; j<LU->size2; j++) {
@@ -576,23 +577,23 @@ void LUSolve (const gsl_matrix_float *LU, const gsl_permutation *p,
 					gsl_matrix_float_get(LU, i, j));
 		}
 	}
-	
+
 	for (size_t i=0; i<b->size; i++) {
 		gsl_vector_set (b_double, i, gsl_vector_float_get(b, i));
 	}
-	
+
 	for (size_t i=0; i<x->size; i++) {
 		gsl_vector_set (x_double, i, gsl_vector_float_get(x, i));
 	}
-	
+
 	// LU decomposition.
 	gsl_linalg_LU_solve (LU_double, p, b_double, x_double);
-	
+
 	// Copy float matrix to double.
 	for (size_t i=0; i<x->size; i++) {
 		gsl_vector_float_set (x, i, gsl_vector_get(x_double, i));
 	}
-	
+
 	// Free matrix.
 	gsl_matrix_free (LU_double);
 	gsl_vector_free (b_double);
diff --git a/src/logistic.cpp b/src/logistic.cpp
index c1ddac1..f9edc68 100644
--- a/src/logistic.cpp
+++ b/src/logistic.cpp
@@ -1,725 +1,724 @@
-#include <stdio.h>
-#include <math.h>
-#include <gsl/gsl_matrix.h>
-#include <gsl/gsl_rng.h>
-#include <gsl/gsl_multimin.h>
-#include <gsl/gsl_sf.h>
-#include <gsl/gsl_linalg.h>
-#include "logistic.h"
-
-// I need to bundle all the data that goes to the function to optimze
-// together.
-typedef struct{
-  gsl_matrix_int *X;
-  gsl_vector_int *nlev;
-  gsl_vector *y;
-  gsl_matrix *Xc; // Continuous covariates matrix Nobs x Kc (NULL if not used).
-  double lambdaL1;
-  double lambdaL2;
-} fix_parm_mixed_T;
-
-double fLogit_mixed(gsl_vector *beta,
-		    gsl_matrix_int *X,
-		    gsl_vector_int *nlev,
-		    gsl_matrix *Xc,
-		    gsl_vector *y,
-		    double lambdaL1,
-		    double lambdaL2) {
-  int n = y->size; 
-  int npar = beta->size; 
-  double total = 0;
-  double aux = 0;
-  
-  // Changed loop start at 1 instead of 0 to avoid regularization of
-  // beta_0*\/
-  // #pragma omp parallel for reduction (+:total)
-  for(int i = 1; i < npar; ++i)
-    total += beta->data[i]*beta->data[i];
-  total = (-total*lambdaL2/2);
-  // #pragma omp parallel for reduction (+:aux)
-  for(int i = 1; i < npar; ++i)
-    aux += (beta->data[i]>0 ? beta->data[i] : -beta->data[i]);
-  total = total-aux*lambdaL1;
-  // #pragma omp parallel for schedule(static) shared(n,beta,X,nlev,y)
-  // #reduction (+:total)
-  for(int i = 0; i < n; ++i) {
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < X->size2; ++k) {
-      if(gsl_matrix_int_get(X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
-      iParm+=nlev->data[k]-1;
-    }
-    for(int k = 0; k < (Xc->size2); ++k) 
-      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
-    total += y->data[i]*Xbetai-gsl_sf_log_1plusx(gsl_sf_exp(Xbetai));
-  }
-  return -total;
-} 
-
-void logistic_mixed_pred(gsl_vector *beta,     // Vector of parameters
-					       // length = 1 + Sum_k(C_k -1)
-			 gsl_matrix_int *X,    // Matrix Nobs x K 
-			 gsl_vector_int *nlev, // Vector with number categories
-			 gsl_matrix *Xc,       // Continuous covariates matrix:
-			                       // obs x Kc (NULL if not used).
-			 gsl_vector *yhat){    // Vector of prob. predicted by
-					       // the logistic
-  for(int i = 0; i < X->size1; ++i) {
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < X->size2; ++k) {
-      if(gsl_matrix_int_get(X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
-      iParm+=nlev->data[k]-1;
-    }
-    // Adding the continuous.
-    for(int k = 0; k < (Xc->size2); ++k) 
-      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
-    yhat->data[i]=1/(1 + gsl_sf_exp(-Xbetai));
-  }
-}
-
-
-// The gradient of f, df = (df/dx, df/dy).
-void wgsl_mixed_optim_df (const gsl_vector *beta, void *params, 
-			  gsl_vector *out) {
-  fix_parm_mixed_T *p = (fix_parm_mixed_T *)params;
-  int n = p->y->size; 
-  int K = p->X->size2; 
-  int Kc = p->Xc->size2; 
-  int npar = beta->size;
-  
-  // Intitialize gradient out necessary?
-  for(int i = 0; i < npar; ++i) 
-    out->data[i]= 0;
-  
-  // Changed loop start at 1 instead of 0 to avoid regularization of beta 0.
-  for(int i = 1; i < npar; ++i)
-    out->data[i]= p->lambdaL2*beta->data[i]; 
-  for(int i = 1; i < npar; ++i)
-    out->data[i]+= p->lambdaL1*((beta->data[i]>0)-(beta->data[i]<0));
-  
-  for(int i = 0; i < n; ++i) {
-    double pn=0;
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm];
-      iParm+=p->nlev->data[k]-1;
-    }
-    
-    // Adding the continuous.
-    for(int k = 0; k < Kc; ++k) 
-      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm++];
-
-    pn= -( p->y->data[i] - 1/(1 + gsl_sf_exp(-Xbetai)) );
-
-    out->data[0]+= pn;
-    iParm=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	out->data[gsl_matrix_int_get(p->X,i,k)-1+iParm]+=pn;
-      iParm+=p->nlev->data[k]-1;
-    }
-    
-    // Adding the continuous.
-    for(int k = 0; k < Kc; ++k) {
-      out->data[iParm++] += gsl_matrix_get(p->Xc,i,k)*pn;
-    }
-  }
-
-}
-
-// The Hessian of f.
-void  wgsl_mixed_optim_hessian (const gsl_vector *beta, void *params, 
-				gsl_matrix *out) {
-  fix_parm_mixed_T *p = (fix_parm_mixed_T *)params;
-  int n = p->y->size; 
-  int K = p->X->size2; 
-  int Kc = p->Xc->size2; 
-  int npar = beta->size; 
-  gsl_vector *gn = gsl_vector_alloc(npar); // gn
-  
-  // Intitialize Hessian out necessary ???
-  gsl_matrix_set_zero(out);
-  
-  /* Changed loop start at 1 instead of 0 to avoid regularization of beta 0*/
-  for(int i = 1; i < npar; ++i)
-    gsl_matrix_set(out,i,i,(p->lambdaL2)); // Double-check this.
-  
-  // L1 penalty not working yet, as not differentiable, I may need to
-  // do coordinate descent (as in glm_net)
-  for(int i = 0; i < n; ++i) {
-    double pn=0;
-    double aux=0;
-    double Xbetai=beta->data[0];
-    int iParm1=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm1];
-      iParm1+=p->nlev->data[k]-1;  //-1?
-    }
-    
-    // Adding the continuous.
-    for(int k = 0; k < Kc; ++k) 
-      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm1++];
-
-    pn= 1/(1 + gsl_sf_exp(-Xbetai));
-    
-    // Add a protection for pn very close to 0 or 1?
-    aux=pn*(1-pn);
-
-    // Calculate sub-gradient vector gn.
-    gsl_vector_set_zero(gn);
-    gn->data[0]= 1;
-    iParm1=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	gn->data[gsl_matrix_int_get(p->X,i,k)-1+iParm1]=1;
-      iParm1+=p->nlev->data[k]-1;
-    }
-    
-    // Adding the continuous.
-    for(int k = 0; k < Kc; ++k) {
-      gn->data[iParm1++] = gsl_matrix_get(p->Xc,i,k);
-    }
-
-    for(int k1=0;k1<npar; ++k1)
-      if(gn->data[k1]!=0)
-	for(int k2=0;k2<npar; ++k2)
-	  if(gn->data[k2]!=0)
-	    *gsl_matrix_ptr(out,k1,k2) += (aux * gn->data[k1] * gn->data[k2]);
-  }
-  gsl_vector_free(gn);
-}
-
-double wgsl_mixed_optim_f(gsl_vector *v, void *params) {
-  double mLogLik=0;
-  fix_parm_mixed_T *p = (fix_parm_mixed_T *)params;
-  mLogLik = fLogit_mixed(v,p->X,p->nlev,p->Xc,p->y,p->lambdaL1,p->lambdaL2);
-  return mLogLik; 
-}
-
-// Compute both f and df together.
-void 
-wgsl_mixed_optim_fdf (gsl_vector *x, void *params, double *f, gsl_vector *df) {
-  *f = wgsl_mixed_optim_f(x, params); 
-  wgsl_mixed_optim_df(x, params, df);
-}
-
-// Xc is the matrix of continuous covariates, Nobs x Kc (NULL if not used).
-int logistic_mixed_fit(gsl_vector *beta, gsl_matrix_int *X,
-		       gsl_vector_int *nlev, gsl_matrix *Xc,
-		       gsl_vector *y, double lambdaL1, double lambdaL2) {
-  double mLogLik=0;
-  fix_parm_mixed_T p;
-  int npar = beta->size; 
-  int iter=0;
-  double maxchange=0;
-
-  // Intializing fix parameters.
-  p.X=X;
-  p.Xc=Xc;
-  p.nlev=nlev;
-  p.y=y;
-  p.lambdaL1=lambdaL1;
-  p.lambdaL2=lambdaL2;
-  
-  // Initial fit.
-  mLogLik = wgsl_mixed_optim_f(beta,&p);
-
-  gsl_matrix *myH = gsl_matrix_alloc(npar,npar); // Hessian matrix.
-  gsl_vector *stBeta = gsl_vector_alloc(npar);   // Direction to move.
-
-  gsl_vector *myG = gsl_vector_alloc(npar);      // Gradient.
-  gsl_vector *tau = gsl_vector_alloc(npar);      // tau for QR.
-
-  for(iter=0;iter<100;iter++){ 
-    wgsl_mixed_optim_hessian(beta,&p,myH);       // Calculate Hessian.
-    wgsl_mixed_optim_df(beta,&p,myG);            // Calculate Gradient.
-    gsl_linalg_QR_decomp(myH,tau);               // Calculate next beta.
-    gsl_linalg_QR_solve(myH,tau,myG,stBeta);
-    gsl_vector_sub(beta,stBeta);
-    
-    // Monitor convergence.
-    maxchange=0;
-    for(int i=0;i<npar; i++)
-      if(maxchange<fabs(stBeta->data[i]))
-	maxchange=fabs(stBeta->data[i]);
-
-    if(maxchange<1E-4)
-      break;
-  }
-
-  // Final fit.
-  mLogLik = wgsl_mixed_optim_f(beta,&p);
-  
-  gsl_vector_free (tau);
-  gsl_vector_free (stBeta);
-  gsl_vector_free (myG);
-  gsl_matrix_free (myH);
-
-  return 0;
-}
-
-/***************/
-/* Categorical */
-/***************/
-
-// I need to bundle all the data that goes to the function to optimze
-// together.
-typedef struct {
-  gsl_matrix_int *X;
-  gsl_vector_int *nlev;
-  gsl_vector *y;
-  double lambdaL1;
-  double lambdaL2;
-} fix_parm_cat_T;
-
-double fLogit_cat (gsl_vector *beta, gsl_matrix_int *X, gsl_vector_int *nlev,
-		   gsl_vector *y, double lambdaL1, double lambdaL2) {
-  int n = y->size; 
-  int npar = beta->size; 
-  double total = 0;
-  double aux = 0;
-  
-  // omp_set_num_threads(ompthr); /\* Changed loop start at 1 instead
-  // of 0 to avoid regularization of beta 0*\/ /\*#pragma omp parallel
-  // for reduction (+:total)*\/
-  for(int i = 1; i < npar; ++i)
-    total += beta->data[i]*beta->data[i];
-  total = (-total*lambdaL2/2);
-  
-  // /\*#pragma omp parallel for reduction (+:aux)*\/
-  for(int i = 1; i < npar; ++i)
-    aux += (beta->data[i]>0 ? beta->data[i] : -beta->data[i]);
-  total = total-aux*lambdaL1;
-  
-  // #pragma omp parallel for schedule(static) shared(n,beta,X,nlev,y)
-  // #reduction (+:total)
-  for(int i = 0; i < n; ++i) {
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < X->size2; ++k) {
-      if(gsl_matrix_int_get(X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
-      iParm+=nlev->data[k]-1;
-    }
-    total += y->data[i]*Xbetai-gsl_sf_log_1plusx(gsl_sf_exp(Xbetai));
-  }
-  return -total;
-} 
-
-void logistic_cat_pred (gsl_vector *beta,     // Vector of parameters
-					      // length = 1 + Sum_k(C_k-1).
-		        gsl_matrix_int *X,    // Matrix Nobs x K 
-		        gsl_vector_int *nlev, // Vector with #categories
-		        gsl_vector *yhat){    // Vector of prob. predicted by
-					      // the logistic.
-  for(int i = 0; i < X->size1; ++i) {
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < X->size2; ++k) {
-      if(gsl_matrix_int_get(X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
-      iParm+=nlev->data[k]-1;
-    }
-    yhat->data[i]=1/(1 + gsl_sf_exp(-Xbetai));
-  }
-}
-
-// The gradient of f, df = (df/dx, df/dy).
-void  wgsl_cat_optim_df (const gsl_vector *beta, void *params, 
-		   gsl_vector *out) {
-  fix_parm_cat_T *p = (fix_parm_cat_T *)params;
-  int n = p->y->size; 
-  int K = p->X->size2; 
-  int npar = beta->size;
-  
-  // Intitialize gradient out necessary?
-  for(int i = 0; i < npar; ++i) 
-    out->data[i]= 0;
-  
-  // Changed loop start at 1 instead of 0 to avoid regularization of beta 0.
-  for(int i = 1; i < npar; ++i)
-    out->data[i]= p->lambdaL2*beta->data[i]; 
-  for(int i = 1; i < npar; ++i)
-    out->data[i]+= p->lambdaL1*((beta->data[i]>0)-(beta->data[i]<0));
-  
-  for(int i = 0; i < n; ++i) {
-    double pn=0;
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm];
-      iParm+=p->nlev->data[k]-1;
-    }
-    
-    pn= -( p->y->data[i] - 1/(1 + gsl_sf_exp(-Xbetai)) );
-
-    out->data[0]+= pn;
-    iParm=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	out->data[gsl_matrix_int_get(p->X,i,k)-1+iParm]+=pn;
-      iParm+=p->nlev->data[k]-1;
-    }
-  }
-}
-
-// The Hessian of f.
-void  wgsl_cat_optim_hessian (const gsl_vector *beta, void *params, 
-			      gsl_matrix *out) {
-  fix_parm_cat_T *p = (fix_parm_cat_T *)params;
-  int n = p->y->size; 
-  int K = p->X->size2; 
-  int npar = beta->size;
-  
-  // Intitialize Hessian out necessary.
-  gsl_matrix_set_zero(out);
-  
-  // Changed loop start at 1 instead of 0 to avoid regularization of beta.
-  for(int i = 1; i < npar; ++i)
-    gsl_matrix_set(out,i,i,(p->lambdaL2)); // Double-check this.
-  
-  // L1 penalty not working yet, as not differentiable, I may need to
-  // do coordinate descent (as in glm_net).
-  for(int i = 0; i < n; ++i) {
-    double pn=0;
-    double aux=0;
-    double Xbetai=beta->data[0];
-    int iParm2=1;
-    int iParm1=1;
-    for(int k = 0; k < K; ++k) {
-      if(gsl_matrix_int_get(p->X,i,k)>0)
-	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm1];
-      iParm1+=p->nlev->data[k]-1;  //-1?
-    }
-
-    pn= 1/(1 + gsl_sf_exp(-Xbetai));
-    
-    // Add a protection for pn very close to 0 or 1?
-    aux=pn*(1-pn);
-    *gsl_matrix_ptr(out,0,0)+=aux;
-    iParm2=1;
-    for(int k2 = 0; k2 < K; ++k2) {
-      if(gsl_matrix_int_get(p->X,i,k2)>0)
-	*gsl_matrix_ptr(out,0,gsl_matrix_int_get(p->X,i,k2)-1+iParm2)+=aux;
-      iParm2+=p->nlev->data[k2]-1;   //-1?
-    }
-    iParm1=1;
-    for(int k1 = 0; k1 < K; ++k1) {
-      if(gsl_matrix_int_get(p->X,i,k1)>0)
-	*gsl_matrix_ptr(out,gsl_matrix_int_get(p->X,i,k1)-1+iParm1,0)+=aux;
-      iParm2=1;
-      for(int k2 = 0; k2 < K; ++k2) {
-	if((gsl_matrix_int_get(p->X,i,k1)>0) &&
-	   (gsl_matrix_int_get(p->X,i,k2)>0))
-	  *gsl_matrix_ptr(out
-			  ,gsl_matrix_int_get(p->X,i,k1)-1+iParm1
-			  ,gsl_matrix_int_get(p->X,i,k2)-1+iParm2
-			  )+=aux;
-	iParm2+=p->nlev->data[k2]-1;  //-1?
-      }
-      iParm1+=p->nlev->data[k1]-1; //-1?
-    }
-  }
-}
-
-double wgsl_cat_optim_f(gsl_vector *v, void *params) {
-  double mLogLik=0;
-  fix_parm_cat_T *p = (fix_parm_cat_T *)params;
-  mLogLik = fLogit_cat(v,p->X,p->nlev,p->y,p->lambdaL1,p->lambdaL2);
-  return mLogLik; 
-}
-
-// Compute both f and df together.
-void wgsl_cat_optim_fdf (gsl_vector *x, void *params, double *f,
-			 gsl_vector *df) {
-  *f = wgsl_cat_optim_f(x, params); 
-  wgsl_cat_optim_df(x, params, df);
-}
-
-int logistic_cat_fit(gsl_vector *beta,
-		     gsl_matrix_int *X,
-		     gsl_vector_int *nlev,
-		     gsl_vector *y,
-		     double lambdaL1,
-		     double lambdaL2) {
-  double mLogLik=0;
-  fix_parm_cat_T p;
-  int npar = beta->size; 
-  int iter=0;
-  double maxchange=0;
-
-  // Intializing fix parameters.
-  p.X=X;
-  p.nlev=nlev;
-  p.y=y;
-  p.lambdaL1=lambdaL1;
-  p.lambdaL2=lambdaL2;
-  
-  // Initial fit.
-  mLogLik = wgsl_cat_optim_f(beta,&p);
-
-  gsl_matrix *myH = gsl_matrix_alloc(npar,npar); // Hessian matrix.
-  gsl_vector *stBeta = gsl_vector_alloc(npar);   // Direction to move.
-
-  gsl_vector *myG = gsl_vector_alloc(npar);      // Gradient.
-  gsl_vector *tau = gsl_vector_alloc(npar);      // tau for QR.
-
-  for(iter=0;iter<100;iter++){ 
-    wgsl_cat_optim_hessian(beta,&p,myH); // Calculate Hessian.
-    wgsl_cat_optim_df(beta,&p,myG);      // Calculate Gradient.
-    gsl_linalg_QR_decomp(myH,tau);       // Calculate next beta.
-    gsl_linalg_QR_solve(myH,tau,myG,stBeta);
-    gsl_vector_sub(beta,stBeta);
-    
-    // Monitor convergence.
-    maxchange=0;
-    for(int i=0;i<npar; i++)
-      if(maxchange<fabs(stBeta->data[i]))
-	maxchange=fabs(stBeta->data[i]);
-
-#ifdef _RPR_DEBUG_
-    mLogLik = wgsl_cat_optim_f(beta,&p);
-#endif
-
-    if(maxchange<1E-4)
-      break;
-  }
-
-  // Final fit.
-  mLogLik = wgsl_cat_optim_f(beta,&p);
-
-  gsl_vector_free (tau);
-  gsl_vector_free (stBeta);
-  gsl_vector_free (myG);
-  gsl_matrix_free (myH);
-
-  return 0;
-}
-
-/***************/
-/* Continuous  */
-/***************/
-
-// I need to bundle all the data that goes to the function to optimze
-// together.
-typedef struct{
-  gsl_matrix *Xc;   // continuous covariates; Matrix Nobs x Kc 
-  gsl_vector *y;
-  double lambdaL1;
-  double lambdaL2;
-}fix_parm_cont_T;
-
-double fLogit_cont(gsl_vector *beta, gsl_matrix *Xc, gsl_vector *y,
-		   double lambdaL1, double lambdaL2) {
-  int n = y->size; 
-  int npar = beta->size; 
-  double total = 0;
-  double aux = 0;
-  
-  // omp_set_num_threads(ompthr); /\* Changed loop start at 1 instead
-  // of 0 to avoid regularization of beta_0*\/ /\*#pragma omp parallel
-  // for reduction (+:total)*\/
-  for(int i = 1; i < npar; ++i)
-    total += beta->data[i]*beta->data[i];
-  total = (-total*lambdaL2/2);
-  
-  // /\*#pragma omp parallel for reduction (+:aux)*\/
-  for(int i = 1; i < npar; ++i)
-    aux += (beta->data[i]>0 ? beta->data[i] : -beta->data[i]);
-  total = total-aux*lambdaL1;
-  
-  // #pragma omp parallel for schedule(static) shared(n,beta,X,nlev,y)
-  // #reduction (+:total)
-  for(int i = 0; i < n; ++i) {
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < (Xc->size2); ++k) 
-      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
-    total += y->data[i]*Xbetai-gsl_sf_log_1plusx(gsl_sf_exp(Xbetai));
-  }
-  return -total;
-} 
-
-void logistic_cont_pred(gsl_vector *beta,    // Vector of parameters
-					     // length = 1 + Sum_k(C_k-1).
-			gsl_matrix *Xc,      // Continuous covariates matrix,
-			                     // Nobs x Kc (NULL if not used).
-			,gsl_vector *yhat) { // Vector of prob. predicted by
-                                             // the logistic.
-  for(int i = 0; i < Xc->size1; ++i) {
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < (Xc->size2); ++k) 
-      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
-    yhat->data[i]=1/(1 + gsl_sf_exp(-Xbetai));
-  }
-}
-
-// The gradient of f, df = (df/dx, df/dy).
-void wgsl_cont_optim_df (const gsl_vector *beta, void *params, 
-			 gsl_vector *out) {
-  fix_parm_cont_T *p = (fix_parm_cont_T *)params;
-  int n = p->y->size; 
-  int Kc = p->Xc->size2; 
-  int npar = beta->size;
-  
-  // Intitialize gradient out necessary?
-  for(int i = 0; i < npar; ++i) 
-    out->data[i]= 0;
-  
-  // Changed loop start at 1 instead of 0 to avoid regularization of beta 0.
-  for(int i = 1; i < npar; ++i)
-    out->data[i]= p->lambdaL2*beta->data[i]; 
-  for(int i = 1; i < npar; ++i)
-    out->data[i]+= p->lambdaL1*((beta->data[i]>0)-(beta->data[i]<0));
-  
-  for(int i = 0; i < n; ++i) {
-    double pn=0;
-    double Xbetai=beta->data[0];
-    int iParm=1;
-    for(int k = 0; k < Kc; ++k) 
-      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm++];
-
-    pn= -( p->y->data[i] - 1/(1 + gsl_sf_exp(-Xbetai)) );
-
-    out->data[0]+= pn;
-    iParm=1;
-    
-    // Adding the continuous.
-    for(int k = 0; k < Kc; ++k) {
-      out->data[iParm++] += gsl_matrix_get(p->Xc,i,k)*pn;
-    }
-  }
-}
-
-// The Hessian of f.
-void wgsl_cont_optim_hessian (const gsl_vector *beta, void *params, 
-			      gsl_matrix *out) {
-  fix_parm_cont_T *p = (fix_parm_cont_T *)params;
-  int n = p->y->size; 
-  int Kc = p->Xc->size2; 
-  int npar = beta->size; 
-  gsl_vector *gn = gsl_vector_alloc(npar); // gn.
-  
-  // Intitialize Hessian out necessary ???
-  
-  gsl_matrix_set_zero(out);
-  
-  // Changed loop start at 1 instead of 0 to avoid regularization of
-  // beta 0.
-  for(int i = 1; i < npar; ++i)
-    gsl_matrix_set(out,i,i,(p->lambdaL2));  // Double-check this.
-  
-  // L1 penalty not working yet, as not differentiable, I may need to
-  // do coordinate descent (as in glm_net).
-  for(int i = 0; i < n; ++i) {
-    double pn=0;
-    double aux=0;
-    double Xbetai=beta->data[0];
-    int iParm1=1;
-    for(int k = 0; k < Kc; ++k) 
-      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm1++];
-
-    pn= 1/(1 + gsl_sf_exp(-Xbetai));
-    
-    // Add a protection for pn very close to 0 or 1?
-    aux=pn*(1-pn);
-
-    // Calculate sub-gradient vector gn.
-    gsl_vector_set_zero(gn);
-    gn->data[0]= 1;
-    iParm1=1;
-    for(int k = 0; k < Kc; ++k) {
-      gn->data[iParm1++] = gsl_matrix_get(p->Xc,i,k);
-    }
-
-    for(int k1=0;k1<npar; ++k1)
-      if(gn->data[k1]!=0)
-	for(int k2=0;k2<npar; ++k2)
-	  if(gn->data[k2]!=0)
-	    *gsl_matrix_ptr(out,k1,k2) += (aux * gn->data[k1] * gn->data[k2]);
-  }
-  gsl_vector_free(gn);
-}
-
-double wgsl_cont_optim_f(gsl_vector *v, void *params) {
-  double mLogLik=0;
-  fix_parm_cont_T *p = (fix_parm_cont_T *)params;
-  mLogLik = fLogit_cont(v,p->Xc,p->y,p->lambdaL1,p->lambdaL2);
-  return mLogLik; 
-}
-
-// Compute both f and df together.
-void wgsl_cont_optim_fdf (gsl_vector *x, void *params, 
-			  double *f, gsl_vector *df) {
-  *f = wgsl_cont_optim_f(x, params); 
-  wgsl_cont_optim_df(x, params, df);
-}
-
-int logistic_cont_fit (gsl_vector *beta,
-		       gsl_matrix *Xc,   // Continuous covariates matrix,
-		 		         // Nobs x Kc (NULL if not used).
-		       gsl_vector *y,
-		       double lambdaL1,
-		       double lambdaL2) {
-
-  double mLogLik=0;
-  fix_parm_cont_T p;
-  int npar = beta->size; 
-  int iter=0;
-  double maxchange=0;
-
-  // Initializing fix parameters.
-  p.Xc=Xc;
-  p.y=y;
-  p.lambdaL1=lambdaL1;
-  p.lambdaL2=lambdaL2;
-  
-  // Initial fit.
-  mLogLik = wgsl_cont_optim_f(beta,&p);
-
-  gsl_matrix *myH = gsl_matrix_alloc(npar,npar); // Hessian matrix.
-  gsl_vector *stBeta = gsl_vector_alloc(npar);   // Direction to move.
-
-  gsl_vector *myG = gsl_vector_alloc(npar);      // Gradient.
-  gsl_vector *tau = gsl_vector_alloc(npar);      // tau for QR.
-
-  for(iter=0;iter<100;iter++){ 
-    wgsl_cont_optim_hessian(beta,&p,myH); // Calculate Hessian.
-    wgsl_cont_optim_df(beta,&p,myG);      // Calculate Gradient.
-    gsl_linalg_QR_decomp(myH,tau);        // Calculate next beta.
-    gsl_linalg_QR_solve(myH,tau,myG,stBeta);
-    gsl_vector_sub(beta,stBeta);
-    
-    // Monitor convergence.
-    maxchange=0;
-    for(int i=0;i<npar; i++)
-      if(maxchange<fabs(stBeta->data[i]))
-	maxchange=fabs(stBeta->data[i]);
-
-#ifdef _RPR_DEBUG_
-    mLogLik = wgsl_cont_optim_f(beta,&p);
-#endif
-
-    if(maxchange<1E-4)
-      break;
-  }
-
-  // Final fit.
-  mLogLik = wgsl_cont_optim_f(beta,&p);
-  
-  gsl_vector_free (tau);
-  gsl_vector_free (stBeta);
-  gsl_vector_free (myG);
-  gsl_matrix_free (myH);
-
-  return 0;
-}
-
+#include <stdio.h>
+#include <math.h>
+#include <gsl/gsl_matrix.h>
+#include <gsl/gsl_rng.h>
+#include <gsl/gsl_multimin.h>
+#include <gsl/gsl_sf.h>
+#include <gsl/gsl_linalg.h>
+#include "logistic.h"
+
+// I need to bundle all the data that goes to the function to optimze
+// together.
+typedef struct{
+  gsl_matrix_int *X;
+  gsl_vector_int *nlev;
+  gsl_vector *y;
+  gsl_matrix *Xc; // Continuous covariates matrix Nobs x Kc (NULL if not used).
+  double lambdaL1;
+  double lambdaL2;
+} fix_parm_mixed_T;
+
+double fLogit_mixed(gsl_vector *beta,
+		    gsl_matrix_int *X,
+		    gsl_vector_int *nlev,
+		    gsl_matrix *Xc,
+		    gsl_vector *y,
+		    double lambdaL1,
+		    double lambdaL2) {
+  int n = y->size;
+  int npar = beta->size;
+  double total = 0;
+  double aux = 0;
+
+  // Changed loop start at 1 instead of 0 to avoid regularization of
+  // beta_0*\/
+  // #pragma omp parallel for reduction (+:total)
+  for(int i = 1; i < npar; ++i)
+    total += beta->data[i]*beta->data[i];
+  total = (-total*lambdaL2/2);
+  // #pragma omp parallel for reduction (+:aux)
+  for(int i = 1; i < npar; ++i)
+    aux += (beta->data[i]>0 ? beta->data[i] : -beta->data[i]);
+  total = total-aux*lambdaL1;
+  // #pragma omp parallel for schedule(static) shared(n,beta,X,nlev,y)
+  // #reduction (+:total)
+  for(int i = 0; i < n; ++i) {
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < X->size2; ++k) {
+      if(gsl_matrix_int_get(X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
+      iParm+=nlev->data[k]-1;
+    }
+    for(int k = 0; k < (Xc->size2); ++k)
+      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
+    total += y->data[i]*Xbetai-gsl_sf_log_1plusx(gsl_sf_exp(Xbetai));
+  }
+  return -total;
+}
+
+void logistic_mixed_pred(gsl_vector *beta,     // Vector of parameters
+					       // length = 1 + Sum_k(C_k -1)
+			 gsl_matrix_int *X,    // Matrix Nobs x K
+			 gsl_vector_int *nlev, // Vector with number categories
+			 gsl_matrix *Xc,       // Continuous covariates matrix:
+			                       // obs x Kc (NULL if not used).
+			 gsl_vector *yhat){    // Vector of prob. predicted by
+					       // the logistic
+  for(int i = 0; i < X->size1; ++i) {
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < X->size2; ++k) {
+      if(gsl_matrix_int_get(X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
+      iParm+=nlev->data[k]-1;
+    }
+    // Adding the continuous.
+    for(int k = 0; k < (Xc->size2); ++k)
+      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
+    yhat->data[i]=1/(1 + gsl_sf_exp(-Xbetai));
+  }
+}
+
+
+// The gradient of f, df = (df/dx, df/dy).
+void wgsl_mixed_optim_df (const gsl_vector *beta, void *params,
+			  gsl_vector *out) {
+  fix_parm_mixed_T *p = (fix_parm_mixed_T *)params;
+  int n = p->y->size;
+  int K = p->X->size2;
+  int Kc = p->Xc->size2;
+  int npar = beta->size;
+
+  // Intitialize gradient out necessary?
+  for(int i = 0; i < npar; ++i)
+    out->data[i]= 0;
+
+  // Changed loop start at 1 instead of 0 to avoid regularization of beta 0.
+  for(int i = 1; i < npar; ++i)
+    out->data[i]= p->lambdaL2*beta->data[i];
+  for(int i = 1; i < npar; ++i)
+    out->data[i]+= p->lambdaL1*((beta->data[i]>0)-(beta->data[i]<0));
+
+  for(int i = 0; i < n; ++i) {
+    double pn=0;
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm];
+      iParm+=p->nlev->data[k]-1;
+    }
+
+    // Adding the continuous.
+    for(int k = 0; k < Kc; ++k)
+      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm++];
+
+    pn= -( p->y->data[i] - 1/(1 + gsl_sf_exp(-Xbetai)) );
+
+    out->data[0]+= pn;
+    iParm=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	out->data[gsl_matrix_int_get(p->X,i,k)-1+iParm]+=pn;
+      iParm+=p->nlev->data[k]-1;
+    }
+
+    // Adding the continuous.
+    for(int k = 0; k < Kc; ++k) {
+      out->data[iParm++] += gsl_matrix_get(p->Xc,i,k)*pn;
+    }
+  }
+
+}
+
+// The Hessian of f.
+void  wgsl_mixed_optim_hessian (const gsl_vector *beta, void *params,
+				gsl_matrix *out) {
+  fix_parm_mixed_T *p = (fix_parm_mixed_T *)params;
+  int n = p->y->size;
+  int K = p->X->size2;
+  int Kc = p->Xc->size2;
+  int npar = beta->size;
+  gsl_vector *gn = gsl_vector_alloc(npar); // gn
+
+  // Intitialize Hessian out necessary ???
+  gsl_matrix_set_zero(out);
+
+  /* Changed loop start at 1 instead of 0 to avoid regularization of beta 0*/
+  for(int i = 1; i < npar; ++i)
+    gsl_matrix_set(out,i,i,(p->lambdaL2)); // Double-check this.
+
+  // L1 penalty not working yet, as not differentiable, I may need to
+  // do coordinate descent (as in glm_net)
+  for(int i = 0; i < n; ++i) {
+    double pn=0;
+    double aux=0;
+    double Xbetai=beta->data[0];
+    int iParm1=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm1];
+      iParm1+=p->nlev->data[k]-1;  //-1?
+    }
+
+    // Adding the continuous.
+    for(int k = 0; k < Kc; ++k)
+      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm1++];
+
+    pn= 1/(1 + gsl_sf_exp(-Xbetai));
+
+    // Add a protection for pn very close to 0 or 1?
+    aux=pn*(1-pn);
+
+    // Calculate sub-gradient vector gn.
+    gsl_vector_set_zero(gn);
+    gn->data[0]= 1;
+    iParm1=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	gn->data[gsl_matrix_int_get(p->X,i,k)-1+iParm1]=1;
+      iParm1+=p->nlev->data[k]-1;
+    }
+
+    // Adding the continuous.
+    for(int k = 0; k < Kc; ++k) {
+      gn->data[iParm1++] = gsl_matrix_get(p->Xc,i,k);
+    }
+
+    for(int k1=0;k1<npar; ++k1)
+      if(gn->data[k1]!=0)
+	for(int k2=0;k2<npar; ++k2)
+	  if(gn->data[k2]!=0)
+	    *gsl_matrix_ptr(out,k1,k2) += (aux * gn->data[k1] * gn->data[k2]);
+  }
+  gsl_vector_free(gn);
+}
+
+double wgsl_mixed_optim_f(gsl_vector *v, void *params) {
+  double mLogLik=0;
+  fix_parm_mixed_T *p = (fix_parm_mixed_T *)params;
+  mLogLik = fLogit_mixed(v,p->X,p->nlev,p->Xc,p->y,p->lambdaL1,p->lambdaL2);
+  return mLogLik;
+}
+
+// Compute both f and df together.
+void
+wgsl_mixed_optim_fdf (gsl_vector *x, void *params, double *f, gsl_vector *df) {
+  *f = wgsl_mixed_optim_f(x, params);
+  wgsl_mixed_optim_df(x, params, df);
+}
+
+// Xc is the matrix of continuous covariates, Nobs x Kc (NULL if not used).
+int logistic_mixed_fit(gsl_vector *beta, gsl_matrix_int *X,
+		       gsl_vector_int *nlev, gsl_matrix *Xc,
+		       gsl_vector *y, double lambdaL1, double lambdaL2) {
+  double mLogLik=0;
+  fix_parm_mixed_T p;
+  int npar = beta->size;
+  int iter=0;
+  double maxchange=0;
+
+  // Intializing fix parameters.
+  p.X=X;
+  p.Xc=Xc;
+  p.nlev=nlev;
+  p.y=y;
+  p.lambdaL1=lambdaL1;
+  p.lambdaL2=lambdaL2;
+
+  // Initial fit.
+  mLogLik = wgsl_mixed_optim_f(beta,&p);
+
+  gsl_matrix *myH = gsl_matrix_alloc(npar,npar); // Hessian matrix.
+  gsl_vector *stBeta = gsl_vector_alloc(npar);   // Direction to move.
+
+  gsl_vector *myG = gsl_vector_alloc(npar);      // Gradient.
+  gsl_vector *tau = gsl_vector_alloc(npar);      // tau for QR.
+
+  for(iter=0;iter<100;iter++){
+    wgsl_mixed_optim_hessian(beta,&p,myH);       // Calculate Hessian.
+    wgsl_mixed_optim_df(beta,&p,myG);            // Calculate Gradient.
+    gsl_linalg_QR_decomp(myH,tau);               // Calculate next beta.
+    gsl_linalg_QR_solve(myH,tau,myG,stBeta);
+    gsl_vector_sub(beta,stBeta);
+
+    // Monitor convergence.
+    maxchange=0;
+    for(int i=0;i<npar; i++)
+      if(maxchange<fabs(stBeta->data[i]))
+	maxchange=fabs(stBeta->data[i]);
+
+    if(maxchange<1E-4)
+      break;
+  }
+
+  // Final fit.
+  mLogLik = wgsl_mixed_optim_f(beta,&p);
+
+  gsl_vector_free (tau);
+  gsl_vector_free (stBeta);
+  gsl_vector_free (myG);
+  gsl_matrix_free (myH);
+
+  return 0;
+}
+
+/***************/
+/* Categorical */
+/***************/
+
+// I need to bundle all the data that goes to the function to optimze
+// together.
+typedef struct {
+  gsl_matrix_int *X;
+  gsl_vector_int *nlev;
+  gsl_vector *y;
+  double lambdaL1;
+  double lambdaL2;
+} fix_parm_cat_T;
+
+double fLogit_cat (gsl_vector *beta, gsl_matrix_int *X, gsl_vector_int *nlev,
+		   gsl_vector *y, double lambdaL1, double lambdaL2) {
+  int n = y->size;
+  int npar = beta->size;
+  double total = 0;
+  double aux = 0;
+
+  // omp_set_num_threads(ompthr);
+  // The loop starts at 1 instead of 0 to avoid regularizing beta_0.
+  // #pragma omp parallel for reduction (+:total)
+  for(int i = 1; i < npar; ++i)
+    total += beta->data[i]*beta->data[i];
+  total = (-total*lambdaL2/2);
+
+  // #pragma omp parallel for reduction (+:aux)
+  for(int i = 1; i < npar; ++i)
+    aux += (beta->data[i]>0 ? beta->data[i] : -beta->data[i]);
+  total = total-aux*lambdaL1;
+
+  // #pragma omp parallel for schedule(static) shared(n,beta,X,nlev,y)
+  //   reduction (+:total)
+  for(int i = 0; i < n; ++i) {
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < X->size2; ++k) {
+      if(gsl_matrix_int_get(X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
+      iParm+=nlev->data[k]-1;
+    }
+    total += y->data[i]*Xbetai-gsl_sf_log_1plusx(gsl_sf_exp(Xbetai));
+  }
+  return -total;
+}
+
+void logistic_cat_pred (gsl_vector *beta,     // Vector of parameters
+					      // length = 1 + Sum_k(C_k-1).
+		        gsl_matrix_int *X,    // Matrix Nobs x K
+		        gsl_vector_int *nlev, // Vector with #categories
+		        gsl_vector *yhat){    // Vector of prob. predicted by
+					      // the logistic.
+  for(int i = 0; i < X->size1; ++i) {
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < X->size2; ++k) {
+      if(gsl_matrix_int_get(X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(X,i,k)-1+iParm];
+      iParm+=nlev->data[k]-1;
+    }
+    yhat->data[i]=1/(1 + gsl_sf_exp(-Xbetai));
+  }
+}
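+
+// Indexing example: with two categorical covariates of 3 and 2 levels,
+// beta is laid out as
+//   beta[0]           intercept
+//   beta[1], beta[2]  levels 1 and 2 of covariate 0 (level 0 = baseline)
+//   beta[3]           level 1 of covariate 1
+// X entries are level codes in 0..C_k-1, and code 0 contributes nothing.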
+
+// The gradient of f, df = (df/dbeta_0, ..., df/dbeta_{npar-1}).
+void  wgsl_cat_optim_df (const gsl_vector *beta, void *params,
+		   gsl_vector *out) {
+  fix_parm_cat_T *p = (fix_parm_cat_T *)params;
+  int n = p->y->size;
+  int K = p->X->size2;
+  int npar = beta->size;
+
+  // Is initializing the gradient out necessary?
+  for(int i = 0; i < npar; ++i)
+    out->data[i]= 0;
+
+  // The loop starts at 1 instead of 0 to avoid regularizing beta_0.
+  for(int i = 1; i < npar; ++i)
+    out->data[i]= p->lambdaL2*beta->data[i];
+  for(int i = 1; i < npar; ++i)
+    out->data[i]+= p->lambdaL1*((beta->data[i]>0)-(beta->data[i]<0));
+
+  for(int i = 0; i < n; ++i) {
+    double pn=0;
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm];
+      iParm+=p->nlev->data[k]-1;
+    }
+
+    pn= -( p->y->data[i] - 1/(1 + gsl_sf_exp(-Xbetai)) );
+
+    out->data[0]+= pn;
+    iParm=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	out->data[gsl_matrix_int_get(p->X,i,k)-1+iParm]+=pn;
+      iParm+=p->nlev->data[k]-1;
+    }
+  }
+}
+
+// The Hessian of f.
+void  wgsl_cat_optim_hessian (const gsl_vector *beta, void *params,
+			      gsl_matrix *out) {
+  fix_parm_cat_T *p = (fix_parm_cat_T *)params;
+  int n = p->y->size;
+  int K = p->X->size2;
+  int npar = beta->size;
+
+  // Initializing the Hessian out is necessary.
+  gsl_matrix_set_zero(out);
+
+  // The loop starts at 1 instead of 0 to avoid regularizing beta_0.
+  for(int i = 1; i < npar; ++i)
+    gsl_matrix_set(out,i,i,(p->lambdaL2)); // Double-check this.
+
+  // The L1 penalty is not handled here, as it is not differentiable;
+  // I may need to do coordinate descent (as in glmnet).
+  for(int i = 0; i < n; ++i) {
+    double pn=0;
+    double aux=0;
+    double Xbetai=beta->data[0];
+    int iParm2=1;
+    int iParm1=1;
+    for(int k = 0; k < K; ++k) {
+      if(gsl_matrix_int_get(p->X,i,k)>0)
+	Xbetai+=beta->data[gsl_matrix_int_get(p->X,i,k)-1+iParm1];
+      iParm1+=p->nlev->data[k]-1;  //-1?
+    }
+
+    pn= 1/(1 + gsl_sf_exp(-Xbetai));
+
+    // Add a protection for pn very close to 0 or 1?
+    aux=pn*(1-pn);
+    *gsl_matrix_ptr(out,0,0)+=aux;
+    iParm2=1;
+    for(int k2 = 0; k2 < K; ++k2) {
+      if(gsl_matrix_int_get(p->X,i,k2)>0)
+	*gsl_matrix_ptr(out,0,gsl_matrix_int_get(p->X,i,k2)-1+iParm2)+=aux;
+      iParm2+=p->nlev->data[k2]-1;   //-1?
+    }
+    iParm1=1;
+    for(int k1 = 0; k1 < K; ++k1) {
+      if(gsl_matrix_int_get(p->X,i,k1)>0)
+	*gsl_matrix_ptr(out,gsl_matrix_int_get(p->X,i,k1)-1+iParm1,0)+=aux;
+      iParm2=1;
+      for(int k2 = 0; k2 < K; ++k2) {
+	if((gsl_matrix_int_get(p->X,i,k1)>0) &&
+	   (gsl_matrix_int_get(p->X,i,k2)>0))
+	  *gsl_matrix_ptr(out,
+			  gsl_matrix_int_get(p->X,i,k1)-1+iParm1,
+			  gsl_matrix_int_get(p->X,i,k2)-1+iParm2) += aux;
+	iParm2+=p->nlev->data[k2]-1;  //-1?
+      }
+      iParm1+=p->nlev->data[k1]-1; //-1?
+    }
+  }
+}
+
+double wgsl_cat_optim_f(gsl_vector *v, void *params) {
+  double mLogLik=0;
+  fix_parm_cat_T *p = (fix_parm_cat_T *)params;
+  mLogLik = fLogit_cat(v,p->X,p->nlev,p->y,p->lambdaL1,p->lambdaL2);
+  return mLogLik;
+}
+
+// Compute both f and df together.
+void wgsl_cat_optim_fdf (gsl_vector *x, void *params, double *f,
+			 gsl_vector *df) {
+  *f = wgsl_cat_optim_f(x, params);
+  wgsl_cat_optim_df(x, params, df);
+}
+
+int logistic_cat_fit(gsl_vector *beta,
+		     gsl_matrix_int *X,
+		     gsl_vector_int *nlev,
+		     gsl_vector *y,
+		     double lambdaL1,
+		     double lambdaL2) {
+  double mLogLik=0;
+  fix_parm_cat_T p;
+  int npar = beta->size;
+  int iter=0;
+  double maxchange=0;
+
+  // Initializing fixed parameters.
+  p.X=X;
+  p.nlev=nlev;
+  p.y=y;
+  p.lambdaL1=lambdaL1;
+  p.lambdaL2=lambdaL2;
+
+  // Initial fit.
+  mLogLik = wgsl_cat_optim_f(beta,&p);
+
+  gsl_matrix *myH = gsl_matrix_alloc(npar,npar); // Hessian matrix.
+  gsl_vector *stBeta = gsl_vector_alloc(npar);   // Direction to move.
+
+  gsl_vector *myG = gsl_vector_alloc(npar);      // Gradient.
+  gsl_vector *tau = gsl_vector_alloc(npar);      // tau for QR.
+
+  for(iter=0;iter<100;iter++){
+    wgsl_cat_optim_hessian(beta,&p,myH); // Calculate Hessian.
+    wgsl_cat_optim_df(beta,&p,myG);      // Calculate Gradient.
+    gsl_linalg_QR_decomp(myH,tau);       // Calculate next beta.
+    gsl_linalg_QR_solve(myH,tau,myG,stBeta);
+    gsl_vector_sub(beta,stBeta);
+
+    // Monitor convergence.
+    maxchange=0;
+    for(int i=0;i<npar; i++)
+      if(maxchange<fabs(stBeta->data[i]))
+	maxchange=fabs(stBeta->data[i]);
+
+#ifdef _RPR_DEBUG_
+    mLogLik = wgsl_cat_optim_f(beta,&p);
+#endif
+
+    if(maxchange<1E-4)
+      break;
+  }
+
+  // Final fit.
+  mLogLik = wgsl_cat_optim_f(beta,&p);
+
+  gsl_vector_free (tau);
+  gsl_vector_free (stBeta);
+  gsl_vector_free (myG);
+  gsl_matrix_free (myH);
+
+  return 0;
+}
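+
+// A minimal usage sketch (illustrative only; cat_fit_example is a
+// hypothetical helper). beta must have length 1 + Sum_k(nlev[k]-1),
+// and yhat must have length X->size1.
+void cat_fit_example(gsl_matrix_int *X, gsl_vector_int *nlev,
+		     gsl_vector *y, gsl_vector *yhat) {
+  size_t npar = 1;
+  for (size_t k = 0; k < nlev->size; ++k)
+    npar += gsl_vector_int_get(nlev, k) - 1;
+  gsl_vector *beta = gsl_vector_calloc(npar);
+  logistic_cat_fit(beta, X, nlev, y, 0.0, 1.0);  // L2 penalty only.
+  logistic_cat_pred(beta, X, nlev, yhat);        // In-sample fitted probs.
+  gsl_vector_free(beta);
+}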
+
+/***************/
+/* Continuous  */
+/***************/
+
+// I need to bundle all the data that goes to the function to
+// optimize together.
+typedef struct {
+  gsl_matrix *Xc;   // continuous covariates; Matrix Nobs x Kc
+  gsl_vector *y;
+  double lambdaL1;
+  double lambdaL2;
+} fix_parm_cont_T;
+
+double fLogit_cont(gsl_vector *beta, gsl_matrix *Xc, gsl_vector *y,
+		   double lambdaL1, double lambdaL2) {
+  int n = y->size;
+  int npar = beta->size;
+  double total = 0;
+  double aux = 0;
+
+  // omp_set_num_threads(ompthr);
+  // The loop starts at 1 instead of 0 to avoid regularizing beta_0.
+  // #pragma omp parallel for reduction (+:total)
+  for(int i = 1; i < npar; ++i)
+    total += beta->data[i]*beta->data[i];
+  total = (-total*lambdaL2/2);
+
+  // #pragma omp parallel for reduction (+:aux)
+  for(int i = 1; i < npar; ++i)
+    aux += (beta->data[i]>0 ? beta->data[i] : -beta->data[i]);
+  total = total-aux*lambdaL1;
+
+  // #pragma omp parallel for schedule(static) shared(n,beta,X,nlev,y)
+  //   reduction (+:total)
+  for(int i = 0; i < n; ++i) {
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < (Xc->size2); ++k)
+      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
+    total += y->data[i]*Xbetai-gsl_sf_log_1plusx(gsl_sf_exp(Xbetai));
+  }
+  return -total;
+}
+
+void logistic_cont_pred(gsl_vector *beta,    // Vector of parameters,
+					     // length = 1 + Kc.
+			gsl_matrix *Xc,      // Continuous covariates matrix,
+			                     // Nobs x Kc.
+			gsl_vector *yhat) {  // Vector of prob. predicted by
+                                             // the logistic.
+  for(int i = 0; i < Xc->size1; ++i) {
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < (Xc->size2); ++k)
+      Xbetai+= gsl_matrix_get(Xc,i,k)*beta->data[iParm++];
+    yhat->data[i]=1/(1 + gsl_sf_exp(-Xbetai));
+  }
+}
+
+// The gradient of f, df = (df/dbeta_0, ..., df/dbeta_{npar-1}).
+void wgsl_cont_optim_df (const gsl_vector *beta, void *params,
+			 gsl_vector *out) {
+  fix_parm_cont_T *p = (fix_parm_cont_T *)params;
+  int n = p->y->size;
+  int Kc = p->Xc->size2;
+  int npar = beta->size;
+
+  // Is initializing the gradient out necessary?
+  for(int i = 0; i < npar; ++i)
+    out->data[i]= 0;
+
+  // The loop starts at 1 instead of 0 to avoid regularizing beta_0.
+  for(int i = 1; i < npar; ++i)
+    out->data[i]= p->lambdaL2*beta->data[i];
+  for(int i = 1; i < npar; ++i)
+    out->data[i]+= p->lambdaL1*((beta->data[i]>0)-(beta->data[i]<0));
+
+  for(int i = 0; i < n; ++i) {
+    double pn=0;
+    double Xbetai=beta->data[0];
+    int iParm=1;
+    for(int k = 0; k < Kc; ++k)
+      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm++];
+
+    pn= -( p->y->data[i] - 1/(1 + gsl_sf_exp(-Xbetai)) );
+
+    out->data[0]+= pn;
+    iParm=1;
+
+    // Adding the continuous.
+    for(int k = 0; k < Kc; ++k) {
+      out->data[iParm++] += gsl_matrix_get(p->Xc,i,k)*pn;
+    }
+  }
+}
+
+// The Hessian of f.
+void wgsl_cont_optim_hessian (const gsl_vector *beta, void *params,
+			      gsl_matrix *out) {
+  fix_parm_cont_T *p = (fix_parm_cont_T *)params;
+  int n = p->y->size;
+  int Kc = p->Xc->size2;
+  int npar = beta->size;
+  gsl_vector *gn = gsl_vector_alloc(npar); // gn.
+
+  // Is initializing the Hessian out necessary?
+
+  gsl_matrix_set_zero(out);
+
+  // The loop starts at 1 instead of 0 to avoid regularizing beta_0.
+  for(int i = 1; i < npar; ++i)
+    gsl_matrix_set(out,i,i,(p->lambdaL2));  // Double-check this.
+
+  // The L1 penalty is not handled here, as it is not differentiable;
+  // I may need to do coordinate descent (as in glmnet).
+  for(int i = 0; i < n; ++i) {
+    double pn=0;
+    double aux=0;
+    double Xbetai=beta->data[0];
+    int iParm1=1;
+    for(int k = 0; k < Kc; ++k)
+      Xbetai+= gsl_matrix_get(p->Xc,i,k)*beta->data[iParm1++];
+
+    pn= 1/(1 + gsl_sf_exp(-Xbetai));
+
+    // Add a protection for pn very close to 0 or 1?
+    aux=pn*(1-pn);
+
+    // Calculate sub-gradient vector gn.
+    gsl_vector_set_zero(gn);
+    gn->data[0]= 1;
+    iParm1=1;
+    for(int k = 0; k < Kc; ++k) {
+      gn->data[iParm1++] = gsl_matrix_get(p->Xc,i,k);
+    }
+
+    for(int k1=0;k1<npar; ++k1)
+      if(gn->data[k1]!=0)
+	for(int k2=0;k2<npar; ++k2)
+	  if(gn->data[k2]!=0)
+	    *gsl_matrix_ptr(out,k1,k2) += (aux * gn->data[k1] * gn->data[k2]);
+  }
+  gsl_vector_free(gn);
+}
+
+double wgsl_cont_optim_f(gsl_vector *v, void *params) {
+  double mLogLik=0;
+  fix_parm_cont_T *p = (fix_parm_cont_T *)params;
+  mLogLik = fLogit_cont(v,p->Xc,p->y,p->lambdaL1,p->lambdaL2);
+  return mLogLik;
+}
+
+// Compute both f and df together.
+void wgsl_cont_optim_fdf (gsl_vector *x, void *params,
+			  double *f, gsl_vector *df) {
+  *f = wgsl_cont_optim_f(x, params);
+  wgsl_cont_optim_df(x, params, df);
+}
+
+int logistic_cont_fit (gsl_vector *beta,
+		       gsl_matrix *Xc,   // Continuous covariates matrix,
+		                         // Nobs x Kc.
+		       gsl_vector *y,
+		       double lambdaL1,
+		       double lambdaL2) {
+
+  double mLogLik=0;
+  fix_parm_cont_T p;
+  int npar = beta->size;
+  int iter=0;
+  double maxchange=0;
+
+  // Initializing fixed parameters.
+  p.Xc=Xc;
+  p.y=y;
+  p.lambdaL1=lambdaL1;
+  p.lambdaL2=lambdaL2;
+
+  // Initial fit.
+  mLogLik = wgsl_cont_optim_f(beta,&p);
+
+  gsl_matrix *myH = gsl_matrix_alloc(npar,npar); // Hessian matrix.
+  gsl_vector *stBeta = gsl_vector_alloc(npar);   // Direction to move.
+
+  gsl_vector *myG = gsl_vector_alloc(npar);      // Gradient.
+  gsl_vector *tau = gsl_vector_alloc(npar);      // tau for QR.
+
+  for(iter=0;iter<100;iter++){
+    wgsl_cont_optim_hessian(beta,&p,myH); // Calculate Hessian.
+    wgsl_cont_optim_df(beta,&p,myG);      // Calculate Gradient.
+    gsl_linalg_QR_decomp(myH,tau);        // Calculate next beta.
+    gsl_linalg_QR_solve(myH,tau,myG,stBeta);
+    gsl_vector_sub(beta,stBeta);
+
+    // Monitor convergence.
+    maxchange=0;
+    for(int i=0;i<npar; i++)
+      if(maxchange<fabs(stBeta->data[i]))
+	maxchange=fabs(stBeta->data[i]);
+
+#ifdef _RPR_DEBUG_
+    mLogLik = wgsl_cont_optim_f(beta,&p);
+#endif
+
+    if(maxchange<1E-4)
+      break;
+  }
+
+  // Final fit.
+  mLogLik = wgsl_cont_optim_f(beta,&p);
+
+  gsl_vector_free (tau);
+  gsl_vector_free (stBeta);
+  gsl_vector_free (myG);
+  gsl_matrix_free (myH);
+
+  return 0;
+}
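+
+// A minimal usage sketch (illustrative only; cont_fit_example is a
+// hypothetical helper). beta has length 1 + Kc; yhat length Xc->size1.
+void cont_fit_example(gsl_matrix *Xc, gsl_vector *y, gsl_vector *yhat) {
+  gsl_vector *beta = gsl_vector_calloc(1 + Xc->size2);
+  logistic_cont_fit(beta, Xc, y, 0.0, 0.1);  // Small ridge for stability.
+  logistic_cont_pred(beta, Xc, yhat);
+  gsl_vector_free(beta);
+}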