diff --git a/src/SCAN/Descendents.cc b/src/SCAN/Descendents.cc
index d1f3ee2..7374ea7 100644
--- a/src/SCAN/Descendents.cc
+++ b/src/SCAN/Descendents.cc
@@ -21,9 +21,10 @@ using namespace ABACUS;
 namespace ABACUS {
 
 
-  Vect<string> Descendent_States_with_iK_Stepped_Up (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2, const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, bool disperse_only_current_exc, bool preserve_nexc)
+  Vect<string> Descendent_States_with_iK_Stepped_Up
+  (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2,
+   const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, bool disperse_only_current_exc, bool preserve_nexc)
   {
-    //cout << "Up start" << endl;
     // Given an OriginIx2 and a BaseScanIx2 (by which we mean a set of Ix2 providing a basis for scanning),
     // this function returns a vector of the labels of the states obtained by each allowable
     // one-step increase of the quantum numbers.
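+    // In other words, each returned descendent differs from the scan state by a single quantum
+    // number shifted by +2 (one step up in iK), subject to the Ix2_max bound and to the target
+    // slot not being already occupied.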
@@ -58,9 +59,6 @@ namespace ABACUS {
     } while (!excfound);
     // If we haven't found an excitation, then exclevel == ScanIx2.size() and excindex = 0;
 
-    //cout << "exclevel = " << exclevel << endl;
-    //cout << "excindex = " << excindex << endl;
-
     // The quantum numbers which we can move right are thus those
     // with (j < exclevel) and (j == exclevel and alpha <= excindex)
 
@@ -107,10 +105,10 @@ namespace ABACUS {
 	  // The descendent is acceptable if disperse_only_current_exc == true,
 	  // or if preserve_nexc == true and nexc labels match,
 	  // or if preserve_nexc == false and nexc labels don't match:
-	  //if (!disperse_only_current_exc && (BaseScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] + 2) != (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0))) ABACUSerror("Inconsistency in check in Descendents");
 	  if (disperse_only_current_exc
 	      //|| (preserve_nexc == BaseScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] + 2)))
-	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
+	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found])
+				    .compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
 	    ndesc_found++;
 	}
       }
@@ -121,17 +119,14 @@ namespace ABACUS {
     for (int idesc = 0; idesc < ndesc_found; ++idesc)
       desclabelfound_resized[idesc] = desclabelfound[idesc];
 
-    //cout << "Up done" << endl;
-
-    //return(descIx2found);
-    //return(desclabelfound);
     return(desclabelfound_resized);
   }
 
 
-  Vect<string> Descendent_States_with_iK_Stepped_Down (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2, const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, bool disperse_only_current_exc, bool preserve_nexc)
+  Vect<string> Descendent_States_with_iK_Stepped_Down
+  (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2,
+   const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, bool disperse_only_current_exc, bool preserve_nexc)
   {
-    //cout << "Down start" << endl;
     // Given an OriginIx2 and a BaseScanIx2 (by which we mean a set of Ix2 providing a basis for scanning),
     // this function returns a vector of the labels of the states obtained by each allowable
     // one-step decrease of the quantum numbers.
@@ -153,14 +148,12 @@ namespace ABACUS {
     int excindex = 0;
     bool excfound = false;
 
-    //cout << "Looking for exclevel and excindex for " << endl << "\tBaseIx2 = " << BaseScanIx2 << endl << "\tScanIx2 = " << ScanIx2 << endl;
     do {
       exclevel++;
       if (exclevel == ScanIx2.size()) { // there isn't a single left-moving quantum number in ScanIx2
 	break;
       }
       for (int alpha = ScanIx2[exclevel].size() - 1; alpha >= 0; --alpha) {
-	//cout << exclevel << "\t" << alpha << "\t" << ScanIx2[exclevel][alpha] << "\t" << BaseScanIx2[exclevel][alpha] << "\t" << (ScanIx2[exclevel][alpha] < BaseScanIx2[exclevel][alpha]) << endl;
 	if (ScanIx2[exclevel][alpha] < BaseScanIx2[exclevel][alpha]) {
 	  excindex = alpha;
 	  excfound = true;
@@ -169,15 +162,10 @@ namespace ABACUS {
       }
     } while (!excfound);
     // If we haven't found an excitation, then exclevel == ScanIx2.size() and excindex = 0;
-    //if (!excfound) excindex = ScanIx2[exclevel].size() - 1;
-
-    //cout << "exclevel = " << exclevel << endl;
-    //cout << "excindex = " << excindex << endl;
 
     // The quantum numbers which we can move left are thus those
     // with (j < exclevel) and (j == exclevel and alpha >= excindex)
 
-
     int ndesc_possible = 1;
     if (!disperse_only_current_exc) {
       ndesc_possible = 0;
@@ -207,7 +195,6 @@ namespace ABACUS {
     if (exclevel < ScanIx2.size()) { // excfound == true, excindex is now guaranteed < ScanIx2[exclevel].size()
       int alphamin = (disperse_only_current_exc ? excindex : excindex + 1);
       int alphamax = (disperse_only_current_exc ? excindex : ScanIx2[exclevel].size() - 1);
-      //for (int alpha = ScanIx2[exclevel].size() - 1; alpha >= excindex; --alpha) {
       for (int alpha = alphamax; alpha >= alphamin; --alpha) {
 	if (ScanIx2[exclevel][alpha] <= BaseScanIx2[exclevel][alpha]
 	    && !ScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] - 2)
@@ -215,11 +202,9 @@ namespace ABACUS {
 	  ScanIx2[exclevel][alpha] -= 2;
 	  desclabelfound[ndesc_found] = Return_State_Label (ScanIx2, OriginIx2);
 	  ScanIx2[exclevel][alpha] += 2;
-	  //cout << "\tTesting .compare: " << Extract_nexc_Label(desclabelfound[ndesc_found]) << "\t" << Extract_nexc_Label(ScanIx2_label) << "\t" << (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0) << "\tpreserve_nexc: " << preserve_nexc << endl;
 	  if (disperse_only_current_exc
-	      //|| (preserve_nexc == BaseScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] - 2)))
-	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
-	      //|| (preserve_nexc == strcmp(Extract_nexc_Label(desclabelfound[ndesc_found]), Extract_nexc_Label(ScanIx2_label))))
+	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found])
+				    .compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
 	    ndesc_found++;
 	}
       }
@@ -230,22 +215,25 @@ namespace ABACUS {
     for (int idesc = 0; idesc < ndesc_found; ++idesc)
       desclabelfound_resized[idesc] = desclabelfound[idesc];
 
-    //cout << "Down done" << endl;
-    //return(descIx2found);
-    //return(desclabelfound);
     return(desclabelfound_resized);
   }
 
 
-  Vect<string> Descendent_States_with_iK_Preserved (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2, const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, bool disperse_only_current_exc_up, bool preserve_nexc_up, bool disperse_only_current_exc_down, bool preserve_nexc_down)
+  Vect<string> Descendent_States_with_iK_Preserved
+  (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2,
+   const Vect<int>& Ix2_min, const Vect<int>& Ix2_max,
+   bool disperse_only_current_exc_up, bool preserve_nexc_up,
+   bool disperse_only_current_exc_down, bool preserve_nexc_down)
   {
    // Returns the labels of all states which are obtained from ScanIx2 by stepping up one step and down one step.
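+    // Each up step raises iK by one unit and each down step lowers it by one, so the composition
+    // below returns labels of states with the same iK as ScanIx2_label, as the function name indicates.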
-    Vect<string> labels_up = Descendent_States_with_iK_Stepped_Up (ScanIx2_label, OriginIx2, BaseScanIx2, Ix2_min, Ix2_max, disperse_only_current_exc_up, preserve_nexc_up);
-    //cout << "labels_up = " << labels_up << endl;
+    Vect<string> labels_up = Descendent_States_with_iK_Stepped_Up
+      (ScanIx2_label, OriginIx2, BaseScanIx2, Ix2_min, Ix2_max, disperse_only_current_exc_up, preserve_nexc_up);
 
     Vect<string> labels_found(0);
     for (int i = 0; i < labels_up.size(); ++i) {
-      labels_found.Append (Descendent_States_with_iK_Stepped_Down (labels_up[i], OriginIx2, BaseScanIx2, Ix2_min, Ix2_max, disperse_only_current_exc_down, preserve_nexc_down));
+      labels_found.Append (Descendent_States_with_iK_Stepped_Down
+			   (labels_up[i], OriginIx2, BaseScanIx2, Ix2_min, Ix2_max,
+			    disperse_only_current_exc_down, preserve_nexc_down));
     }
 
     return(labels_found);
@@ -263,12 +251,8 @@ namespace ABACUS {
 
     // ASSUMPTIONS: OriginIx2 is a symmetric state.
 
-    //cout << "Desc Up A" << endl;
-
     Vect<Vect<int> > ScanIx2 = Return_Ix2_from_Label (ScanIx2_label, OriginIx2);
 
-    //cout << "Desc Up B" << endl;
-
    // Determine the level and index at which the bottom-most left-most right-moving quantum number sits:
     int exclevel = -1;
     int excindex = 0;
@@ -287,8 +271,6 @@ namespace ABACUS {
     } while (!excfound);
     // If we haven't found an excitation, then exclevel == ScanIx2.size() and excindex = 0;
 
-    //cout << "exclevel = " << exclevel << "\t" << "excindex = " << excindex << endl;
-
     // The quantum numbers which we can move right are thus those
     // with (j < exclevel) and (j == exclevel and alpha <= excindex)
 
@@ -325,8 +307,6 @@ namespace ABACUS {
       int alphamin = (disperse_only_current_exc ? excindex : 0);
       alphamin = ABACUS::max((ScanIx2[exclevel].size() + 1)/2, alphamin);
       int alphamax = (disperse_only_current_exc ? excindex : excindex - 1);
-      //for (int alpha = 0; alpha <= excindex; ++alpha) {
-      //cout << "alphamin = " << alphamin << "\talphamax = " << alphamax << endl;
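+      // The max() with (ScanIx2[exclevel].size() + 1)/2 above restricts the dispersal to the
+      // right half of the Ix2 set; the mirror-image quantum number on the left is shifted in the
+      // opposite direction below, so the state remains symmetric (cf. the ASSUMPTIONS above).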
       for (int alpha = alphamin; alpha <= alphamax; ++alpha) {
 	if (ScanIx2[exclevel][alpha] >= BaseScanIx2[exclevel][alpha]
 	    && !ScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] + 2)
@@ -334,36 +314,26 @@ namespace ABACUS {
 	  // We've found a descendent
 	  ScanIx2[exclevel][alpha] += 2;
 	  ScanIx2[exclevel][ScanIx2[exclevel].size() - 1 - alpha] -= 2;
-	  //cout << "Desc Up a" << endl;
-	  //cout << "ScanIx2[exclevel] = " << ScanIx2[exclevel] << endl;
 	  desclabelfound[ndesc_found] = Return_State_Label (ScanIx2, OriginIx2);
-	  //cout << "Desc Up b" << endl;
 	  ScanIx2[exclevel][alpha] -= 2;
 	  ScanIx2[exclevel][ScanIx2[exclevel].size() - 1 - alpha] += 2;
 	  // If we're dispersing a subleading Ix2, check whether we match the preserve_nexc condition:
 	  // The descendent is acceptable if disperse_only_current_exc == true,
 	  // or if preserve_nexc == true and nexc labels match,
 	  // or if preserve_nexc == false and nexc labels don't match:
-	  //if (!disperse_only_current_exc && (BaseScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] + 2) != (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0))) ABACUSerror("Inconsistency in check in Descendents");
 	  if (disperse_only_current_exc
-	      //|| (preserve_nexc == BaseScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] + 2)))
-	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
+	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found])
+				    .compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
 	    ndesc_found++;
 	}
       }
     } //if (exclevel < ScanIx2.size())
 
-    //cout << "Desc Up D" << endl;
-
     // Resize desc:
     Vect<string> desclabelfound_resized(ndesc_found);
     for (int idesc = 0; idesc < ndesc_found; ++idesc)
       desclabelfound_resized[idesc] = desclabelfound[idesc];
 
-    //cout << "Up done" << endl;
-
-    //return(descIx2found);
-    //return(desclabelfound);
     return(desclabelfound_resized);
   }
 
@@ -379,14 +349,12 @@ namespace ABACUS {
     int excindex = 0;
     bool excfound = false;
 
-    //cout << "Looking for exclevel and excindex for " << endl << "\tBaseIx2 = " << BaseScanIx2 << endl << "\tScanIx2 = " << ScanIx2 << endl;
     do {
       exclevel++;
       if (exclevel == ScanIx2.size()) { // there isn't a single left-moving quantum number in ScanIx2
 	break;
       }
       for (int alpha = ScanIx2[exclevel].size() - 1; alpha >= (ScanIx2[exclevel].size() + 1)/2 ; --alpha) {
-	//cout << exclevel << "\t" << alpha << "\t" << ScanIx2[exclevel][alpha] << "\t" << BaseScanIx2[exclevel][alpha] << "\t" << (ScanIx2[exclevel][alpha] < BaseScanIx2[exclevel][alpha]) << endl;
 	if (ScanIx2[exclevel][alpha] < BaseScanIx2[exclevel][alpha]) {
 	  excindex = alpha;
 	  excfound = true;
@@ -395,15 +363,10 @@ namespace ABACUS {
       }
     } while (!excfound);
     // If we haven't found an excitation, then exclevel == ScanIx2.size() and excindex = 0;
-    //if (!excfound) excindex = ScanIx2[exclevel].size() - 1;
-
-    //cout << "exclevel = " << exclevel << endl;
-    //cout << "excindex = " << excindex << endl;
 
     // The quantum numbers which we can move left are thus those
     // with (j < exclevel) and (j == exclevel and alpha >= excindex)
 
-
     int ndesc_possible = 1;
     if (!disperse_only_current_exc) {
       ndesc_possible = 0;
@@ -436,7 +399,6 @@ namespace ABACUS {
       int alphamin = (disperse_only_current_exc ? excindex : excindex + 1);
       alphamin = ABACUS::max((ScanIx2[exclevel].size() + 1)/2, alphamin);
       int alphamax = (disperse_only_current_exc ? excindex : ScanIx2[exclevel].size() - 1);
-      //for (int alpha = ScanIx2[exclevel].size() - 1; alpha >= excindex; --alpha) {
       for (int alpha = alphamax; alpha >= alphamin; --alpha) {
 	if (ScanIx2[exclevel][alpha] <= BaseScanIx2[exclevel][alpha]
 	    && !ScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] - 2)
@@ -446,11 +408,9 @@ namespace ABACUS {
 	  desclabelfound[ndesc_found] = Return_State_Label (ScanIx2, OriginIx2);
 	  ScanIx2[exclevel][alpha] += 2;
 	  ScanIx2[exclevel][ScanIx2[exclevel].size() - 1 - alpha] -= 2;
-	  //cout << "\tTesting .compare: " << Extract_nexc_Label(desclabelfound[ndesc_found]) << "\t" << Extract_nexc_Label(ScanIx2_label) << "\t" << (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0) << "\tpreserve_nexc: " << preserve_nexc << endl;
 	  if (disperse_only_current_exc
-	      //|| (preserve_nexc == BaseScanIx2[exclevel].includes(ScanIx2[exclevel][alpha] - 2)))
-	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found]).compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
-	      //|| (preserve_nexc == strcmp(Extract_nexc_Label(desclabelfound[ndesc_found]), Extract_nexc_Label(ScanIx2_label))))
+	      || (preserve_nexc == (Extract_nexc_Label(desclabelfound[ndesc_found])
+				    .compare(Extract_nexc_Label(ScanIx2_label)) == 0)))
 	    ndesc_found++;
 	}
       }
@@ -461,68 +421,62 @@ namespace ABACUS {
     for (int idesc = 0; idesc < ndesc_found; ++idesc)
       desclabelfound_resized[idesc] = desclabelfound[idesc];
 
-    //cout << "Down done" << endl;
-    //return(descIx2found);
-    //return(desclabelfound);
     return(desclabelfound_resized);
   }
 
 
 
   // Specialization for Lieb-Liniger case:
-  //Vect<string> Descendent_States_with_iK_Stepped_Up (string ScanIx2_label, const Vect<int>& OriginIx2, const Vect<int>& BaseScanIx2)
-  Vect<string> Descendent_States_with_iK_Stepped_Up (string ScanIx2_label, const LiebLin_Bethe_State& OriginState, bool disperse_only_current_exc, bool preserve_nexc)
+  Vect<string> Descendent_States_with_iK_Stepped_Up (string ScanIx2_label, const LiebLin_Bethe_State& OriginState,
+						     bool disperse_only_current_exc, bool preserve_nexc)
   {
-    //Vect<Vect<int> > ScanIx2here(1);
-    //ScanIx2here[0] = ScanIx2;
     Vect<Vect<int> > OriginIx2here(1);
     OriginIx2here[0] = OriginState.Ix2;
     Vect<Vect<int> > BaseScanIx2here(1);
-    //BaseScanIx2here[0] = BaseScanIx2;
     BaseScanIx2here[0] = OriginState.Ix2;
     Vect<int> Ix2_min(1);
     Ix2_min[0] = LIEBLIN_Ix2_MIN;
     Vect<int> Ix2_max(1);
     Ix2_max[0] = LIEBLIN_Ix2_MAX;
 
-    return (Descendent_States_with_iK_Stepped_Up (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max, disperse_only_current_exc, preserve_nexc));
-  }
-  // Specialization for Lieb-Liniger case:
-  //Vect<string> Descendent_States_with_iK_Stepped_Down (string ScanIx2_label, const Vect<int>& OriginIx2, const Vect<int>& BaseScanIx2)
-  Vect<string> Descendent_States_with_iK_Stepped_Down (string ScanIx2_label, const LiebLin_Bethe_State& OriginState, bool disperse_only_current_exc, bool preserve_nexc)
-  {
-    //Vect<Vect<int> > ScanIx2here(1);
-    //ScanIx2here[0] = ScanIx2;
-    Vect<Vect<int> > OriginIx2here(1);
-    OriginIx2here[0] = OriginState.Ix2;
-    Vect<Vect<int> > BaseScanIx2here(1);
-    //BaseScanIx2here[0] = BaseScanIx2;
-    BaseScanIx2here[0] = OriginState.Ix2;
-    Vect<int> Ix2_min(1);
-    Ix2_min[0] = LIEBLIN_Ix2_MIN;
-    Vect<int> Ix2_max(1);
-    Ix2_max[0] = LIEBLIN_Ix2_MAX;
-
-    return (Descendent_States_with_iK_Stepped_Down (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return (Descendent_States_with_iK_Stepped_Up (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max,
+						  disperse_only_current_exc, preserve_nexc));
   }
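+  // Usage sketch (hypothetical names, for illustration only): given a LiebLin_Bethe_State
+  // OriginState and a scan-state label produced by Return_State_Label, a call such as
+  //   Vect<string> desc = Descendent_States_with_iK_Stepped_Up (label, OriginState, false, true);
+  // collects the labels of all one-step iK-raised descendents whose nexc label matches that of
+  // the scan state.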
 
   // Specialization for Lieb-Liniger case:
-  //Vect<string> Descendent_States_with_iK_Preserved (string ScanIx2_label, const Vect<int>& OriginIx2, const Vect<int>& BaseScanIx2)
-  Vect<string> Descendent_States_with_iK_Preserved (string ScanIx2_label, const LiebLin_Bethe_State& OriginState, bool disperse_only_current_exc_up, bool preserve_nexc_up, bool disperse_only_current_exc_down, bool preserve_nexc_down)
+  Vect<string> Descendent_States_with_iK_Stepped_Down (string ScanIx2_label, const LiebLin_Bethe_State& OriginState,
+						       bool disperse_only_current_exc, bool preserve_nexc)
   {
-    //Vect<Vect<int> > ScanIx2here(1);
-    //ScanIx2here[0] = ScanIx2;
     Vect<Vect<int> > OriginIx2here(1);
     OriginIx2here[0] = OriginState.Ix2;
     Vect<Vect<int> > BaseScanIx2here(1);
-    //BaseScanIx2here[0] = BaseScanIx2;
     BaseScanIx2here[0] = OriginState.Ix2;
     Vect<int> Ix2_min(1);
     Ix2_min[0] = LIEBLIN_Ix2_MIN;
     Vect<int> Ix2_max(1);
     Ix2_max[0] = LIEBLIN_Ix2_MAX;
 
-    return (Descendent_States_with_iK_Preserved (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max, disperse_only_current_exc_up, preserve_nexc_up, disperse_only_current_exc_down, preserve_nexc_down));
+    return (Descendent_States_with_iK_Stepped_Down (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max,
+						    disperse_only_current_exc, preserve_nexc));
+  }
+
+  // Specialization for Lieb-Liniger case:
+  Vect<string> Descendent_States_with_iK_Preserved (string ScanIx2_label, const LiebLin_Bethe_State& OriginState,
+						    bool disperse_only_current_exc_up, bool preserve_nexc_up,
+						    bool disperse_only_current_exc_down, bool preserve_nexc_down)
+  {
+    Vect<Vect<int> > OriginIx2here(1);
+    OriginIx2here[0] = OriginState.Ix2;
+    Vect<Vect<int> > BaseScanIx2here(1);
+    BaseScanIx2here[0] = OriginState.Ix2;
+    Vect<int> Ix2_min(1);
+    Ix2_min[0] = LIEBLIN_Ix2_MIN;
+    Vect<int> Ix2_max(1);
+    Ix2_max[0] = LIEBLIN_Ix2_MAX;
+
+    return (Descendent_States_with_iK_Preserved (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max,
+						 disperse_only_current_exc_up, preserve_nexc_up,
+						 disperse_only_current_exc_down, preserve_nexc_down));
   }
 
   // Specialization for Lieb-Liniger case:
@@ -532,14 +486,14 @@ namespace ABACUS {
     Vect<Vect<int> > OriginIx2here(1);
     OriginIx2here[0] = OriginState.Ix2;
     Vect<Vect<int> > BaseScanIx2here(1);
-    //BaseScanIx2here[0] = BaseScanIx2;
     BaseScanIx2here[0] = OriginState.Ix2;
     Vect<int> Ix2_min(1);
     Ix2_min[0] = LIEBLIN_Ix2_MIN;
     Vect<int> Ix2_max(1);
     Ix2_max[0] = LIEBLIN_Ix2_MAX;
 
-    return(Descendent_States_with_iK_Stepped_Up_rightIx2only (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return(Descendent_States_with_iK_Stepped_Up_rightIx2only (ScanIx2_label, OriginIx2here, BaseScanIx2here,
+							      Ix2_min, Ix2_max, disperse_only_current_exc, preserve_nexc));
   }
 
   Vect<string> Descendent_States_with_iK_Stepped_Down_rightIx2only
@@ -548,14 +502,14 @@ namespace ABACUS {
     Vect<Vect<int> > OriginIx2here(1);
     OriginIx2here[0] = OriginState.Ix2;
     Vect<Vect<int> > BaseScanIx2here(1);
-    //BaseScanIx2here[0] = BaseScanIx2;
     BaseScanIx2here[0] = OriginState.Ix2;
     Vect<int> Ix2_min(1);
     Ix2_min[0] = LIEBLIN_Ix2_MIN;
     Vect<int> Ix2_max(1);
     Ix2_max[0] = LIEBLIN_Ix2_MAX;
 
-    return(Descendent_States_with_iK_Stepped_Down_rightIx2only (ScanIx2_label, OriginIx2here, BaseScanIx2here, Ix2_min, Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return(Descendent_States_with_iK_Stepped_Down_rightIx2only (ScanIx2_label, OriginIx2here, BaseScanIx2here,
+								Ix2_min, Ix2_max, disperse_only_current_exc, preserve_nexc));
   }
 
 
@@ -563,27 +517,37 @@ namespace ABACUS {
   Vect<string> Descendent_States_with_iK_Stepped_Up
   (string ScanIx2_label, const Heis_Bethe_State& OriginState, bool disperse_only_current_exc, bool preserve_nexc)
   {
-    return(Descendent_States_with_iK_Stepped_Up (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return(Descendent_States_with_iK_Stepped_Up (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min,
+						 OriginState.base.Ix2_max, disperse_only_current_exc, preserve_nexc));
   }
   Vect<string> Descendent_States_with_iK_Stepped_Down
   (string ScanIx2_label, const Heis_Bethe_State& OriginState, bool disperse_only_current_exc, bool preserve_nexc)
   {
-    return(Descendent_States_with_iK_Stepped_Down (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return(Descendent_States_with_iK_Stepped_Down (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min,
+						   OriginState.base.Ix2_max, disperse_only_current_exc, preserve_nexc));
   }
   Vect<string> Descendent_States_with_iK_Preserved
-  (string ScanIx2_label, const Heis_Bethe_State& OriginState, bool disperse_only_current_exc_up, bool preserve_nexc_up, bool disperse_only_current_exc_down, bool preserve_nexc_down)
+  (string ScanIx2_label, const Heis_Bethe_State& OriginState, bool disperse_only_current_exc_up, bool preserve_nexc_up,
+   bool disperse_only_current_exc_down, bool preserve_nexc_down)
   {
-    return(Descendent_States_with_iK_Preserved (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max, disperse_only_current_exc_up, preserve_nexc_up, disperse_only_current_exc_down, preserve_nexc_down));
+    return(Descendent_States_with_iK_Preserved
+	   (ScanIx2_label, OriginState.Ix2, OriginState.Ix2,
+	    OriginState.base.Ix2_min, OriginState.base.Ix2_max, disperse_only_current_exc_up, preserve_nexc_up,
+	    disperse_only_current_exc_down, preserve_nexc_down));
   }
   Vect<string> Descendent_States_with_iK_Stepped_Up_rightIx2only
   (string ScanIx2_label, const Heis_Bethe_State& OriginState, bool disperse_only_current_exc, bool preserve_nexc)
   {
-    return(Descendent_States_with_iK_Stepped_Up_rightIx2only (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return(Descendent_States_with_iK_Stepped_Up_rightIx2only
+	   (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max,
+	    disperse_only_current_exc, preserve_nexc));
   }
   Vect<string> Descendent_States_with_iK_Stepped_Down_rightIx2only
   (string ScanIx2_label, const Heis_Bethe_State& OriginState, bool disperse_only_current_exc, bool preserve_nexc)
   {
-    return(Descendent_States_with_iK_Stepped_Down_rightIx2only (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max, disperse_only_current_exc, preserve_nexc));
+    return(Descendent_States_with_iK_Stepped_Down_rightIx2only
+	   (ScanIx2_label, OriginState.Ix2, OriginState.Ix2, OriginState.base.Ix2_min, OriginState.base.Ix2_max,
+	    disperse_only_current_exc, preserve_nexc));
   }
 
 
@@ -592,7 +556,6 @@ namespace ABACUS {
   {
     // Given a state, returns the acceptable new hole positions.
 
-
     // Define the objects for the newstatedata:
     Vect<int> type_new = currentdata.type;
     Vect<int> M_new = currentdata.M;
@@ -600,7 +563,6 @@ namespace ABACUS {
     nexc_new[exclevel_newph] += 1; // we drill one more particle-hole pair at this level
     int index_new = (currentdata.nexc[exclevel_newph] + 1)/2; // we put the new p-h pair at index index_new.
 
-    //int ntypespresent = ScanIx2.size();
     int ntypespresent = currentdata.type.size();
     Vect<Vect<int> > Ix2old_new(ntypespresent);
     Vect<Vect<int> > Ix2exc_new(ntypespresent);
@@ -615,8 +577,6 @@ namespace ABACUS {
       }
     }
 
-    //cout << "Here 1" << endl;
-
     State_Label_Data descdatanewph (type_new, M_new, nexc_new, Ix2old_new, Ix2exc_new);
 
     // We now look for all possible hole positions,
@@ -636,14 +596,14 @@ namespace ABACUS {
       // C- it is next to an OriginIx2 vacancy or immediately right of Ix2old[index_new - 1] (if this exists)
       //      or immediately left of Ix2old[index_new] (if this exists)
       // D- it does not break the `towards the center' rule
-      //      (it will break the rule at this point if it is created away from an OriginIx2 boundary (and thus next to a preexisting excitation),
-      //      and if this excitation and it are not in the same sideblock (in other words: if there is a sideblock boundary between them)
+      //      (it will break the rule at this point if it is created away from an OriginIx2 boundary
+      //      (and thus next to a preexisting excitation),
+      //      and if this excitation and it are not in the same sideblock
+      //      (in other words: if there is a sideblock boundary between them)
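+      // Illustrative example (hypothetical numbers): with OriginIx2[type] = {-5,-3,-1,1,3,5} and
+      // no pre-existing hole excitations at this level, only -5 and 5 pass all four conditions;
+      // the interior slots fail C because both of their neighbours are occupied.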
 
-      //cout << "candidateIx2old " << candidateIx2old << " being tested" << endl;
       // A
       if (!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old)) {
 	// is not contained in OriginIx2
-	//cout << "candidateIx2old " << candidateIx2old << " rejected for reason A" << endl;
 	continue;
       }
 
@@ -651,7 +611,6 @@ namespace ABACUS {
       if (currentdata.nexc[exclevel_newph] > 0
 	  && candidateIx2old <= currentdata.Ix2old[exclevel_newph][index_new - 1]) {
 	// there is at least one hole exc to the left, and the candidate position isn't right of Ix2old[index_new - 1]
-	//cout << "candidateIx2old " << candidateIx2old << " rejected for reason B1" << endl;
 	continue;
       }
 
@@ -659,20 +618,21 @@ namespace ABACUS {
       if (currentdata.nexc[exclevel_newph] > 1
 	  && candidateIx2old >= currentdata.Ix2old[exclevel_newph][index_new]) {
 	// there is at least one hole exc to the right, and the candidate position isn't left of Ix2old[index_new]
-	//cout << "candidateIx2old " << candidateIx2old << " rejected for reason B2" << endl;
 	continue;
       }
 
       // C- it is next to an OriginIx2 vacancy or immediately right of Ix2old[index_new - 1] (if this exists)
       //      or immediately left of Ix2old[index_new] (if this exists)
-      if (!(!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old + 2) || !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old - 2))
+      if (!(!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old + 2)
+	    || !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old - 2))
 	  // doesn't sit next to an OriginIx2 vacancy
-	  && (currentdata.nexc[exclevel_newph] == 0 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
+	  && (currentdata.nexc[exclevel_newph] == 0
+	      || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
 	  // doesn't sit immediately right of first hole excitation to the left
-	  && (currentdata.nexc[exclevel_newph] <= 1 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new] - 2)
+	  && (currentdata.nexc[exclevel_newph] <= 1
+	      || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new] - 2)
 	  // doesn't sit immediately left of first hole  excitation to the right
 	  ) {
-	//cout << "candidateIx2old " << candidateIx2old << " rejected for reason C" << endl;
 	continue;
       }
 
@@ -695,28 +655,28 @@ namespace ABACUS {
       if (hole_candidate_is_left_moving
 	  && (currentdata.nexc[exclevel_newph] > 0 && candidateIx2old == currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
 	  // it is created to the right of a preexisting hole excitation
-	  && OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old + 2) // and is not sitting at the boundary
+	  && OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old + 2)
+	  // and is not sitting at the boundary
 	  && (currentdata.nexc[exclevel_newph] <= 1 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new] - 2)
 	  // and is not sitting just left of another preexisting hole excitation which is also left moving
 	  )
 	{
-	  //cout << "candidateIx2old " << candidateIx2old << " rejected for reason D1" << endl;
 	  continue;
 	}
 
       if (hole_candidate_is_right_moving
 	  && (currentdata.nexc[exclevel_newph] > 1 && candidateIx2old == currentdata.Ix2old[exclevel_newph][index_new] - 2)
 	  // it is created to the left of a preexisting hole excitation
-	  && OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old - 2) // and is not sitting at the boundary
-	  && (currentdata.nexc[exclevel_newph] == 0 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
+	  && OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old - 2)
+	  // and is not sitting at the boundary
+	  && (currentdata.nexc[exclevel_newph] == 0
+	      || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
 	  // and is not sitting just right of another preexisting hole excitation which is also right moving
 	  )
 	{
-	  //cout << "candidateIx2old " << candidateIx2old << " rejected for reason D2" << endl;
 	  continue;
 	}
 
-
       // If we have reached this point, candidateIx2old is acceptable.
 
       isgoodnewholepos[ih] = true;
@@ -729,8 +689,6 @@ namespace ABACUS {
 
 
 
-
-  //Vect<string> Descendents (const Bethe_State& ScanState, string type_required)
   Vect<string> Descendents (string ScanStateLabel, const Vect<Vect<int> >& ScanIx2, const Vect<Vect<int> >& OriginIx2,
 			    const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, int type_required)
   {
@@ -743,28 +701,20 @@ namespace ABACUS {
     // type_required == 0: inner (right)most hole moved
     // type_required == 1: inner (right)most particle moved
     // type_required == 2: new particle-hole pair added, keeping only closest p < h and p > h cases
-    // type_required == 3: at level 0, move leftmost particle one step left, rightmost one step right (fixed iK logic using skeletons).
-    // type_required == 4: generalized Umklapp: increase distance between latest-added p-h pair, staying on boundary of blocks
-
-    //cout << "\tCalling Descendents for type = " << type_required << endl;
+    // type_required == 3: at level 0, move leftmost particle one step left, rightmost one step right
+    //                     (fixed iK logic using skeletons).
+    // type_required == 4: generalized Umklapp: increase distance between latest-added p-h pair,
+    //                     staying on boundary of blocks
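+    // For example (hypothetical call, arguments as declared above):
+    //   Descendents (ScanStateLabel, ScanIx2, OriginIx2, Ix2_min, Ix2_max, 2)
+    // returns only the descendent labels obtained by adding one new particle-hole pair,
+    // keeping only the closest p < h and p > h cases as described above.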
 
     bool cinbreaks = false;
 
     // Number of descendents:
-    //int ndesc_possible = (type_required == 0 ? 1 : ScanState.base.charge * 2); // upper bound guess; should be refined.
-    //int ndesc_possible = (type_required == 0 ? 1 : 2* ScanIx2[0].size() * 2 * ScanIx2[0].size()); // upper bound guess; should be refined
     int ndesc_possible = 1;
     if (type_required == 1 || type_required == 2) {
       for (int i = 0; i < ScanIx2.size(); ++i)
-	ndesc_possible = ABACUS::max (ndesc_possible, 2* ScanIx2[i].size() * 2 * ScanIx2[i].size()); // inexact, should be refined
+	ndesc_possible = ABACUS::max (ndesc_possible,
+				      2* ScanIx2[i].size() * 2 * ScanIx2[i].size()); // inexact, should be refined
     }
-    //cout << "ndesc_possible = " << ndesc_possible << endl;
-
-    //cout << "OriginIx2.size() = " << OriginIx2.size() << endl;
-    //for (int i = 0; i < OriginIx2.size(); ++i) cout << "i = " << i << "\tOriginIx2[i].size() = " << OriginIx2[i].size() << endl;
-    //for (int i = 0; i < OriginIx2.size(); ++i) cout << "i = " << i << "\tOriginIx2[i] = " << OriginIx2[i] << endl;
-    //cout << "OriginIx2 = " << endl;
-    //cout << OriginIx2 << endl;
 
     Vect<string> desclabelfound (ndesc_possible);
     Vect<int> desctypefound (ndesc_possible);
@@ -774,19 +724,8 @@ namespace ABACUS {
     State_Label_Data currentdata = Read_State_Label (ScanStateLabel, OriginIx2);
 
     // Determine the level at which the highest current excitation sits
-    //int exclevel = ScanIx2.size() - 1;
     int exclevel = currentdata.type.size() - 1;
     while (exclevel > 0 && currentdata.nexc[exclevel] == 0) exclevel--;
-    //cout << "exclevel = " << exclevel << endl;
-
-    //cout << "***** Looking for descendents of type " << type_required << " for state with OriginIx2, ScanIx2: *******" << endl;
-    //cout << OriginIx2[currentdata.type[exclevel] ] << endl;
-    //cout << ScanIx2[currentdata.type[exclevel] ] << endl;
-    //if (currentdata.nexc[exclevel] > 0) {
-    //cout << "with current excitations (holes, particles):" << endl;
-    //cout << currentdata.Ix2old[exclevel] << endl;
-    //cout << currentdata.Ix2exc[exclevel] << endl;
-    //}
 
     if (type_required == 0) {
 
@@ -805,10 +744,12 @@ namespace ABACUS {
 
 	// Determine the size of the block of OriginState.Ix2 in which this hole sits:
 	int nroccupiedtoleft = 0;
-	while (OriginIx2[descdata.type[exclevel] ].includes (currentdata.Ix2old[exclevel][innerindex] -2*(nroccupiedtoleft + 1)))
+	while (OriginIx2[descdata.type[exclevel] ].includes (currentdata.Ix2old[exclevel][innerindex]
+							     -2*(nroccupiedtoleft + 1)))
 	  nroccupiedtoleft++;
 	int nroccupiedtoright = 0;
-	while (OriginIx2[descdata.type[exclevel] ].includes (currentdata.Ix2old[exclevel][innerindex] +2*(nroccupiedtoright + 1)))
+	while (OriginIx2[descdata.type[exclevel] ].includes (currentdata.Ix2old[exclevel][innerindex]
+							     +2*(nroccupiedtoright + 1)))
 	  nroccupiedtoright++;
 
 	if (nroccupiedtoleft - nroccupiedtoright + 2 < 0) newholepos += 2;
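+	// Illustration (hypothetical numbers): with 1 occupied slot to the left and 4 to the right,
+	// 1 - 4 + 2 < 0, so the hole position is moved right, i.e. towards the center of its block.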
@@ -821,15 +762,6 @@ namespace ABACUS {
 	    descdata.Ix2old[exclevel][innerindex] = newholepos;
 	    desclabelfound[ndesc_found] = Return_State_Label (descdata, OriginIx2);
 	    desctypefound[ndesc_found] = 0;
-	    //cout << "For state with OriginIx2, ScanIx2:" << endl;
-	    //cout << OriginIx2[exclevel] << endl;
-	    //cout << ScanIx2[exclevel] << endl;
-	    //if (currentdata.nexc[exclevel] > 0) {
-	    //cout << "with current excitations (holes, particles):" << endl;
-	    //cout << currentdata.Ix2old[exclevel] << endl;
-	    //cout << currentdata.Ix2exc[exclevel] << endl;
-	    //}
-	    //cout << "\tFound new descendent of type " << desctypefound[ndesc_found] << ": label " << desclabelfound[ndesc_found] << "\tholes " << descdata.Ix2old[exclevel] << "\tpart " << descdata.Ix2exc[exclevel] << endl;
 	    ndesc_found++;
 	    if (cinbreaks) { char a;    cin >> a;}
 	  }
@@ -841,9 +773,6 @@ namespace ABACUS {
 
     if (type_required == 1) {
 
-      //cout << "exclevel = " << exclevel << endl;
-      //cout << "currentdata.nexc[exclevel] = " << currentdata.nexc[exclevel] << endl;
-
       // We move the inner(right)most particle of the highest excited level one more step towards
       // the center of the block of Ix2 vacancies in which it sits.
 
@@ -869,8 +798,12 @@ namespace ABACUS {
 	else while (!OriginIx2[descdata.type[exclevel] ].includes (partpos +2*(nremptytoright + 1)))
 	       nremptytoright++;
 
-	if (!partpos_is_left_of_all_OriginIx2 && (partpos_is_right_of_all_OriginIx2 || nremptytoleft - nremptytoright + 2 < 0)) partpos += 2;
-	else if (!partpos_is_right_of_all_OriginIx2 && (partpos_is_left_of_all_OriginIx2 || nremptytoleft - nremptytoright - 2 >= 0)) partpos -= 2;
+	if (!partpos_is_left_of_all_OriginIx2 && (partpos_is_right_of_all_OriginIx2
+						  || nremptytoleft - nremptytoright + 2 < 0))
+	  partpos += 2;
+	else if (!partpos_is_right_of_all_OriginIx2 && (partpos_is_left_of_all_OriginIx2
+							|| nremptytoleft - nremptytoright - 2 >= 0))
+	  partpos -= 2;
 
 	if (partpos != currentdata.Ix2exc[exclevel][innerindex] // we have successfully moved the particle
 	    && !OriginIx2[descdata.type[exclevel] ].includes(partpos) // it's actually a new particle position
@@ -882,36 +815,23 @@ namespace ABACUS {
 	    descdata.Ix2exc[exclevel][innerindex] = partpos;
 	    desclabelfound[ndesc_found] = Return_State_Label (descdata, OriginIx2);
 	    desctypefound[ndesc_found] = 1;
-	    //cout << "For state with OriginIx2, ScanIx2:" << endl;
-	    //cout << OriginIx2[exclevel] << endl;
-	    //cout << ScanIx2[exclevel] << endl;
-	    //if (currentdata.nexc[exclevel] > 0) {
-	    //cout << "with current excitations (holes, particles):" << endl;
-	    //cout << currentdata.Ix2old[exclevel] << endl;
-	    //cout << currentdata.Ix2exc[exclevel] << endl;
-	    //}
-	    //cout << "\tFound new descendent of type " << desctypefound[ndesc_found] << ": label " << desclabelfound[ndesc_found] << "\tholes " << descdata.Ix2old[exclevel] << "\tpart " << descdata.Ix2exc[exclevel] << endl;
 	    ndesc_found++;
 	    if (cinbreaks) { char a;    cin >> a;}
 	  }
 
       } // if (currentdata.nexc[exclevel] > 0)
 
-
     } // if (type_required == 1)
 
-
     if (type_required == 2) {
 
       // Now add a new p-h pair at the inner(right)most position, at each level from exclevel upwards,
      // putting the particle and hole at each available position at the edge of vacancy/occupancy blocks.
 
-      //cout << "Trying for type 2 descendent. exclevel = " << exclevel << "\tcurrentdata.nexc.size() = " << currentdata.nexc.size() << endl;
-
-      //for (int exclevel_newph = exclevel; exclevel_newph < ScanIx2.size(); ++exclevel_newph) {
       for (int exclevel_newph = exclevel; exclevel_newph < currentdata.nexc.size(); ++exclevel_newph) {
 
-	if (ScanIx2[currentdata.type[exclevel_newph] ].size() <= currentdata.nexc[exclevel_newph]) continue; // no space for another p-h
+	if (ScanIx2[currentdata.type[exclevel_newph] ].size() <= currentdata.nexc[exclevel_newph])
+	  continue; // no space for another p-h
 
 	// Define the objects for the newstatedata:
 	Vect<int> type_new = currentdata.type;
@@ -935,8 +855,6 @@ namespace ABACUS {
 	  }
 	}
 
-	//cout << "Here 1" << endl;
-
 	State_Label_Data descdatanewph (type_new, M_new, nexc_new, Ix2old_new, Ix2exc_new);
 
 	// We now look for all possible hole positions,
@@ -944,122 +862,7 @@ namespace ABACUS {
 	// under the condition of obeying the `towards the block center' rule.
 
 	Vect<bool> isgoodnewholepos = Is_Good_New_Hole_Position (OriginIx2, currentdata, exclevel_newph);
-	/* The function call above replaces the whole block here:
-	Vect<bool> isgoodnewholepos(false, OriginIx2[descdatanewph.type[exclevel_newph] ].size());
 
-	for (int ih = 0; ih < OriginIx2[descdatanewph.type[exclevel_newph] ].size(); ++ih) {
-
-	  int candidateIx2old = OriginIx2[descdatanewph.type[exclevel_newph] ][ih];
-
-	  // candidateIx2old is an acceptable position for the new hole provided the following conditions are fulfilled:
-	  // A- it is in OriginIx2
-	  // B- it follows the ordering rule, i.e. it sits in the middle of previous particle excitations,
-	  //      namely between Ix2old[index_new - 1] (if this exists) and Ix2old[index_new] (if this exists)
-	  // C- it is next to an OriginIx2 vacancy or immediately right of Ix2old[index_new - 1] (if this exists)
-	  //      or immediately left of Ix2old[index_new] (if this exists)
-	  // D- it does not break the `towards the center' rule
-	  //      (it will break the rule at this point if it is created away from an OriginIx2 boundary (and thus next to a preexisting excitation),
-	  //      and if this excitation and it are not in the same sideblock (in other words: if there is a sideblock boundary between them)
-
-	  //cout << "candidateIx2old " << candidateIx2old << " being tested" << endl;
-	  // A
-	  if (!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old)) {
-	    // is not contained in OriginIx2
-	    //cout << "candidateIx2old " << candidateIx2old << " rejected for reason A" << endl;
-	    continue;
-	  }
-
-	  // B1
-	  if (currentdata.nexc[exclevel_newph] > 0
-	      && candidateIx2old <= currentdata.Ix2old[exclevel_newph][index_new - 1]) {
-	    // there is at least one hole exc to the left, and the candidate position isn't right of Ix2old[index_new - 1]
-	    //cout << "candidateIx2old " << candidateIx2old << " rejected for reason B1" << endl;
-	    continue;
-	  }
-
-	  // B2
-	  if (currentdata.nexc[exclevel_newph] > 1
-	      && candidateIx2old >= currentdata.Ix2old[exclevel_newph][index_new]) {
-	    // there is at least one hole exc to the right, and the candidate position isn't left of Ix2old[index_new]
-	    //cout << "candidateIx2old " << candidateIx2old << " rejected for reason B2" << endl;
-	    continue;
-	  }
-
-	  // C- it is next to an OriginIx2 vacancy or immediately right of Ix2old[index_new - 1] (if this exists)
-	  //      or immediately left of Ix2old[index_new] (if this exists)
-	  if (!(!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old + 2) || !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old - 2))
-	      // doesn't sit next to an OriginIx2 vacancy
-	      && (currentdata.nexc[exclevel_newph] == 0 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
-	      // doesn't sit immediately right of first hole excitation to the left
-	      && (currentdata.nexc[exclevel_newph] <= 1 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new] - 2)
-	      // doesn't sit immediately left of first hole  excitation to the right
-	      ) {
-	    //cout << "candidateIx2old " << candidateIx2old << " rejected for reason C" << endl;
-	    continue;
-	  }
-
-	  // D- it does not break the `towards the center' rule
-	  // In other words, if created away from a block boundary but next to a preexisting hole,
-	  // must be in same sideblock as this particle:
-
-	  // Determine the size of the block of OriginIx2 in which this hole sits:
-	  int nroccupiedtoleft = 0;
-	  while (OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old -2*(nroccupiedtoleft + 1)))
-	    nroccupiedtoleft++;
-	  int nroccupiedtoright = 0;
-	  while (OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old +2*(nroccupiedtoright + 1)))
-	    nroccupiedtoright++;
-
-	  // We can determine whether the new hole would be left- or right-moving
-	  bool hole_candidate_is_left_moving = nroccupiedtoleft >= nroccupiedtoright;
-	  bool hole_candidate_is_right_moving = nroccupiedtoleft < nroccupiedtoright;
-
-	  if (hole_candidate_is_left_moving
-	      && (currentdata.nexc[exclevel_newph] > 0 && candidateIx2old == currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
-		  // it is created to the right of a preexisting hole excitation
-	      && OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old + 2) // and is not sitting at the boundary
-	      && (currentdata.nexc[exclevel_newph] <= 1 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new] - 2)
-	      // and is not sitting just left of another preexisting hole excitation which is also left moving
-	      )
-	    {
-	      //cout << "candidateIx2old " << candidateIx2old << " rejected for reason D1" << endl;
-	      continue;
-	    }
-
-	  if (hole_candidate_is_right_moving
-	      && (currentdata.nexc[exclevel_newph] > 1 && candidateIx2old == currentdata.Ix2old[exclevel_newph][index_new] - 2)
-		  // it is created to the left of a preexisting hole excitation
-	      && OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2old - 2) // and is not sitting at the boundary
-	      && (currentdata.nexc[exclevel_newph] == 0 || candidateIx2old != currentdata.Ix2old[exclevel_newph][index_new - 1] + 2)
-	      // and is not sitting just right of another preexisting hole excitation which is also right moving
-	      )
-	    {
-	      //cout << "candidateIx2old " << candidateIx2old << " rejected for reason D2" << endl;
-	      continue;
-	    }
-
-
-	  // If we have reached this point, candidateIx2old is acceptable.
-
-	  isgoodnewholepos[ih] = true;
-
-	} // for (int ih
-	*/
-
-
-
-	//cout << "Here 2" << endl;
-
-	//cout << "\tisgoodnewholdpos = " << isgoodnewholepos << endl;
-
-	//cout << "Looking for a new particle position: OriginIx2, ScanIx2:" << endl;
-	//cout << OriginIx2[exclevel_newph] << endl;
-	//cout << ScanIx2[exclevel_newph] << endl;
-	//if (currentdata.nexc[exclevel_newph] > 0) {
-	//  cout << "Current excitations (holes, particles):" << endl;
-	//  cout << currentdata.Ix2old[exclevel_newph] << endl;
-	//  cout << currentdata.Ix2exc[exclevel_newph] << endl;
-	//}
 
 	// We now look for all possible particle positions,
 	// the allowable ones being either at the edge of a block, or next to an existing particle excitation,
@@ -1079,8 +882,6 @@ namespace ABACUS {
 
 	for (int candidateIx2exc = Ix2excmin; candidateIx2exc <= Ix2excmax; candidateIx2exc += 2) {
 
-	  //cout << "Here a" << endl;
-
 	  // candidateIx2exc is an acceptable position for the new particle provided the following conditions are fulfilled:
 	  // A- it is not in OriginIx2
 	  // B- it follows the ordering rule, i.e. it sits in the middle of previous particle excitations,
@@ -1088,67 +889,63 @@ namespace ABACUS {
 	  // C- it is next to an OriginIx2 occupancy or immediately right of Ix2exc[index_new - 1] (if this exists)
 	  //      or immediately left of Ix2exc[index_new] (if this exists)
 	  // D- it does not break the `towards the center' rule
-	  //      (it will break the rule at this point if it is created away from an OriginIx2 boundary (and thus next to a preexisting excitation),
-	  //      and if this excitation and it are not in the same sideblock (in other words: if there is a sideblock boundary between them)
+	  //      (it will break the rule at this point if it is created away from an OriginIx2 boundary
+	  //      (and thus next to a preexisting excitation),
+	  //      and if this excitation and it are not in the same sideblock
+	  //      (in other words: if there is a sideblock boundary between them)
 
-	  //cout << "candidateIx2exc " << candidateIx2exc << " being tested" << endl;
 	  // A
 	  if (OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc)) {
 	    // is contained in OriginIx2
-	    //cout << "candidateIx2exc " << candidateIx2exc << " rejected for reason A" << endl;
 	    continue;
 	  }
 
-	  //cout << "Here b" << endl;
-
 	  // B1
 	  if (currentdata.nexc[exclevel_newph] > 0
 	      && candidateIx2exc <= currentdata.Ix2exc[exclevel_newph][index_new - 1]) {
 	    // there is at least one particle exc to the left, and the candidate position isn't right of Ix2exc[index_new - 1]
-	    //cout << "candidateIx2exc " << candidateIx2exc << " rejected for reason B1" << endl;
 	    continue;
 	  }
 
-	  //cout << "Here c" << endl;
-
 	  // B2
 	  if (currentdata.nexc[exclevel_newph] > 1
 	      && candidateIx2exc >= currentdata.Ix2exc[exclevel_newph][index_new]) {
 	    // there is at least one particle exc to the right, and the candidate position isn't left of Ix2exc[index_new]
-	    //cout << "candidateIx2exc " << candidateIx2exc << " rejected for reason B2" << endl;
 	    continue;
 	  }
 
-	  //cout << "Here d" << endl;
-
 	  // C- it is next to an OriginIx2 occupancy or immediately right of Ix2exc[index_new - 1] (if this exists)
 	  //      or immediately left of Ix2exc[index_new] (if this exists)
-	  if (!(OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc + 2) || OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc - 2))
+	  if (!(OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc + 2)
+		|| OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc - 2))
 	      // doesn't sit next to an OriginIx2
-	      && (currentdata.nexc[exclevel_newph] == 0 || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new - 1] + 2)
+	      && (currentdata.nexc[exclevel_newph] == 0
+		  || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new - 1] + 2)
 	      // doesn't sit immediately right of first particle excitation to the left
-	      && (currentdata.nexc[exclevel_newph] <= 1 || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new] - 2)
+	      && (currentdata.nexc[exclevel_newph] <= 1
+		  || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new] - 2)
 	      // doesn't sit immediately left of first particle excitation to the right
 	      ) {
-	    //cout << "candidateIx2exc " << candidateIx2exc << " rejected for reason C" << endl;
 	    continue;
 	  }
 
-	  //cout << "Here e" << endl;
-
 	  // D- it does not break the `towards the center' rule
 	  // In other words, if created away from a block boundary but next to a preexisting particle,
 	  // must be in same sideblock as this particle:
 
 	  // Determine the size of the block of OriginIx2 vacancies in which this particle sits:
-	  bool candidate_is_left_of_all_OriginIx2 = (candidateIx2exc < OriginIx2[descdatanewph.type[exclevel_newph] ].min()); // this makes it left-moving
-	  bool candidate_is_right_of_all_OriginIx2 = (candidateIx2exc > OriginIx2[descdatanewph.type[exclevel_newph] ].max()); // this makes it right-moving
+	  bool candidate_is_left_of_all_OriginIx2 =
+	    (candidateIx2exc < OriginIx2[descdatanewph.type[exclevel_newph] ].min()); // this makes it left-moving
+	  bool candidate_is_right_of_all_OriginIx2 =
+	    (candidateIx2exc > OriginIx2[descdatanewph.type[exclevel_newph] ].max()); // this makes it right-moving
 	  int nremptytoleft = 0;
-	  if (candidate_is_left_of_all_OriginIx2) nremptytoleft = (candidateIx2exc - Ix2_min[descdatanewph.type[exclevel_newph] ])/2;
+	  if (candidate_is_left_of_all_OriginIx2)
+	    nremptytoleft = (candidateIx2exc - Ix2_min[descdatanewph.type[exclevel_newph] ])/2;
 	  else while (!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc -2*(nremptytoleft + 1)))
 		 nremptytoleft++;
 	  int nremptytoright = 0;
-	  if (candidate_is_right_of_all_OriginIx2) nremptytoright = (Ix2_max[descdatanewph.type[exclevel_newph] ] - candidateIx2exc)/2;
+	  if (candidate_is_right_of_all_OriginIx2)
+	    nremptytoright = (Ix2_max[descdatanewph.type[exclevel_newph] ] - candidateIx2exc)/2;
 	  else while (!OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc +2*(nremptytoright + 1)))
 		 nremptytoright++;
 	  // We can determine whether the new particle would be left- or right-moving
@@ -1157,82 +954,61 @@ namespace ABACUS {
 	  bool candidate_is_right_moving = candidate_is_right_of_all_OriginIx2
 	    || (!candidate_is_left_of_all_OriginIx2 && nremptytoleft < nremptytoright);
 	  // Consistency checks:
-	  if (candidate_is_left_moving && candidate_is_right_moving) ABACUSerror("New particle moving left and right at same time");
-	  if (!candidate_is_left_moving && !candidate_is_right_moving) ABACUSerror("New particle not moving either left or right");
-
-	  //cout << "Here f" << endl;
+	  if (candidate_is_left_moving && candidate_is_right_moving)
+	    ABACUSerror("New particle moving left and right at same time");
+	  if (!candidate_is_left_moving && !candidate_is_right_moving)
+	    ABACUSerror("New particle not moving either left or right");
 
 	  if (candidate_is_left_moving
-	      && (currentdata.nexc[exclevel_newph] > 0 && candidateIx2exc == currentdata.Ix2exc[exclevel_newph][index_new - 1] + 2)
-		  // it is created to the right of a preexisting particle excitation
-	      && !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc + 2) // and is not sitting at the boundary
-	      && (currentdata.nexc[exclevel_newph] <= 1 || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new] - 2)
+	      && (currentdata.nexc[exclevel_newph] > 0
+		  && candidateIx2exc == currentdata.Ix2exc[exclevel_newph][index_new - 1] + 2)
+	      // it is created to the right of a preexisting particle excitation
+	      && !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc + 2)
+	      // and is not sitting at the boundary
+	      && (currentdata.nexc[exclevel_newph] <= 1
+		  || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new] - 2)
 	      // and is not sitting just left of another preexisting particle excitation which is also left moving
 	      )
 	    {
-	      //cout << "candidateIx2exc " << candidateIx2exc << " rejected for reason D1" << endl;
 	      continue;
 	    }
 
-	  //cout << "Here g" << endl;
-
 	  if (candidate_is_right_moving
-	      && (currentdata.nexc[exclevel_newph] > 1 && candidateIx2exc == currentdata.Ix2exc[exclevel_newph][index_new] - 2)
-		  // it is created to the left of a preexisting particle excitation
-	      && !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc - 2) // and is not sitting at the boundary
-	      && (currentdata.nexc[exclevel_newph] == 0 || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new - 1] + 2)
+	      && (currentdata.nexc[exclevel_newph] > 1
+		  && candidateIx2exc == currentdata.Ix2exc[exclevel_newph][index_new] - 2)
+	      // it is created to the left of a preexisting particle excitation
+	      && !OriginIx2[descdatanewph.type[exclevel_newph] ].includes (candidateIx2exc - 2)
+	      // and is not sitting at the boundary
+	      && (currentdata.nexc[exclevel_newph] == 0
+		  || candidateIx2exc != currentdata.Ix2exc[exclevel_newph][index_new - 1] + 2)
 	      // and is not sitting just right of another preexisting particle excitation which is also right moving
 	      )
 	    {
-	      //cout << "candidateIx2exc " << candidateIx2exc << " rejected for reason D2" << endl;
 	      continue;
 	    }
 
-	  //cout << "\tFound a possible exc position at " << candidateIx2exc << endl;
-
 	  // If we have reached this point, candidateIx2exc is acceptable.
 	  // Immediately construct all descendents with this part position:
 
-	  //cout << "Here h" << endl;
-
 	  // We now select the hole position closest to left or right:
 	  int ihclosestleft = -1;
 	  int ihclosestright = -1;
 	  for (int ih = 0; ih < OriginIx2[descdatanewph.type[exclevel_newph] ].size(); ++ih)
 	    if (isgoodnewholepos[ih])
 	      {
-		/*
-		//cout << "Here alpha" << endl;
-		descdatanewph.Ix2old[exclevel_newph][index_new] = OriginIx2[descdatanewph.type[exclevel_newph] ][ih];
-		//cout << "Here beta" << endl;
-		descdatanewph.Ix2exc[exclevel_newph][index_new] = candidateIx2exc;
-		//cout << "Here gamma" << endl;
-		desclabelfound[ndesc_found] = Return_State_Label (descdatanewph, OriginIx2);
-		//cout << "Here delta" << endl;
-		desctypefound[ndesc_found] = 2;
-		//cout << "For state with OriginIx2, ScanIx2:" << endl;
-		//cout << OriginIx2[exclevel_newph] << endl;
-		//cout << ScanIx2[exclevel_newph] << endl;
-		//if (currentdata.nexc[exclevel_newph] > 0) {
-		//  cout << "with current excitations (holes, particles):" << endl;
-		//  cout << currentdata.Ix2old[exclevel_newph] << endl;
-		//  cout << currentdata.Ix2exc[exclevel_newph] << endl;
-		//}
-		//cout << "\tFound new descendent of type " << desctypefound[ndesc_found] << ": label " << desclabelfound[ndesc_found] << "\tholes " << descdatanewph.Ix2old[exclevel_newph] << "\tpart " << descdatanewph.Ix2exc[exclevel_newph] << endl;
-		ndesc_found++;
-
-		if (cinbreaks) { char a;    cin >> a;}
-		*/
-		// New in ++G_3.1:
-		if (OriginIx2[descdatanewph.type[exclevel_newph] ][ih] - candidateIx2exc < 0) // new hole is left of new particle
+		if (OriginIx2[descdatanewph.type[exclevel_newph] ][ih] - candidateIx2exc < 0)
+		  // new hole is left of new particle
 		  if (ihclosestleft == -1 // we hadn't found a hole previously
 		      || ihclosestleft >= 0 // we had already found a new hole to the left
-		      && OriginIx2[descdatanewph.type[exclevel_newph] ][ih] > OriginIx2[descdatanewph.type[exclevel_newph] ][ihclosestleft]) //
+		      && OriginIx2[descdatanewph.type[exclevel_newph] ][ih] >
+		      OriginIx2[descdatanewph.type[exclevel_newph] ][ihclosestleft])
 		    ihclosestleft = ih;
-		if (OriginIx2[descdatanewph.type[exclevel_newph] ][ih] - candidateIx2exc > 0) // new hole is right of new particle
+		if (OriginIx2[descdatanewph.type[exclevel_newph] ][ih] - candidateIx2exc > 0)
+		  // new hole is right of new particle
 		  if (ihclosestright == -1 // we hadn't found a hole previously
 		      || ihclosestright >= 0 // we had already found a new hole to the right
-		      && OriginIx2[descdatanewph.type[exclevel_newph] ][ih] < OriginIx2[descdatanewph.type[exclevel_newph] ][ihclosestright]) //
+		      && OriginIx2[descdatanewph.type[exclevel_newph] ][ih] <
+		      OriginIx2[descdatanewph.type[exclevel_newph] ][ihclosestright])
 		    ihclosestright = ih;
 	      } // if (isgoodnewholepos[ih])
 	  // for ih
@@ -1255,8 +1031,6 @@ namespace ABACUS {
 
 	} // for (int candidateIx2exc)
 
-	//cout << "Here 3" << endl;
-
       } // for (int exclevel_newph)
 
     } // if (type_required == 2)
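// A minimal standalone sketch of the "closest hole on each side" selection performed by the
// loop above, assuming a plain std::vector<int> stands in for one row of OriginIx2 and
// omitting the isgoodnewholepos admissibility filter; the helper name and types below are
// illustrative, not part of ABACUS:
#include <utility>
#include <vector>

std::pair<int,int> Closest_Hole_Indices (const std::vector<int>& holepos, int candidateIx2exc)
{
  int ihclosestleft = -1;   // index of the nearest hole strictly left of the new particle
  int ihclosestright = -1;  // index of the nearest hole strictly right of the new particle
  for (int ih = 0; ih < int(holepos.size()); ++ih) {
    if (holepos[ih] < candidateIx2exc
	&& (ihclosestleft == -1 || holepos[ih] > holepos[ihclosestleft]))
      ihclosestleft = ih;
    if (holepos[ih] > candidateIx2exc
	&& (ihclosestright == -1 || holepos[ih] < holepos[ihclosestright]))
      ihclosestright = ih;
  }
  return std::make_pair(ihclosestleft, ihclosestright); // -1 signals no hole on that side
}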
@@ -1286,7 +1060,8 @@ namespace ABACUS {
 
       // Start by finding the possible hole positions of the state without the innermost p-h pair:
       Vect<int> nexc_new = currentdata.nexc;
-      if (currentdata.nexc[exclevel] < 1) ABACUSerror("Should not call type 3 descendent if there are no ph exc at exclevel");
+      if (currentdata.nexc[exclevel] < 1)
+	ABACUSerror("Should not call type 3 descendent if there are no ph exc at exclevel");
       nexc_new[exclevel] -= 1; // we remove the innermost ph pair
 
       int innerindex = currentdata.nexc[exclevel]/2; // index of ph pair we remove
@@ -1339,15 +1114,11 @@ namespace ABACUS {
     } // if (type_required == 4)
 
 
-
-
     Vect<string> desclabelfoundresized (ndesc_found);
     for (int i = 0; i < ndesc_found; ++i) {
       desclabelfoundresized[i] = desclabelfound[i];
     }
 
-    //cout << "For state with label " << ScanStateLabel << ", found " << ndesc_found << " descendents of type " << type_required << endl;
-    //cout << desclabelfoundresized << endl;
     if (cinbreaks) { char a;    cin >> a;}
 
     return(desclabelfoundresized);
@@ -1355,12 +1126,9 @@ namespace ABACUS {
   } // Vect<string> Descendents
 
 
-
   // Specialization for LiebLin gas:
 
   Vect<string> Descendents (const LiebLin_Bethe_State& ScanState, const LiebLin_Bethe_State& OriginState, int type_required)
-  //string ScanStateLabel, const Vect<Vect<int> >& ScanIx2, const Vect<Vect<int> >& OriginIx2,
-  //			    const Vect<int>& Ix2_min, const Vect<int>& Ix2_max, int type_required)
   {
     Vect<Vect<int> > ScanIx2here(1);
     ScanIx2here[0] = ScanState.Ix2;
@@ -1375,25 +1143,7 @@ namespace ABACUS {
     // If the state is an outer skeleton, we use a slightly modified OriginIx2 (in which the outermost
     // rapidities at level zero are put to the outer skeleton position convention) for computing the descendents.
 
-    //if (ScanState.Is_Outer_Skeleton()) { // since the state is an outer skeleton, we first modify the OriginIx2
-    //if (ScanState.N >= 2 && ScanState.Ix2[0] == LIEBLIN_Ix2_MIN + (ScanState.N % 2) + 1 && ScanState.Ix2[ScanState.N-1] == LIEBLIN_Ix2_MAX - (ScanState.N % 2) - 1) { // since the state is an outer skeleton, we first modify the OriginIx2
     if (Is_Outer_Skeleton(ScanState)) {
-      //cout << "\tDetected outer skeleton in Descendents: " << ScanState.label << endl;
-      /* First (failed) attempt
-      OriginIx2here[0][0] = LIEBLIN_Ix2_MIN + (ScanState.N % 2) + 3; // The OriginIx2here endpoints are set one step internally next to the Skeleton position conventions
-      OriginIx2here[0][OriginState.N - 1] = LIEBLIN_Ix2_MAX - (ScanState.N % 2) - 3;
-      // Obtain the descendends using this modified OriginIx2:
-      Vect<string> desc_mod = Descendents (ScanState.label, ScanIx2here, OriginIx2here, Ix2_min, Ix2_max, type_required);
-      // Translate the obtained descendends into ones corresponding to OriginIx2:
-      LiebLin_Bethe_State LabellingState = ScanState;
-      for (int idesc = 0; idesc < desc_mod.size(); ++idesc) {
-	LabellingState.Set_to_Label (desc_mod[idesc], OriginIx2here[0]); // Set quantum numbers according to OriginIx2here
-	LabellingState.Set_Label_from_Ix2 (OriginState.Ix2); // Now reset label on OriginIx2 from the correctly set Ix2
-	desc_mod[idesc] = LabellingState.label;
-	cout << "\tidesc " << idesc << "\tlabel " << LabellingState.label << "\tIx2 " << LabellingState.Ix2 << endl;
-      }
-      return(desc_mod);
-      */
       // Construct and descend a state with 2 less particles:
       if (ScanState.N < 3) ABACUSerror("Skeleton descendent logic at fixed iK not implemented for N < 3.");
       LiebLin_Bethe_State ReducedScanState (ScanState.c_int, ScanState.L, ScanState.N - 2);
@@ -1414,391 +1164,25 @@ namespace ABACUS {
 	LabellingState.Ix2[ScanState.N - 1] = LIEBLIN_Ix2_MAX - (ScanState.N % 2) - 1;
 	LabellingState.Set_Label_from_Ix2 (OriginState.Ix2); // Now reset label on OriginIx2 from the correctly set Ix2
 	desc_mod[idesc] = LabellingState.label;
-	//cout << "\tidesc " << idesc << "\tlabel " << LabellingState.label << "\tIx2 " << LabellingState.Ix2 << endl;
       }
-      //cout << "\tFound " << desc_mod.size() << "descendents, " << endl;
-      //cout << "\t" << desc_mod << endl;
       desc_found = desc_mod;
     }
 
     // If not outer skeleton, just return straight descendents
     else desc_found = Descendents (ScanState.label, ScanIx2here, OriginIx2here, Ix2_min, Ix2_max, type_required);
 
-    //return(Descendents (ScanState.label, ScanIx2here, OriginIx2here, Ix2_min, Ix2_max, type_required));
     return(desc_found);
   }
 
+
+
   // Specialization for Heisenberg:
 
   Vect<string> Descendents (const Heis_Bethe_State& ScanState, const Heis_Bethe_State& OriginState, int type_required)
   {
-    return(Descendents (ScanState.label, ScanState.Ix2, OriginState.Ix2, ScanState.base.Ix2_min, ScanState.base.Ix2_max, type_required));
+    return(Descendents (ScanState.label, ScanState.Ix2, OriginState.Ix2,
+			ScanState.base.Ix2_min, ScanState.base.Ix2_max, type_required));
   }
 
 
-
-
-
-
-  // ABACUS++T version:
-  /*
-  //Vect<string> Descendents (const LiebLin_Bethe_State& ScanState, const LiebLin_Bethe_State& OriginState)
-  Vect<string> Descendents (const LiebLin_Bethe_State& ScanState, const LiebLin_Bethe_State& OriginState, int type_required)
-  //Descendent_Data Descendents (const LiebLin_Bethe_State& ScanState, const LiebLin_Bethe_State& OriginState, int type_required)
-  {
-
-    // THESE EXPLANATIONS ARE DEPRECATED
-
-    // A given state is labeled by pairs (Ix2old, Ix2exc) of modified quantum numbers,
-    // say (Ix2old[0][0], Ix2exc[0][0]) ... (Ix2old[0][nexc[0] ], Ix2exc[0][nexc[0] ]) at level 0
-    // with the constraint that Ix2old and Ix2exc are strictly ordered.
-    // Higher levels are treated similarly.
-
-    // By definition, the descendents are constructed according to two possibilities:
-
-    // First, if type_required == 0, by taking the inner(right)most index particle (so at index (nexc[i]+1)/2)
-    // and moving it one step further in the direction it was moving, if possible.
-    // The expected data_value in this case is simply data_value_ref of the ancestor.
-
-    // Second, if type_required == 1, by adding another Ix2old Ix2exc pair,
-    // letting the hole take all possible values of Ix2old between Ix2old[i][(nexc[i]+1)/2 ] and Ix2old[i][(nexc[i]+1)/2 + 1]
-    // and putting the corresponding Ix2exc to the first available position on the left or right (if any),
-    // without annihilating another hole. In other words, we add a Ix2old Ix2exc pair in the (right)middle, respecting the ordering rule.
-    // The expected data_value in this case is the ancestor's data_value multiplied by the cost of adding a particle-hole (ph_cost).
-
-    // IMPORTANT NOTE: THE IMPLEMENTATION IS FOR NOW ONLY VALID FOR LIEB-LINIGER.
-    // The logic is however extensible to the case of multiple base levels.
-
-    // Number of descendents:
-    int ndesc_possible = (type_required == 0 ? 1 : ScanState.N * 2); // upper bound guess; should be refined.
-    Vect<string> desclabelfound (ndesc_possible);
-    Vect<int> desctypefound (ndesc_possible);
-    int ndesc_found = 0;
-
-    // First read the current state data:
-    State_Label_Data currentdata = Read_State_Label (ScanState.label, OriginState.Ix2);
-
-    if (type_required == 0) {
-      // We move the inner(right)most hole one more step towards the center of the block of Ix2 in which it sits.
-
-      // Is there already an excitation at level 0? If so, move inner(right)most particle one step further.
-      if (currentdata.nexc[0] > 0) {
-
-	//cout << "Here a" << endl;
-
-	// Produce a new state_label_data:
-	State_Label_Data descdata = Read_State_Label (ScanState.label, OriginState.Ix2);
-	// Identify the inner(right)most excitation:
-	int innerindex = currentdata.nexc[0]/2;
-
-	int newholepos = currentdata.Ix2old[0][innerindex];
-
-	// Determine the size of the block of OriginState.Ix2 in which this hole sits:
-	int nroccupiedtoleft = 0;
-	while (OriginState.Ix2.includes (currentdata.Ix2old[0][innerindex] -2*(nroccupiedtoleft + 1)))
-	//while (ScanState.Ix2.includes (currentdata.Ix2old[0][innerindex] -2*(nroccupiedtoleft + 1)))
-	//while (OriginState.Ix2.includes (currentdata.Ix2old[0][innerindex] -2*(nroccupiedtoleft + 1))
-	//&& ScanState.Ix2.includes (currentdata.Ix2old[0][innerindex] -2*(nroccupiedtoleft + 1)))
-	  ////&& !currentdata.Ix2old[0].includes(currentdata.Ix2old[0][innerindex] -2*(nroccupiedtoleft + 1)))
-	  nroccupiedtoleft++;
-	int nroccupiedtoright = 0;
-	while (OriginState.Ix2.includes (currentdata.Ix2old[0][innerindex] +2*(nroccupiedtoright + 1)))
-	//while (ScanState.Ix2.includes (currentdata.Ix2old[0][innerindex] +2*(nroccupiedtoright + 1)))
-	//while (OriginState.Ix2.includes (currentdata.Ix2old[0][innerindex] +2*(nroccupiedtoright + 1))
-	//     && ScanState.Ix2.includes (currentdata.Ix2old[0][innerindex] +2*(nroccupiedtoright + 1)))
-	  ////&& !currentdata.Ix2old[0].includes(currentdata.Ix2old[0][innerindex] +2*(nroccupiedtoright + 1)))
-	  nroccupiedtoright++;
-
-	//cout << "Here b" << endl;
-
-	//cout << "holeIx2 = " << currentdata.Ix2old[0][innerindex] << "\tnroccupiedtoleft = " << nroccupiedtoleft << "\tnroccupiedtoright = " << nroccupiedtoright << endl;
-	// Move the hole further in:
-	// Requirements for end configuration: hole must be further near middle, so
-	// if nroccupiedtoleft < nroccupiedtoright, move towards the right.
-	// if nroccupiedtoleft > nroccupiedtoright, move towards the left.
-	// If the two are then equal, choose hole moved from right to left.
-	// if moved towards the right: nroccupiedtoleft + 1 < nroccupiedtoright - 1
-	// if moved towards the left:  nroccupiedtoleft - 1 <= nroccupiedtoright + 1
-
-	if (//nroccupiedleft < nroccupiedtoright && // redundant from next condition
-	    nroccupiedtoleft - nroccupiedtoright + 2 < 0) newholepos += 2;
-	else if (// nroccupiedright > nroccupiedtoright && // redundant from next condition
-		 nroccupiedtoleft - nroccupiedtoright - 2 >= 0) newholepos -= 2;
-
-	if (newholepos != currentdata.Ix2old[0][innerindex] // we have successfully moved the hole
-	    && !currentdata.Ix2old[0].includes(newholepos) // new hole position is not already taken
-	    )
-	  { // we have found a descendent
-	    descdata.Ix2old[0][innerindex] = newholepos;
-	    desclabelfound[ndesc_found] = Return_State_Label (descdata, OriginState.Ix2);
-	    desctypefound[ndesc_found++] = 0;
-	  }
-
-	//cout << "Here c" << endl;
-
-      } // if (nexc > 0)
-    } // if (type_required == 0)
-
-
-    if (type_required == 1) {
-      // Is there already an excitation at level 0? If so, move inner(right)most particle one step further.
-      if (currentdata.nexc[0] > 0) {
-	// Produce a new state_label_data:
-	State_Label_Data descdata = Read_State_Label (ScanState.label, OriginState.Ix2);
-	// Identify the inner(right)most excitation:
-	int innerindex = currentdata.nexc[0]/2;
-
-	int newpartpos = currentdata.Ix2exc[0][innerindex];
-
-	// Determine the size of the vacancy block of OriginState.Ix2 in which this particle sits:
-	int nremptytoleft = 0;
-	while (!OriginState.Ix2.includes (currentdata.Ix2exc[0][innerindex] -2*(nremptytoleft+1)))
-	  nremptytoleft++;
-	int nremptytoright = 0;
-	while (!OriginState.Ix2.includes (currentdata.Ix2exc[0][innerindex] + 2*(nremptytoright+1)))
-	  nremptytoright++;
-	// BUG HERE: doesn't terminate if there is no particle to the left or right...
-
-
-	if (nremptytoleft - nremptytoright + 2 < 0) newpartpos += 2;
-	else if (nremptytoleft - nremptytoright + 2 >= 0) newpartpos -= 2;
-
-	if (newpartpos != currentdata.Ix2exc[0][innerindex] // we have successfully moved the hole
-	    && !currentdata.Ix2exc[0].includes(newpartpost) // new particle position is not already taken
-	    )
-	  { // we have found a descendent
-	    descdata.Ix2exc[0][innerindex] = newpartpos;
-	    desclabelfound[ndesc_found] = Return_State_Label (descdata, OriginState.Ix2);
-	    desctypefound[ndesc_found++] = 1;
-	  }
-
-	// FROM ++T, DEPRECATED IN ++G:
-	// Can this Ix2exc be moved one more step in the same direction? Try it out.
-	////descdata.Ix2exc[0][innerindex] += (currentdata.Ix2exc[0][innerindex] > currentdata.Ix2old[0][innerindex] ? 2 : -2);
-	//int newpartpos = currentdata.Ix2exc[0][innerindex] + (currentdata.Ix2exc[0][innerindex] > currentdata.Ix2old[0][innerindex] ? 2 : -2);
-	// Move until we get a consistent new configuration:
-	// First, this quantum number was not already occupied
-	// Second, this quantum number does not annihilate one of the holes:
-	//while (ScanState.Ix2.includes(newpartpos) || currentdata.Ix2old[0].includes(newpartpos))
-	// newpartpos += (currentdata.Ix2exc[0][innerindex] > currentdata.Ix2old[0][innerindex] ? 2 : -2);
-
-	// If this new particle position is to the right of the Ix2exc of one index lower, we've got a descendent:
-	//if (innerindex == 0 || newpartpos > currentdata.Ix2exc[0][innerindex - 1]
-	//  //&& abs(newpartpos) <= 13 // TEMPORARY LIMIT: INDUCE FINITE NR OF STATES
-	  //  && (innerindex == currentdata.nexc[0] - 1 || newpartpos < currentdata.Ix2exc[0][innerindex + 1])) {
-	  // We have a descendent,
-	  ////cout << "\tFound increased displacement at innerindex " << innerindex << endl;
-	  ////cout << descdata.Ix2exc[0][innerindex] << " to " << newpartpos << endl;
-	  //descdata.Ix2exc[0][innerindex] = newpartpos;
-	  //desclabelfound[ndesc_found] = Return_State_Label (descdata, OriginState.Ix2);
-	  //desctypefound[ndesc_found++] = 1;
-	//}
-
-      } //if (currentdata.nexc[0] > 0)
-    } // if (type_required == 1)
-
-
-    if (type_required == 2) {
-      // Now add a new p-h pair at the inner(right)most position, scanning the hole position immediately
-      // but putting the particle to the first available left and right positions (if any).
-
-      // type_required == 2 means that the hole must be either at the edge of an OriginIx2 domain, or next to an existing hole.
-
-      // Define the objects for the newstatedata:
-      Vect<int> type_new = currentdata.type;
-      Vect<int> M_new = currentdata.M;
-      Vect<int> nexc_new = currentdata.nexc;
-      nexc_new[0] += 1; // we drill one more particle-hole pair at level 0
-      int index_new = (currentdata.nexc[0] + 1)/2; // we put the new p-h pair at index index_new.
-
-      int ntypespresent = 1; // only one type for LiebLin
-      Vect<Vect<int> > Ix2old_new(ntypespresent);
-      Vect<Vect<int> > Ix2exc_new(ntypespresent);
-      for (int it = 0; it < ntypespresent; ++it) Ix2old_new[it] = Vect<int>(ABACUS::max(nexc_new[it],1));
-      for (int it = 0; it < ntypespresent; ++it) Ix2exc_new[it] = Vect<int>(ABACUS::max(nexc_new[it],1));
-
-      // Copy earlier data in:
-      for (int it = 0; it < ntypespresent; ++it) {
-	for (int i = 0; i < currentdata.nexc[it]; ++i) {
-	  Ix2old_new[it][i + (i >= index_new)] = currentdata.Ix2old[it][i];
-	  Ix2exc_new[it][i + (i >= index_new)] = currentdata.Ix2exc[it][i];
-	}
-	//cout << "current Ix2old: " << currentdata.Ix2old[0] << endl;
-	//cout << "current Ix2exc: " << currentdata.Ix2exc[0] << endl;
-	//cout << "allocated new Ix2old: " << Ix2old_new[0] << endl;
-	//cout << "allocated new Ix2exc: " << Ix2exc_new[0] << endl;
-      }
-
-      State_Label_Data descdatanewph (type_new, M_new, nexc_new, Ix2old_new, Ix2exc_new);
-
-      //cout << "\ttype: " << descdatanewph.type << endl;
-      //cout << "\tM: " << descdatanewph.M << endl;
-      //cout << "\tnexc: " << descdatanewph.nexc << endl;
-      //cout << "\tIx2old: " << descdatanewph.Ix2old[0] << endl;
-      //cout << "\tIx2new: " << descdatanewph.Ix2exc[0] << endl;
-
-      // Now find all the possible positions for the new hole:
-      // The new hole should be between Ix2old_new[0][innerindex - 1] and Ix2old_new[0][innerindex + 1]
-      // if those exist (i.e., modulo lack of constraint at the left/right boundaries).
-      int newpartpos;
-      for (int i = 0; i < ScanState.N; ++i) {
-
-	// Determine the size of the block of OriginState.Ix2 in which this hole sits:
-	int nroccupiedtoleft = 0;
-	while (OriginState.Ix2.includes (ScanState.Ix2[i] -2*(nroccupiedtoleft + 1)))
-	  nroccupiedtoleft++;
-	int nroccupiedtoright = 0;
-	while (OriginState.Ix2.includes (ScanState.Ix2[i] +2*(nroccupiedtoright + 1)))
-	  nroccupiedtoright++;
-	// This will be used in the next conditions to check whether the new hole would sit in a left or right block.
-
-	if ((currentdata.nexc[0] == 0 // no earlier p-h in state so no left boundary for new hole; index_new == 0
-	     || ScanState.Ix2[i] > currentdata.Ix2old[0][index_new - 1])
-	    && (currentdata.nexc[0] <= 1 // at most one p-h in state so no right boundary for new hole; index_new <= 1
-		|| ScanState.Ix2[i] < currentdata.Ix2old[0][index_new])
-	    //&& (currentdata.nexc[0] == 0 || !currentdata.Ix2exc[0].includes(ScanState.Ix2[i]))
-	    // new hole is not one of the already existing p excitations // FULFILLED BY ABOVE CONDITIONS IF EXC ARE ORDERED
-	    //&& (!ScanState.Ix2.includes(ScanState.Ix2[i] - 2) || !ScanState.Ix2.includes(ScanState.Ix2[i] + 2))
-	    && (
-		// New hole must be created on the boundary of an OriginState Ix2 domain,
-		// or next to a preexisting hole in the domain.
-		(!OriginState.Ix2.includes(ScanState.Ix2[i] - 2))
-		// new hole is on the left boundary of an OriginState Ix2 domain.
-		|| !OriginState.Ix2.includes(ScanState.Ix2[i] + 2)
-		// new hole is on the right boundary of an OriginState Ix2 domain.
-		//|| (currentdata.nexc[0] > 0 && ScanState.Ix2[i] == currentdata.Ix2old[0][index_new - 1] + 2)
-		|| type_required == 2 && (currentdata.nexc[0] > 0 && nroccupiedtoleft < nroccupiedtoright
-		    && ScanState.Ix2[i] == currentdata.Ix2old[0][index_new - 1] + 2)
-		// new hole is in a left domain and immediately to the right of first hole to its left
-		//|| (currentdata.nexc[0] > 1 && ScanState.Ix2[i] == currentdata.Ix2old[0][index_new] - 2)
-		|| type_required == 2 && (currentdata.nexc[0] > 1 && nroccupiedtoleft >= nroccupiedtoright
-		    && ScanState.Ix2[i] == currentdata.Ix2old[0][index_new] - 2)
-		// new hole is in a right domain and immediately to the left of first hole to its right
-		)
-	    ) {
-
-	  bool acceptable_hole = true;
-
-	  if (currentdata.nexc[0] > 0 && currentdata.Ix2exc[0].includes(ScanState.Ix2[i])) acceptable_hole = false;
-
-	  // Determine the size of the block of Ix2 in which this hole sits:
-	  int nroccupiedtoleft = 0;
-	  while (OriginState.Ix2.includes (ScanState.Ix2[i] -2*(nroccupiedtoleft + 1)))
-	    nroccupiedtoleft++;
-	  int nroccupiedtoright = 0;
-	  while (OriginState.Ix2.includes (ScanState.Ix2[i] +2*(nroccupiedtoright + 1)))
-	    nroccupiedtoright++;
-
-	  // The hole is unacceptable if it breaks the `towards the block center' rule:
-	  if (nroccupiedtoleft < nroccupiedtoright // new hole would be in left part of block
-	      && OriginState.Ix2.includes(ScanState.Ix2[i] - 2) // and is not at the boundary
-	      && currentdata.nexc[0] > 0 && ScanState.Ix2[i] != currentdata.Ix2old[0][index_new - 1] + 2)
-	    // and it's not immediately to the left of a preexisting hole
-	    acceptable_hole = false;
-	  if (nroccupiedtoleft >= nroccupiedtoright // new hole could be in right part of block
-	      && OriginState.Ix2.includes(ScanState.Ix2[i] + 2) // and is not at the boundary
-	      && currentdata.nexc[0] > 2 && ScanState.Ix2[i] != currentdata.Ix2old[0][index_new] - 2)
-	    // and it's not immediately to the right of a preexisting hole
-	    acceptable_hole = false;
-
-	  if (acceptable_hole) {
-	    //cout << "Found a possible hole position at index " << i << " and Ix2 " << ScanState.Ix2[i] << endl;
-
-	    // ScanState.Ix2[i] is an allowable new hole position.
-
-
-	    // We now look for all possible particle positions,
-	    // the allowable ones being either at the edge of a block, or next to an existing particle:
-
-
-	    // Find the first available particle position to the left:
-	    newpartpos = ScanState.Ix2[i] - 2;
-	    // This must be lower than the Ix2exc of one index higher (if any):
-	    if (currentdata.nexc[0] >= 2) newpartpos = ABACUS::min(newpartpos, currentdata.Ix2exc[0][index_new] - 2);
-	    // The new particle position must not already be occupied in ScanState,
-	    // and must not be at one of the already specified holes
-	    while (ScanState.Ix2.includes(newpartpos)
-		   || (currentdata.nexc[0] > 0 && currentdata.Ix2old[0].includes(newpartpos))) newpartpos -= 2;
-	    // If this new particle position is to the right of the Ix2exc of one index lower (if any), we've got a descendent:
-	    if (currentdata.nexc[0] == 0 || newpartpos > currentdata.Ix2exc[0][index_new - 1]) {
-	      //cout << "\tFound a particle position (moving L) at " << newpartpos << " with index " << index_new << endl;
-	      //if (abs(newpartpos) <= 13) { // TEMPORARY LIMIT: INDUCE FINITE NR OF STATES
-	      descdatanewph.Ix2old[0][index_new] = ScanState.Ix2[i];
-	      descdatanewph.Ix2exc[0][index_new] = newpartpos;
-	      //cout << "ScanState.Ix2[i] = " << ScanState.Ix2[i] << "\tnewpartpos = " << newpartpos << endl;
-	      //cout << "\tIx2old: " << descdatanewph.Ix2old[0] << endl;
-	      //cout << "\tIx2new: " << descdatanewph.Ix2exc[0] << endl;
-	      desclabelfound[ndesc_found] = Return_State_Label (descdatanewph, OriginState.Ix2);
-	      desctypefound[ndesc_found++] = type_required;
-	      //cout << "\tLabel found = " << desclabelfound[ndesc_found - 1] << endl;
-	      //} // TEMP
-	    }
-
-
-	    // Now find the first particle position to the right:
-	    newpartpos = ScanState.Ix2[i] + 2;
-	    // This must be higher than the Ix2exc of one index lower (if any):
-	    if (index_new > 0) newpartpos = ABACUS::max(newpartpos, currentdata.Ix2exc[0][index_new - 1] + 2);
-	    // The new particle position must not already be occupied in ScanState,
-	    // and must not be at one of the already specified holes
-	    while (ScanState.Ix2.includes(newpartpos)
-		   || (currentdata.nexc[0] > 0 && currentdata.Ix2old[0].includes(newpartpos))) newpartpos += 2;
-	    // If this new particle position is to the left of the Ix2exc of one index higher (if any), we've got a descendent:
-	    if (currentdata.nexc[0] <= 1 || newpartpos < currentdata.Ix2exc[0][index_new]) {
-	      //// If this new particle position is to the left of the Ix2exc of one index higher (if any), got descendent:
-	      //if (currentdata.nexc[0] <= 1 || newpartpos < currentdata.Ix2exc[0][index_new]) {
-	      //cout << "\tFound a particle position (moving R) at " << newpartpos << " with index " << index_new << endl;
-
-	      //if (abs(newpartpos) <= 13) { // TEMPORARY LIMIT: INDUCE FINITE NR OF STATES
-	      descdatanewph.Ix2old[0][index_new] = ScanState.Ix2[i];
-	      descdatanewph.Ix2exc[0][index_new] = newpartpos;
-	      //cout << "ScanState.Ix2[i] = " << ScanState.Ix2[i] << "\tnewpartpos = " << newpartpos << endl;
-	      //cout << "\tIx2old: " << descdatanewph.Ix2old[0] << endl;
-	      //cout << "\tIx2new: " << descdatanewph.Ix2exc[0] << endl;
-	      desclabelfound[ndesc_found] = Return_State_Label (descdatanewph, OriginState.Ix2);
-	      desctypefound[ndesc_found++] = type_required;
-	      //cout << "\tLabel found = " << desclabelfound[ndesc_found - 1] << endl;
-	      //} // TEMP
-	    }
-	  } // if (acceptable_hole
-	}
-      } // for i
-    } // if (type_required == 2 || type_required == 3)
-
-
-    Vect<string> desclabelfoundresized (ndesc_found);
-    //Vect_DP desctypefoundresized (ndesc_found);
-    for (int i = 0; i < ndesc_found; ++i) {
-      desclabelfoundresized[i] = desclabelfound[i];
-      //desctypefoundresized[i] = descvalue[i];
-    }
-
-    //for (int i = 0; i < ndesc_found; ++i) if (desclabelfound[i] == "16_2_13@17:15@19") cout << "16_2_13@17:15@19 descended from " << ScanState.label << endl;
-
-    //if (ScanState.label == "16_0_") cout << "State 16_0_: descendents " << desclabelfound << endl;
-    //if (ScanState.label == "16_1_13@17") cout << "State 16_1_13@17: descendents " << desclabelfound << endl;
-
-    //cout << "Found " << ndesc_found << " descendents, " << desclabelfoundresized << endl;
-
-    //Descendent_Data descdata();
-
-    //descdata.label = desclabelfoundresized;
-    //descdata.type = desctypefoundresized;
-
-    return(desclabelfoundresized);
-    //return(descdata);
-  }
-
-
-  //Vect<string> Descendents (const Heis_Bethe_State& ScanState, const Heis_Bethe_State& OriginState)
-  Vect<string> Descendents (const Heis_Bethe_State& ScanState, const Heis_Bethe_State& OriginState, int type_required)
-  //Descendent_Data Descendents (const Heis_Bethe_State& ScanState, const Heis_Bethe_State& OriginState, int type_required);
-  {
-    // NOT IMPLEMENTED YET
-    ABACUSerror("Descendents for Heis not implemented yet.");
-    return(Vect<string>());
-  }
-
-  */
-
 } // namespace ABACUS
diff --git a/src/SCAN/General_Scan.cc b/src/SCAN/General_Scan.cc
index bd9a6d1..88d878a 100644
--- a/src/SCAN/General_Scan.cc
+++ b/src/SCAN/General_Scan.cc
@@ -55,7 +55,8 @@ namespace ABACUS {
   // There is then no need of scanning over types 0 - 8.
   // By convention, types 9, 10 and 11 can call types 9 - 14; types 12-14 can only call types 12-14.
 
-  bool Expect_ph_Recombination_iK_Up (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2)
+  bool Expect_ph_Recombination_iK_Up (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2,
+				      const Vect<Vect<int> >& BaseScanIx2)
   {
     // This function returns true if descending further can lead to a particle-hole recombination.
     // The criteria which are used are:
@@ -82,12 +83,16 @@ namespace ABACUS {
     } while (!excfound);
     // If we haven't found an excitation, then exclevel == ScanIx2.size() and excindex = 0;
 
-    if (excfound && !BaseScanIx2[exclevel].includes(ScanIx2[exclevel][excindex])) { // there exists an already dispersing excitation which isn't in Origin
+    if (excfound && !BaseScanIx2[exclevel].includes(ScanIx2[exclevel][excindex])) {
+      // there exists an already dispersing excitation which isn't in Origin
       // Is there a possible recombination?
-      if (excindex < ScanIx2[exclevel].size() - 1) { // a particle to the right of excitation has already move right, so there is a hole
-	// check that there exists an occupied Ix2 in Origin sitting between the excitation and the next Ix2 to its right in ScanIx2
+      if (excindex < ScanIx2[exclevel].size() - 1) {
+	// a particle to the right of the excitation has already moved right, so there is a hole
+	// check that there exists an occupied Ix2 in Origin sitting between the excitation
+	// and the next Ix2 to its right in ScanIx2
 	for (int alpha = BaseScanIx2[exclevel].size() - 1; alpha >= 0; --alpha)
-	  if (BaseScanIx2[exclevel][alpha] > ScanIx2[exclevel][excindex] && BaseScanIx2[exclevel][alpha] < ScanIx2[exclevel][excindex + 1]) {
+	  if (BaseScanIx2[exclevel][alpha] > ScanIx2[exclevel][excindex]
+	      && BaseScanIx2[exclevel][alpha] < ScanIx2[exclevel][excindex + 1]) {
 	    return(true);
 	  }
       }
@@ -114,7 +119,8 @@ namespace ABACUS {
 
 
 
-  bool Expect_ph_Recombination_iK_Down (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2, const Vect<Vect<int> >& BaseScanIx2)
+  bool Expect_ph_Recombination_iK_Down (string ScanIx2_label, const Vect<Vect<int> >& OriginIx2,
+					const Vect<Vect<int> >& BaseScanIx2)
   {
     // This function returns true if descending further can lead to a particle-hole recombination.
     // The criteria which are used are:
@@ -128,14 +134,12 @@ namespace ABACUS {
     int excindex = 0;
     bool excfound = false;
 
-    //cout << "Looking for exclevel and excindex for " << endl << "\tBaseIx2 = " << BaseScanIx2 << endl << "\tScanIx2 = " << ScanIx2 << endl;
     do {
       exclevel++;
       if (exclevel == ScanIx2.size()) { // there isn't a single left-moving quantum number in ScanIx2
 	break;
       }
       for (int alpha = ScanIx2[exclevel].size() - 1; alpha >= 0; --alpha) {
-	//cout << exclevel << "\t" << alpha << "\t" << ScanIx2[exclevel][alpha] << "\t" << BaseScanIx2[exclevel][alpha] << "\t" << (ScanIx2[exclevel][alpha] < BaseScanIx2[exclevel][alpha]) << endl;
 	if (ScanIx2[exclevel][alpha] < BaseScanIx2[exclevel][alpha]) {
 	  excindex = alpha;
 	  excfound = true;
@@ -146,12 +150,15 @@ namespace ABACUS {
     // If we haven't found an excitation, then exclevel == ScanIx2.size() and excindex = 0;
     if (!excfound) excindex = ScanIx2[exclevel].size() - 1;
 
-    if (excfound && !BaseScanIx2[exclevel].includes(ScanIx2[exclevel][excindex])) { // there exists an already dispersing excitation which isn't in Origin
+    if (excfound && !BaseScanIx2[exclevel].includes(ScanIx2[exclevel][excindex])) {
+      // there exists an already dispersing excitation which isn't in Origin
       // Is there a possible recombination?
       if (excindex > 0) { // a particle to the left of excitation has already moved left, so there is a hole
-	// check that there exists an occupied Ix2 in Origin sitting between the excitation and the next Ix2 to its left in ScanIx2
+	// check that there exists an occupied Ix2 in Origin sitting between the excitation
+	// and the next Ix2 to its left in ScanIx2
 	for (int alpha = 0; alpha < BaseScanIx2[exclevel].size(); ++alpha)
-	  if (BaseScanIx2[exclevel][alpha] > ScanIx2[exclevel][excindex - 1] && BaseScanIx2[exclevel][alpha] < ScanIx2[exclevel][excindex]) {
+	  if (BaseScanIx2[exclevel][alpha] > ScanIx2[exclevel][excindex - 1]
+	      && BaseScanIx2[exclevel][alpha] < ScanIx2[exclevel][excindex]) {
 	    return(true);
 	  }
       }
@@ -178,265 +185,12 @@ namespace ABACUS {
 
 
 
-  /*
-  template<class Tstate>
-  void Descend_and_Compute_for_Fixed_Base (char whichDSF, Tstate& AveragingState, Tstate& BaseScanState, Tstate& ScanState,
-					   int type_required, int iKmin, int iKmax, int iKmod,
-					   //Scan_Thread_List& paused_thread_list,
-					   //Scan_Thread_Set& paused_thread_set,
-					   Scan_Thread_Data& paused_thread_data,
-					   //thresholdremoved DP& running_scan_threshold, //DP ref_abs_data_value,
-					   DP& ph_cost, int Max_Secs, DP sumrule_factor, DP Chem_Pot, Scan_Info& scan_info,
-					   fstream& RAW_outfile, fstream& INADM_outfile, int& ninadm,
-					   fstream& CONV0_outfile, int& nconv0, fstream& STAT_outfile)
-  {
-
-    //cout << "Calling descent with type_required " << type_required << " on state " << ScanState.label << "\t" << Return_Ix2_from_Label(ScanState.label, AveragingState.Ix2) << endl;
-    //cout << "Calling descent with type_required " << type_required << " on state " << ScanState.label << endl;
-    ScanState.Compute_Momentum();
-    Vect<string> desc_label;
-
-    // ++G_7 logic
-    bool disperse_only_current_exc_up = false;
-    if (type_required == 14 || type_required == 8 || type_required == 7 || type_required == 6) disperse_only_current_exc_up = true;
-    bool preserve_nexc_up = false;
-    if (type_required == 13 || type_required == 5 || type_required == 4 || type_required == 3) preserve_nexc_up = true;
-    bool disperse_only_current_exc_down = false;
-    if (type_required == 11 || type_required == 8 || type_required == 5 || type_required == 2) disperse_only_current_exc_down = true;
-    bool preserve_nexc_down = false;
-    if (type_required == 10 || type_required == 7 || type_required == 4 || type_required == 1) preserve_nexc_down = true;
-
-    if (whichDSF == 'B') { // symmetric state scanning
-      if (type_required >= 9 && type_required <= 11)
-	desc_label = Descendent_States_with_iK_Stepped_Down_rightIx2only (ScanState.label, BaseScanState, disperse_only_current_exc_down, preserve_nexc_down);
-      else if (type_required >= 12 && type_required <= 14)
-	desc_label = Descendent_States_with_iK_Stepped_Up_rightIx2only (ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up);
-    }
-    else {
-      if (type_required >= 0 && type_required <= 8) {
-	desc_label = Descendent_States_with_iK_Preserved(ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up, disperse_only_current_exc_down, preserve_nexc_down);
-      }
-      else if (type_required >= 9 && type_required <= 11)
-	desc_label = Descendent_States_with_iK_Stepped_Down (ScanState.label, BaseScanState, disperse_only_current_exc_down, preserve_nexc_down);
-      else if (type_required >= 12 && type_required <= 14)
-	desc_label = Descendent_States_with_iK_Stepped_Up (ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up);
-    }
-    //cout << "Found " << desc_label.size() << " descendents." << endl;
-    //for (int is = 0; is < desc_label.size(); ++is) cout << "is " << is << "\tdesc: " << desc_label[is] << "\t" << Return_Ix2_from_Label(desc_label[is], AveragingState.Ix2) << endl;
-    //char a;
-    //cin >> a;
-
-    //cout << "OK for descend on " << ScanState.label << " with type_required = " << type_required << endl;
-    //cout << desc_label << endl;
-
-    //Vect<string> desc_label = desc_data.label;
-    //Vect<int> desc_type = desc_data.type;
-
-    //bool disp_OK = false;
-
-    //if (ScanState.label == "7|2:1_0|1_|0@2") {
-    //if (ScanState.label == "64_1_63@319") {
-    //if (ScanState.label == "32_1_-29@-33") {
-
-    if (ScanState.label == LABEL_TO_CHECK) {
-      cout << "Called Descend on state " << ScanState << endl;
-      cout << "For type_required == " << type_required << ", found " << desc_label.size() << " descendents, ";
-      for (int i = 0; i < desc_label.size(); ++i) {
-	//cout << desc_label[i] << "\t";    cout << endl;
-	ScanState.Set_to_Label (desc_label[i], BaseScanState.Ix2);
-	ScanState.Compute_All(true);
-	cout << ScanState << endl;
-      }
-      cout << "Do you want to follow one of these descendents? (y/n)" << endl;
-      char a; cin >> a;
-      if (a == 'y') {
-	cout << "Which label do you want to follow?" << endl;
-	cin >> LABEL_TO_CHECK;
-      }
-    }
-
-
-      string label_here = ScanState.label;
-      //int ScanState_iK = ScanState.iK;
-
-      for (int idesc = 0; idesc < desc_label.size(); ++idesc) {
-
-	//cout << "\tDealing with descendent " << idesc << " out of " << desc_label.size() << " with label " << desc_label[idesc] << endl;
-	//cout << "\tfrom state with label " << label_here << " and of type_required " << type_required << endl;
-
-	clock_t start_time_here = clock();
-
-	//if (desc_label[idesc] == "64_2_0yvv7") {
-	if (false) {
-	  cout << "Found " << desc_label[idesc] << " as descendent of type " << type_required << " of " << label_here << endl;
-	  ScanState.Set_to_Label (label_here, BaseScanState.Ix2);
-	  cout << ScanState.Ix2 << endl;
-	  //cout << "Found " << desc_label.size() << " descendents, " << endl;
-	  //for (int i = 0; i < desc_label.size(); ++i) cout << desc_label[i] << "\t";    cout << endl;
-	  ScanState.Set_to_Label (desc_label[idesc], BaseScanState.Ix2);
-	  cout << ScanState.Ix2 << endl;
-	  //ScanState.Compute_All(true);
-	  //cout << "Resulting Ix2: " << ScanState.Ix2 << endl;
-	  //cout << ScanState << endl;
-	  //cout << "Admissible: " << ScanState.Check_Admissibility(whichDSF) << endl;
-	  //char a; cin >> a;
-	}
-
-
-	ScanState.Set_to_Label (desc_label[idesc], BaseScanState.Ix2);
-
-	bool admissible = ScanState.Check_Admissibility(whichDSF);
-
-	DP data_value = 0.0;
-
-	scan_info.Ndata++;
-
-	ScanState.conv = false;
-	ScanState.Compute_Momentum(); // since momentum is used as forced descent criterion
-
-
-	if (admissible) {
-
-	  ScanState.Compute_All (idesc == 0);
-	  //ScanState.Compute_All (true);
-
-	  //scan_info.Ndata++;
-
-	  if (ScanState.conv) {
-	    scan_info.Ndata_conv++;
-
-	    // Put momentum in fundamental window, if possible:
-	    int iKexc = ScanState.iK - AveragingState.iK;
-	    while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
-	    while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
-
-	    data_value = Compute_Matrix_Element_Contrib (whichDSF, iKmin, iKmax, ScanState, AveragingState, Chem_Pot, RAW_outfile);
-	    if (iKexc >= iKmin && iKexc <= iKmax) scan_info.sumrule_obtained += data_value*sumrule_factor;
-	    //cout << "data_value found = " << data_value * sumrule_factor << endl;
-
-	    // Uncomment line below if .stat file is desired:
-	    //STAT_outfile << setw(20) << label_here << "\t" << setw(5) << type_required << "\t" << setw(16) << std::scientific << running_scan_threshold << "\t" << setw(20) << ScanState.label << "\t" << setw(16) << data_value << "\t" << setw(16) << std::fixed << setprecision(8) << data_value/running_scan_threshold << endl;
-
-	  } // if (ScanState.conv)
-	  else {
-	    if (nconv0++ < 1000)
-	      CONV0_outfile << setw(25) << ScanState.label << setw(25) << ScanState.diffsq << setw(5) << ScanState.Check_Rapidities()
-			    << setw(25) << ScanState.String_delta() << endl;
-	    scan_info.Ndata_conv0++;
-	    //cout << "State did not converge." << endl;
-	  }
-	} // if (admissible)
-
-	else {
-	  if (ninadm++ < 1000000) INADM_outfile << ScanState.label << endl;
-	  scan_info.Ninadm++;
-	  //cout << "State was inadmissible." << endl;
-	  // Set data_value to enable continued scanning later on:
-	  //thresholdremoved data_value = 0.1* running_scan_threshold;
-	}
-
-	clock_t stop_time_here = clock();
-
-	scan_info.CPU_ticks += stop_time_here - start_time_here;
-
-	Tstate state_to_descend;  state_to_descend = ScanState; // for checking
-
-	ScanState.Compute_Momentum();
-	// Put momentum in fundamental window, if possible:
-	int iKexc = ScanState.iK - AveragingState.iK;
-	while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
-	while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
-
-	// ++G_7 logic
-	// Momentum-preserving are only descended to momentum-preserving.
-	// Momentum-increasing are only descended to momentum-preserving and momentum-increasing.
-	// Momentum-decreasing are only descended to momentum-preserving and momentum-decreasing.
-	Vect<bool> allowed(false, 15);
-	if (whichDSF == 'B') {
-	  // We scan over symmetric states. Only types 14 down to 9 are allowed.
-	  if (type_required >= 9 && type_required <= 11) { // iK stepped down on rightIx2; step further up or down
-	  allowed[9] = true; allowed[10] = true; allowed[11] = true;
-	  allowed[12] = true; allowed[13] = true; allowed[14] = true;
-	  }
-	  else if (type_required >= 12 && type_required <= 14) { // iK stepped up on rightIx2; only step further up
-	    allowed[12] = true; allowed[13] = true; allowed[14] = true;
-	  }
-	}
-	else {
-	  if (type_required >= 0 && type_required <= 8) { // momentum-preserving
-	    allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-	    allowed[9] = false;
-	    allowed[12] = false;
-	  }
-	  if (type_required >= 9 && type_required <= 11) { // momentum-decreasing
-	    allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-	    allowed[9] = (iKexc > iKmin);
-	    allowed[12] = false;
-	  }
-	  if (type_required >= 12 && type_required <= 14) { // momentum-increasing
-	    allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-	    allowed[9] = false;
-	    allowed[12] = (iKexc < iKmax);
-	  }
-	  // The others are just copies of the ones above:
-	  allowed[1] = allowed[0]; allowed[2] = allowed[0]; allowed[3] = allowed[0]; allowed[4] = allowed[0]; allowed[5] = allowed[0]; allowed[6] = allowed[0]; allowed[7] = allowed[0]; allowed[8] = allowed[0];
-	  allowed[10] = allowed[9]; allowed[11] = allowed[9];
-	  allowed[13] = allowed[12]; allowed[14] = allowed[12];
-	}
-
-
-	for (int type_required_here = 0; type_required_here < 15; ++type_required_here) {
-
-	  if (!allowed[type_required_here]) continue;
-
-	  // Reset ScanState to what it was, if change on first pass
-	  if (type_required_here > 0) ScanState = state_to_descend;
-
-	  // We determine if we carry on scanning based on the data_value obtained, or forcing conditions:
-	  // Forcing conditions:
-	  //if (!admissible || Force_Descent(whichDSF, ScanState, AveragingState, type_required_here, iKmod, Chem_Pot))
-	  ////data_value = 1.01 * running_scan_threshold/ph_cost; // force for all types of desc
-	  //data_value = 1.01 * running_scan_threshold; // only force for no new ph pairs
-	  ////data_value = 1.0; // force for all types of desc
-	  // If we're sitting out of the iKmin & iKmax window, stop:
-	  //if (iKmin != iKmax && (ScanState.iK - AveragingState.iK > iKmax || ScanState.iK - AveragingState.iK < iKmin)) data_value = 0.0;
-
-	  //if (abs(data_value) * (type_required_here != 2 ? 1.0 : ph_cost) > running_scan_threshold
-	  //if ((abs(data_value) > running_scan_threshold
-	  //|| Nr_ph_Recombinations_Possible (ScanState.label, BaseScanState, type_required_here) > 0)
-
-	  //DP expected_abs_data_value = abs(data_value)/pow(ph_cost, DP(Nr_ph_Recombinations_Possible (ScanState.label, BaseScanState, type_required_here)));
-	  DP expected_abs_data_value = abs(data_value);
-
-	  //++G_7 logic
-	  if ((type_required_here == 14 || type_required_here == 8 || type_required_here == 7 || type_required_here == 6)
-	      && Expect_ph_Recombination_iK_Up (ScanState.label, BaseScanState)) expected_abs_data_value /= ph_cost;
-	  if (type_required_here == 12 || type_required_here == 2 || type_required_here == 1 || type_required_here == 0)
-	    expected_abs_data_value *= ph_cost;
-	  if ((type_required_here == 11 || type_required_here == 8 || type_required_here == 5 || type_required_here == 2)
-	      && Expect_ph_Recombination_iK_Down (ScanState.label, BaseScanState)) expected_abs_data_value /= ph_cost;
-	  if (type_required_here == 9 || type_required_here == 6 || type_required_here == 3 || type_required_here == 0)
-	    expected_abs_data_value *= ph_cost;
-
-	  //cout << "\tIncluding thread " << expected_abs_data_value << "\t" << ScanState.label << "\t" << type_required_here << endl;
-	  //paused_thread_set.Include_Thread (expected_abs_data_value, ScanState.label, type_required_here);
-	  paused_thread_data.Include_Thread (expected_abs_data_value, ScanState.label, type_required_here);
-	  //cout << "\tDone including thread." << endl;
-	} // for type_required_here
-
-	//cout << "\tFinished with descendent " << idesc << " out of " << desc_label.size() << " with label " << desc_label[idesc] << endl;
-	//cout << "\tfrom state with label " << label_here << endl;
-      } // for idesc
-
-      //cout << "Finished Descend on state " << endl << ScanState.label << endl;
-
-    return;
-  }
-  */
 
   template<class Tstate>
-  Scan_Info General_Scan (char whichDSF, int iKmin, int iKmax, int iKmod, DP kBT, Tstate& AveragingState, Tstate& SeedScanState,
-			  string defaultScanStatename, int Max_Secs, DP target_sumrule, bool refine, int paralevel, Vect<int> rank, Vect<int> nr_processors)
+  Scan_Info General_Scan (char whichDSF, int iKmin, int iKmax, int iKmod, DP kBT,
+			  Tstate& AveragingState, Tstate& SeedScanState, string defaultScanStatename,
+			  int Max_Secs, DP target_sumrule, bool refine,
+			  int paralevel, Vect<int> rank, Vect<int> nr_processors)
   {
     // Performs the scan over excited states, writing data to file.
 
@@ -449,16 +203,16 @@ namespace ABACUS {
     // In fact, the parallelization can be done in incremental levels.
     // If paralevel == 0, the run is serial.
     // If paralevel == n, the run is parallelized in a tree with n levels of branching.
-    // A paralevel == 1 branching's files have a suffix of the form "_3_8", meaning that this is the rank 3 out of 8 processors.
-    // A paralevel == 2 branching's files have a suffix of the form "_3_8_2_8", meaning that this is the rank 2 out of 8 subscan of the _3_8 scan.
+    // A paralevel == 1 branching's files have a suffix of the form "_3_8", meaning that this
+    //  is rank 3 out of 8 processors.
+    // A paralevel == 2 branching's files have a suffix of the form "_3_8_2_8", meaning that this
+    //  is rank 2 out of 8 within the subscan of the _3_8 scan.
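// A minimal sketch of the suffix convention described above, assuming rank and
// nr_processors are plain std::vector<int> of length paralevel; e.g. rank = {3, 2} and
// nr_processors = {8, 8} produce "_3_8_2_8". The helper name is illustrative only:
#include <sstream>
#include <string>
#include <vector>

std::string Paralevel_Suffix (const std::vector<int>& rank, const std::vector<int>& nr_processors)
{
  std::ostringstream suffix;
  for (int r = 0; r < int(rank.size()); ++r)
    suffix << "_" << rank[r] << "_" << nr_processors[r];
  return suffix.str();
}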
 
-    //clock_t start_time = clock();
-    //clock_t current_time = clock();
 
-    //bool in_parallel = (nr_processors > 1);
     bool in_parallel = (paralevel > 0);
     if (in_parallel && (rank.size() != paralevel || nr_processors.size() != paralevel)) {
-      cout << "paralevel = " << paralevel << "\trank.size() = " << rank.size() << "\tnr_processors.size() = " << nr_processors.size() << endl;
+      cout << "paralevel = " << paralevel << "\trank.size() = " << rank.size()
+	   << "\tnr_processors.size() = " << nr_processors.size() << endl;
       cout << "rank = " << rank << endl;
       cout << "nr_processors = " << nr_processors << endl;
       ABACUSerror("Inconsistent paralevel, rank or nr_processors in General_Scan.");
@@ -466,13 +220,12 @@ namespace ABACUS {
 
     if (in_parallel && !refine) ABACUSerror("Must refine when using parallel ABACUS");
 
-    DP ph_cost = Particle_Hole_Excitation_Cost (whichDSF, AveragingState); // expected cost on data_value of adding a particle-hole excitation.
+    // expected cost on data_value of adding a particle-hole excitation.
+    DP ph_cost = Particle_Hole_Excitation_Cost (whichDSF, AveragingState);
 
     int Max_Secs_used = int(0.9 * Max_Secs); // we don't start any new ithread loop beyond this point
     int Max_Secs_alert = int(0.95 * Max_Secs); // we break any ongoing ithread loop beyond this point
 
-    //clock_t start_time_local = clock();
-
     stringstream filenameprefix;
     Data_File_Name (filenameprefix, whichDSF, iKmin, iKmax, kBT, AveragingState, SeedScanState, defaultScanStatename);
 
@@ -481,8 +234,10 @@ namespace ABACUS {
     string prefix = filenameprefix.str();
 
     stringstream filenameprefix_prevparalevel;  // without the rank and nr_processors of the highest paralevel
-    Data_File_Name (filenameprefix_prevparalevel, whichDSF, iKmin, iKmax, kBT, AveragingState, SeedScanState, defaultScanStatename);
-    if (in_parallel) for (int r = 0; r < paralevel - 1; ++r) filenameprefix << "_" << rank[r] << "_" << nr_processors[r];
+    Data_File_Name (filenameprefix_prevparalevel, whichDSF, iKmin, iKmax, kBT,
+		    AveragingState, SeedScanState, defaultScanStatename);
+    if (in_parallel) for (int r = 0; r < paralevel - 1; ++r)
+		       filenameprefix << "_" << rank[r] << "_" << nr_processors[r];
 
     string prefix_prevparalevel = filenameprefix_prevparalevel.str();
 
@@ -494,9 +249,7 @@ namespace ABACUS {
     stringstream THR_stringstream;    string THR_string;
     stringstream THRDIR_stringstream;    string THRDIR_string;
     stringstream SRC_stringstream;    string SRC_string;
-    //stringstream FSR_stringstream;    string FSR_string;
     stringstream SUM_stringstream;    string SUM_string;
-    //stringstream SUM_prevparalevel_stringstream;    string SUM_prevparalevel_string;
 
     RAW_stringstream << prefix << ".raw";
     INADM_stringstream << prefix << ".inadm";
@@ -506,9 +259,7 @@ namespace ABACUS {
     THR_stringstream << prefix << ".thr";
     THRDIR_stringstream << prefix << "_thrdir";
     SRC_stringstream << prefix << ".src";
-    //FSR_stringstream << prefix << ".fsr";
     SUM_stringstream << prefix << ".sum";
-    //SUM_prevparalevel_stringstream << prefix_prevparalevel << ".sum";
 
     RAW_string = RAW_stringstream.str();    const char* RAW_Cstr = RAW_string.c_str();
     INADM_string = INADM_stringstream.str();    const char* INADM_Cstr = INADM_string.c_str();
@@ -517,9 +268,7 @@ namespace ABACUS {
     LOG_string = LOG_stringstream.str();    const char* LOG_Cstr = LOG_string.c_str();
     THR_string = THR_stringstream.str();    const char* THR_Cstr = THR_string.c_str();
     SRC_string = SRC_stringstream.str();    const char* SRC_Cstr = SRC_string.c_str();
-    //FSR_string = FSR_stringstream.str();    const char* FSR_Cstr = FSR_string.c_str();
     SUM_string = SUM_stringstream.str();    const char* SUM_Cstr = SUM_string.c_str();
-    //SUM_prevparalevel_string = SUM_prevparalevel_stringstream.str();    const char* SUM_prevparalevel_Cstr = SUM_prevparalevel_string.c_str();
 
     THRDIR_string = THRDIR_stringstream.str();
 
@@ -561,18 +310,14 @@ namespace ABACUS {
       LOG_outfile.open(LOG_Cstr, fstream::out | fstream::trunc);
       if (LOG_outfile.fail()) ABACUSerror("Could not open LOG_outfile... ");
       LOG_outfile.precision(16);
-      //LOG_outfile << endl;
     }
 
     Scan_Info scan_info;
 
-    //Scan_Thread_Set paused_thread_set;
-    //Scan_Thread_Set paused_thread_set_this_run;
     if (!refine) mkdir(THRDIR_string.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
     Scan_Thread_Data paused_thread_data (THRDIR_string, refine);
 
     if (refine) {
-      //paused_thread_set.Load(THR_Cstr);
       paused_thread_data.Load();
       if (!in_parallel) scan_info.Load(SRC_Cstr);
     }
@@ -590,8 +335,6 @@ namespace ABACUS {
     DP Chem_Pot = Chemical_Potential (AveragingState);
     DP sumrule_factor = Sumrule_Factor (whichDSF, AveragingState, Chem_Pot, iKmin, iKmax);
 
-    //clock_t stop_time_local = clock();
-
 
     // Now go for it !
 
@@ -613,36 +356,25 @@ namespace ABACUS {
       if ((paused_thread_data.lowest_il_with_nthreads_neq_0 == paused_thread_data.nlists - 1)
 	  && omp_thread_nr > 0) {
 	double start_time_wait = omp_get_wtime();
-	//cout << "omp_thread " << omp_thread_nr << " sleeping for 5 seconds... " << endl;
-	//sleep(5); // give time to master omp_thread to populate threads
 	double stop_time_wait;
 	do {
 	  for (int i = 0; i < 100000; ++i) { }
 	  stop_time_wait = omp_get_wtime();
 	} while (stop_time_wait - start_time_wait < 5.0);
-	//cout << "omp_thread " << omp_thread_nr << " restarting" << endl;
       }
 
       double start_time_cycle_omp = omp_get_wtime();
 
       at_least_one_new_flag_raised = false;
 
-      //if (!in_parallel) { // flag raising not allowed in parallel mode
-      //if (!in_parallel || rank == 0) { // flag raising only allowed if not in parallel mode, or if rank == 0
-      //if (!in_parallel || rank.sum() == 0) { // flag raising only allowed if not in parallel mode, or if rank == 0 at all paralevels
 
-      //if (!in_parallel) { // flag raising not allowed in parallel mode
 #pragma omp master
       {
-	//clock_t start_time_flags = clock();
 	double start_time_flags = omp_get_wtime();
 
 	// First flag the new base/type 's that we need to include:
-	//thresholdremoved ScanStateList.Raise_Scanning_Flags (running_scan_threshold);
 	ScanStateList.Raise_Scanning_Flags (exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0));
 
-	//cout << "flags: " << endl << ScanStateList.flag_for_scan << endl;
-
 
 	// Get these base/type started:
 	for (int i = 0; i < ScanStateList.ndef; ++i) {
@@ -656,12 +388,8 @@ namespace ABACUS {
 
 	    Tstate ScanState;
 
-	    //scan_info_before_descent = scan_info;
-
 	    ScanState = ScanStateList.State[i];
 
-	    //cout << "ScanStateList.State[i] = " << ScanState << endl;
-
 	    DP data_value = -1.0;
 
 	    bool admissible = ScanState.Check_Admissibility(whichDSF);
@@ -677,16 +405,13 @@ namespace ABACUS {
 		while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
 		while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
 
-		//if (iKexc >= iKmin && iKexc <= iKmax) RAW_outfile << endl;
-
-		//data_value = Compute_Matrix_Element_Contrib (whichDSF, iKmin, iKmax, ScanState, AveragingState, Chem_Pot, RAW_outfile);
 		stringstream rawfile_entry;
 		data_value = Compute_Matrix_Element_Contrib (whichDSF, iKmin, iKmax, ScanState, AveragingState, Chem_Pot, rawfile_entry);
 		{
 #pragma omp critical
 		  RAW_outfile << rawfile_entry.str();
 		}
-		//cout << "data_value for ScanState.label " << ScanState.label << " = " << data_value << endl;
+
 		{
 #pragma omp critical
 		  if (iKexc >= iKmin && iKexc <= iKmax) {
@@ -695,18 +420,14 @@ namespace ABACUS {
 		    scan_info_flags.sumrule_obtained += data_value*sumrule_factor;
 		  }
 		}
-		//cout << data_value * sumrule_factor << endl;
 
 		// If we force descent:  modify data_value by hand so that descent is forced on next scanning pass
-		//if (Force_Descent(whichDSF, ScanState, AveragingState, iKmod, Chem_Pot) && ScanState.iK - AveragingState.iK < iKmax && Sca nState.iK - AveragingState.iK > iKmin)
-		//if (Force_Descent(whichDSF, ScanState, AveragingState, iKmod, Chem_Pot))
 		for (int itype = 0; itype < 15; ++itype) {
 		  DP data_value_used = 0.1* exp(-paused_thread_data.logscale * ABACUS::min(0, paused_thread_data.lowest_il_with_nthreads_neq_0));
 		  if (Force_Descent(whichDSF, ScanState, AveragingState, itype, iKmod, Chem_Pot))
-		    //data_value = 0.1* exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0);
 		    data_value = data_value_used;
 		}
-		// ++G_7 logic
+
 		Vect<bool> allowed(false, 15);
 		if (whichDSF == 'B') { // symmetric state scanning
 		  allowed[9] = true; allowed[10] = true; allowed[11] = true;
@@ -714,18 +435,19 @@ namespace ABACUS {
 		}
 		else {
 		  allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-		  allowed[1] = allowed[0]; allowed[2] = allowed[0]; allowed[3] = allowed[0]; allowed[4] = allowed[0]; allowed[5] = allowed[0]; allowed[6] = allowed[0]; allowed[7] = allowed[0]; allowed[8] = allowed[0];
-		  //allowed[9] = (iKexc <= 0 && iKexc > iKmin);
+		  allowed[1] = allowed[0]; allowed[2] = allowed[0];
+		  allowed[3] = allowed[0]; allowed[4] = allowed[0];
+		  allowed[5] = allowed[0]; allowed[6] = allowed[0];
+		  allowed[7] = allowed[0]; allowed[8] = allowed[0];
 		  allowed[9] = (iKexc > iKmin);
 		  allowed[10] = allowed[9]; allowed[11] = allowed[9];
-		  //allowed[12] = (iKexc >= 0 && iKexc < iKmax);
 		  allowed[12] = (iKexc < iKmax);
 		  allowed[13] = allowed[12]; allowed[14] = allowed[12];
 		}
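// The momentum folding applied to iKexc above, written out as a standalone helper for
// clarity: the excitation momentum is shifted by multiples of iKmod until it lies inside
// [iKmin, iKmax], whenever that is possible. A sketch only; the function name is illustrative:
int Fold_iK_into_Window (int iKexc, int iKmin, int iKmax, int iKmod)
{
  while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
  while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
  return iKexc;
}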
 		for (int type_required_here = 0; type_required_here < 15; ++type_required_here) {
 		  if (!allowed[type_required_here]) continue;
-		  // All cases here are such that the ScanState hasn't been descended yet, so we simply use data_value as expected data value:
-		  //paused_thread_set_this_run.Include_Thread (abs(data_value), ScanState.label, type_required_here);
+		  // All cases here are such that the ScanState hasn't been descended yet,
+		  // so we simply use data_value as expected data value:
 		  {
 #pragma omp critical
 		    paused_thread_data.Include_Thread (abs(data_value), ScanState.label, type_required_here);
@@ -735,7 +457,8 @@ namespace ABACUS {
 
 	      else {
 		if (nconv0++ < 1000)
-		  CONV0_outfile << setw(25) << ScanState.label << setw(25) << ScanState.diffsq << setw(5) << ScanState.Check_Rapidities()
+		  CONV0_outfile << setw(25) << ScanState.label << setw(25) << ScanState.diffsq
+				<< setw(5) << ScanState.Check_Rapidities()
 				<< setw(25) << ScanState.String_delta() << endl;
 		scan_info_flags.Ndata++;
 		scan_info_flags.Ndata_conv0++;
@@ -753,15 +476,11 @@ namespace ABACUS {
 	      while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
 	      while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
 
-	      //thresholdremoved DP data_value = 2.0* running_scan_threshold;
-	      //DP data_value = 2.0 * exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0);
-	      //DP data_value = 0.1* running_scan_threshold;
 	      DP data_value = 1.0e-32;
 	      for (int itype = 0; itype < 15; ++itype)
 		if (Force_Descent(whichDSF, ScanState, AveragingState, itype, iKmod, Chem_Pot))
 		  data_value = 0.1* exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0);
 
-	      // ++G_7 logic
 	      Vect<bool> allowed(false, 15);
 	      if (whichDSF == 'B') {
 		// We scan over symmetric states. Only types 14 down to 9 are allowed.
@@ -770,17 +489,17 @@ namespace ABACUS {
 	      }
 	      else {
 		allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-		allowed[1] = allowed[0]; allowed[2] = allowed[0]; allowed[3] = allowed[0]; allowed[4] = allowed[0]; allowed[5] = allowed[0]; allowed[6] = allowed[0]; allowed[7] = allowed[0]; allowed[8] = allowed[0];
-		//allowed[9] = (iKexc <= 0 && iKexc > iKmin);
+		allowed[1] = allowed[0]; allowed[2] = allowed[0];
+		allowed[3] = allowed[0]; allowed[4] = allowed[0];
+		allowed[5] = allowed[0]; allowed[6] = allowed[0];
+		allowed[7] = allowed[0]; allowed[8] = allowed[0];
 		allowed[9] = (iKexc > iKmin);
 		allowed[10] = allowed[9]; allowed[11] = allowed[9];
-		//allowed[12] = (iKexc >= 0 && iKexc < iKmax);
 		allowed[12] = (iKexc < iKmax);
 		allowed[13] = allowed[12]; allowed[14] = allowed[12];
 	      }
 	      for (int type_required_here = 0; type_required_here < 15; ++type_required_here) {
 		if (!allowed[type_required_here]) continue;
-		//paused_thread_set_this_run.Include_Thread (abs(data_value), ScanState.label, type_required_here);
 		{
 #pragma omp critical
 		  paused_thread_data.Include_Thread (abs(data_value), ScanState.label, type_required_here);
@@ -788,364 +507,258 @@ namespace ABACUS {
 	      }
 	    } // inadmissible
 
-	    //scan_info_obtained_in_descent = scan_info;
-	    //scan_info_obtained_in_descent -= scan_info_before_descent;
-
 	    scan_info_flags.TT += omp_get_wtime() - start_time_flags;
 
 	    // Put this info into the appropriate ScanStateList.info
 	    {
 #pragma omp critical
-	      //ScanStateList.Include_Info(scan_info_obtained_in_descent, ScanStateList.base_label[i]);
-	      //cout << "Including info_flags: " << scan_info_flags << endl;
 	      ScanStateList.Include_Info(scan_info_flags, ScanStateList.base_label[i]);
 	      scan_info += scan_info_flags;
 	    }
 
-	    //cout << "Done with state " << ScanState.label << endl;
-
 	  } // if flag_for_scan
 	} // for i
 
-	//clock_t stop_time_flags = clock();
-
-	//scan_info.CPU_ticks += stop_time_flags - start_time_flags;
-	//scan_info.TT += (stop_time_flags - start_time_flags)/CLOCKS_PER_SEC;
-	//scan_info.TT += omp_get_wtime() - start_time_flags;
-
-	//} // if (!in_parallel || rank == 0)
-
       } // #pragma omp master
 
-      //cout << "Done raising flags." << endl;
 
       // Now we deal with the previously existing paused threads:
 
-      //if (scan_info.CPU_ticks < ((long long int) Max_Secs) * ((long long int) CLOCKS_PER_SEC)) {
-
-      //Vect<Scan_Thread> threads_to_do = paused_thread_data.Extract_Next_Scan_Threads();
       Vect<Scan_Thread> threads_to_do;
       int il_to_do = paused_thread_data.lowest_il_with_nthreads_neq_0; // for resaving threads in case we're out of time
       {
 #pragma omp critical
 	threads_to_do = paused_thread_data.Extract_Next_Scan_Threads();
-	//threads_to_do = paused_thread_data.Extract_Next_Scan_Threads(100);
       }
-      //cout << "Size of threads_to_do: " << threads_to_do.size() << endl;
-      //for (int i = 0; i < threads_to_do.size(); ++i) cout << threads_to_do[i].label << "\t" << threads_to_do[i].type << "\t";
-      //cout << endl;
 
-	int ithread;
+      int ithread;
 
-	//omp1#pragma omp parallel
-	{
-	  //omp1#pragma omp for
-	  for (ithread = 0; ithread < threads_to_do.size(); ++ithread) {
+      {
+	for (ithread = 0; ithread < threads_to_do.size(); ++ithread) {
 
-	    //cout << "\tithread = " << ithread << endl;
-	    //scan_info_before_descent = scan_info;
+	  Scan_Info scan_info_this_ithread;
+	  double start_time_this_ithread = omp_get_wtime();
 
-	    //int tid = omp_get_thread_num();
-	    //{
-	      //#pragma omp critical
-	      //cout << "Thread " << tid << " handling ithread " << ithread << " out of " << threads_to_do.size() << "\t" << threads_to_do[ithread].label << "\t" << threads_to_do[ithread].type << endl;
-	    //}
-
-	    Scan_Info scan_info_this_ithread;
-	    double start_time_this_ithread = omp_get_wtime();
-
-	    // If we don't have time anymore, resave the threads instead of computing them:
-	    if (start_time_this_ithread - start_time_omp > Max_Secs_alert) {
-	      for (int ith = ithread; ith < threads_to_do.size(); ++ith) {
+	  // If we don't have time anymore, resave the threads instead of computing them:
+	  if (start_time_this_ithread - start_time_omp > Max_Secs_alert) {
+	    for (int ith = ithread; ith < threads_to_do.size(); ++ith) {
 #pragma omp critical
-		paused_thread_data.Include_Thread (il_to_do, threads_to_do[ith].label, threads_to_do[ith].type);
-	      }
-	      break; // jump out of ithread loop
+	      paused_thread_data.Include_Thread (il_to_do, threads_to_do[ith].label, threads_to_do[ith].type);
 	    }
+	    break; // jump out of ithread loop
+	  }
 
-	    Tstate ScanState;
-	    {
+	  Tstate ScanState;
+	  {
 #pragma omp critical
-	      ScanState = ScanStateList.Return_State(Extract_Base_Label(threads_to_do[ithread].label));
+	    ScanState = ScanStateList.Return_State(Extract_Base_Label(threads_to_do[ithread].label));
+	  }
+	  Tstate BaseScanState;  BaseScanState = ScanState;
+
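+	  // Reconstruct the paused thread's scan state from its label, relative to the base state's Ix2: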
+	  ScanState.Set_to_Label(threads_to_do[ithread].label, BaseScanState.Ix2);
+
+
+	  // STARTING Descend_and_Compute block:
+	  int type_required = threads_to_do[ithread].type;
+
+	  ScanState.Compute_Momentum();
+	  Vect<string> desc_label;
+
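+	  // Decode the descent type (0-14) into the flags passed to the descendent generators below:
+	  // types 0-8 preserve iK, types 9-11 step iK down, types 12-14 step iK up; within each group
+	  // the flags select whether only the current excitation is dispersed and whether nexc is preserved.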
+	  bool disperse_only_current_exc_up = false;
+	  if (type_required == 14 || type_required == 8 || type_required == 7 || type_required == 6)
+	    disperse_only_current_exc_up = true;
+	  bool preserve_nexc_up = false;
+	  if (type_required == 13 || type_required == 5 || type_required == 4 || type_required == 3)
+	    preserve_nexc_up = true;
+	  bool disperse_only_current_exc_down = false;
+	  if (type_required == 11 || type_required == 8 || type_required == 5 || type_required == 2)
+	    disperse_only_current_exc_down = true;
+	  bool preserve_nexc_down = false;
+	  if (type_required == 10 || type_required == 7 || type_required == 4 || type_required == 1)
+	    preserve_nexc_down = true;
+
+	  if (whichDSF == 'B') { // symmetric state scanning
+	    if (type_required >= 9 && type_required <= 11)
+	      desc_label = Descendent_States_with_iK_Stepped_Down_rightIx2only
+		(ScanState.label, BaseScanState, disperse_only_current_exc_down, preserve_nexc_down);
+	    else if (type_required >= 12 && type_required <= 14)
+	      desc_label = Descendent_States_with_iK_Stepped_Up_rightIx2only
+		(ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up);
+	  }
+	  else {
+	    if (type_required >= 0 && type_required <= 8) {
+	      desc_label = Descendent_States_with_iK_Preserved
+		(ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up,
+		 disperse_only_current_exc_down, preserve_nexc_down);
 	    }
-	    Tstate BaseScanState;  BaseScanState = ScanState;
+	    else if (type_required >= 9 && type_required <= 11)
+	      desc_label = Descendent_States_with_iK_Stepped_Down
+		(ScanState.label, BaseScanState, disperse_only_current_exc_down, preserve_nexc_down);
+	    else if (type_required >= 12 && type_required <= 14)
+	      desc_label = Descendent_States_with_iK_Stepped_Up
+		(ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up);
+	  }
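+	  // desc_label now holds the labels of all one-step descendents of the required type.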
 
-	    //cout << "Setting to label = " << threads_to_do[ithread].label << ", descending type " << threads_to_do[ithread].type << endl;
-	    ScanState.Set_to_Label(threads_to_do[ithread].label, BaseScanState.Ix2);
-	    //cout << "ScanState after setting label: " << threads_to_do[ithread].label << endl << ScanState << endl;
+	  string label_here = ScanState.label;
+
+	  for (int idesc = 0; idesc < desc_label.size(); ++idesc) {
+
+	    ScanState.Set_to_Label (desc_label[idesc], BaseScanState.Ix2);
+
+	    bool admissible = ScanState.Check_Admissibility(whichDSF);
+
+	    DP data_value = 0.0;
+
+	    ScanState.conv = false;
+	    ScanState.Compute_Momentum(); // since momentum is used as forced descent criterion
 
 
-	    //cout << "Calling Descend_and_Compute with type " << paused_thread_list.type[ithread] << " on state" << endl << ScanState << endl;
-	    /*
-	      Descend_and_Compute_for_Fixed_Base (whichDSF, AveragingState, BaseScanState, ScanState,
-	      //paused_thread_set.type[ilist][ithread], iKmin, iKmax, iKmod,
-	      threads_to_do[ithread].type, iKmin, iKmax, iKmod,
-	      //paused_thread_set_this_run, running_scan_threshold,
-	      paused_thread_data, //thresholdremoved running_scan_threshold,
-	      //paused_thread_set[ilist].abs_data_value[ithread],
-	      ph_cost,
-	      Max_Secs_used, sumrule_factor, Chem_Pot, scan_info, RAW_outfile,
-	      INADM_outfile, ninadm, CONV0_outfile, nconv0, STAT_outfile);
-	    */
+	    if (admissible) {
 
-	    // STARTING Descend_and_Compute block:
-	    int type_required = threads_to_do[ithread].type;
+	      ScanState.Compute_All (idesc == 0);
 
-	    ScanState.Compute_Momentum();
-	    Vect<string> desc_label;
+	      if (ScanState.conv) {
 
-	    // ++G_7 logic
-	    bool disperse_only_current_exc_up = false;
-	    if (type_required == 14 || type_required == 8 || type_required == 7 || type_required == 6) disperse_only_current_exc_up = true;
-	    bool preserve_nexc_up = false;
-	    if (type_required == 13 || type_required == 5 || type_required == 4 || type_required == 3) preserve_nexc_up = true;
-	    bool disperse_only_current_exc_down = false;
-	    if (type_required == 11 || type_required == 8 || type_required == 5 || type_required == 2) disperse_only_current_exc_down = true;
-	    bool preserve_nexc_down = false;
-	    if (type_required == 10 || type_required == 7 || type_required == 4 || type_required == 1) preserve_nexc_down = true;
+		// Put momentum in fundamental window, if possible:
+		int iKexc = ScanState.iK - AveragingState.iK;
+		while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
+		while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
 
-	    if (whichDSF == 'B') { // symmetric state scanning
-	      if (type_required >= 9 && type_required <= 11)
-		desc_label = Descendent_States_with_iK_Stepped_Down_rightIx2only (ScanState.label, BaseScanState, disperse_only_current_exc_down, preserve_nexc_down);
-	      else if (type_required >= 12 && type_required <= 14)
-		desc_label = Descendent_States_with_iK_Stepped_Up_rightIx2only (ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up);
-	    }
-	    else {
-	      if (type_required >= 0 && type_required <= 8) {
-		desc_label = Descendent_States_with_iK_Preserved(ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up, disperse_only_current_exc_down, preserve_nexc_down);
-	      }
-	      else if (type_required >= 9 && type_required <= 11)
-		desc_label = Descendent_States_with_iK_Stepped_Down (ScanState.label, BaseScanState, disperse_only_current_exc_down, preserve_nexc_down);
-	      else if (type_required >= 12 && type_required <= 14)
-		desc_label = Descendent_States_with_iK_Stepped_Up (ScanState.label, BaseScanState, disperse_only_current_exc_up, preserve_nexc_up);
-	    }
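+		// Build the raw-file entry in a local stringstream, so that the write to RAW_outfile
+		// can be done in a single critical section below: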
+		stringstream rawfile_entry;
+		data_value = Compute_Matrix_Element_Contrib (whichDSF, iKmin, iKmax, ScanState, AveragingState,
+							     Chem_Pot, rawfile_entry);
 
-	    string label_here = ScanState.label;
-	    //int ScanState_iK = ScanState.iK;
-
-	    for (int idesc = 0; idesc < desc_label.size(); ++idesc) {
-
-	      //clock_t start_time_here = clock();
-
-	      ScanState.Set_to_Label (desc_label[idesc], BaseScanState.Ix2);
-
-	      bool admissible = ScanState.Check_Admissibility(whichDSF);
-
-	      DP data_value = 0.0;
-
-	      //scan_info.Ndata++;
-	      //scan_info_this_ithread.Ndata++;
-
-	      ScanState.conv = false;
-	      ScanState.Compute_Momentum(); // since momentum is used as forced descent criterion
-
-
-	      if (admissible) {
-
-		ScanState.Compute_All (idesc == 0);
-		//ScanState.Compute_All (true);
-
-		//scan_info.Ndata++;
-
-		if (ScanState.conv) {
-		  //scan_info_this_ithread.Ndata_conv++;
-
-		  // Put momentum in fundamental window, if possible:
-		  int iKexc = ScanState.iK - AveragingState.iK;
-		  while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
-		  while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
-
-		  //data_value = Compute_Matrix_Element_Contrib (whichDSF, iKmin, iKmax, ScanState, AveragingState, Chem_Pot, RAW_outfile);
-		  stringstream rawfile_entry;
-		  data_value = Compute_Matrix_Element_Contrib (whichDSF, iKmin, iKmax, ScanState, AveragingState, Chem_Pot, rawfile_entry);
-
-		  {
-#pragma omp critical
-		    RAW_outfile << rawfile_entry.str();
-		    if (iKexc >= iKmin && iKexc <= iKmax) {
-		      scan_info_this_ithread.Ndata++;
-		      scan_info_this_ithread.Ndata_conv++;
-		      scan_info_this_ithread.sumrule_obtained += data_value*sumrule_factor;
-		    }
-		  }
-
-		  //if (iKexc >= iKmin && iKexc <= iKmax) scan_info_this_ithread.sumrule_obtained += data_value*sumrule_factor;
-		  //cout << "data_value found = " << data_value * sumrule_factor << endl;
-
-		  // Uncomment line below if .stat file is desired:
-		  //STAT_outfile << setw(20) << label_here << "\t" << setw(5) << type_required << "\t" << setw(16) << std::scientific << running_scan_threshold << "\t" << setw(20) << ScanState.label << "\t" << setw(16) << data_value << "\t" << setw(16) << std::fixed << setprecision(8) << data_value/running_scan_threshold << endl;
-
-		} // if (ScanState.conv)
-		else {
-		  if (nconv0++ < 1000)
-		    CONV0_outfile << setw(25) << ScanState.label << setw(25) << ScanState.diffsq << setw(5) << ScanState.Check_Rapidities()
-				  << setw(25) << ScanState.String_delta() << endl;
-		  //scan_info.Ndata_conv0++;
-		  scan_info_this_ithread.Ndata++;
-		  scan_info_this_ithread.Ndata_conv0++;
-		  //cout << "State did not converge." << endl;
-		}
-	      } // if (admissible)
-
-	      else {
-		if (ninadm++ < 1000000) INADM_outfile << ScanState.label << endl;
-		//scan_info.Ninadm++;
-		scan_info_this_ithread.Ndata++;
-		scan_info_this_ithread.Ninadm++;
-		//cout << "State was inadmissible." << endl;
-		// Set data_value to enable continued scanning later on:
-		//thresholdremoved data_value = 0.1* running_scan_threshold;
-	      }
-
-
-	      //clock_t stop_time_here = clock();
-
-	      //scan_info.CPU_ticks += stop_time_here - start_time_here;
-	      //scan_info_this_ithread.CPU_ticks += stop_time_here - start_time_here;
-	      //scan_info_this_ithread.TT += (stop_time_here - start_time_here)/CLOCKS_PER_SEC;
-
-	      Tstate state_to_descend;  state_to_descend = ScanState; // for checking
-
-	      ScanState.Compute_Momentum();
-	      // Put momentum in fundamental window, if possible:
-	      int iKexc = ScanState.iK - AveragingState.iK;
-	      while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
-	      while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
-
-	      // ++G_7 logic
-	      // Momentum-preserving are only descended to momentum-preserving.
-	      // Momentum-increasing are only descended to momentum-preserving and momentum-increasing.
-	      // Momentum-decreasing are only descended to momentum-preserving and momentum-decreasing.
-	      Vect<bool> allowed(false, 15);
-	      if (whichDSF == 'B') {
-		// We scan over symmetric states. Only types 14 down to 9 are allowed.
-		if (type_required >= 9 && type_required <= 11) { // iK stepped down on rightIx2; step further up or down
-		  allowed[9] = true; allowed[10] = true; allowed[11] = true;
-		  allowed[12] = true; allowed[13] = true; allowed[14] = true;
-		}
-		else if (type_required >= 12 && type_required <= 14) { // iK stepped up on rightIx2; only step further up
-		  allowed[12] = true; allowed[13] = true; allowed[14] = true;
-		}
-	      }
-	      else {
-		if (type_required >= 0 && type_required <= 8) { // momentum-preserving
-		  allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-		  allowed[9] = false;
-		  allowed[12] = false;
-		}
-		if (type_required >= 9 && type_required <= 11) { // momentum-decreasing
-		  allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-		  allowed[9] = (iKexc > iKmin);
-		  allowed[12] = false;
-		}
-		if (type_required >= 12 && type_required <= 14) { // momentum-increasing
-		  allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
-		  allowed[9] = false;
-		  allowed[12] = (iKexc < iKmax);
-		}
-		// The others are just copies of the ones above:
-		allowed[1] = allowed[0]; allowed[2] = allowed[0]; allowed[3] = allowed[0]; allowed[4] = allowed[0]; allowed[5] = allowed[0]; allowed[6] = allowed[0]; allowed[7] = allowed[0]; allowed[8] = allowed[0];
-		allowed[10] = allowed[9]; allowed[11] = allowed[9];
-		allowed[13] = allowed[12]; allowed[14] = allowed[12];
-	      }
-
-
-	      for (int type_required_here = 0; type_required_here < 15; ++type_required_here) {
-
-		if (!allowed[type_required_here]) continue;
-
-		// Reset ScanState to what it was, if change on first pass
-		if (type_required_here > 0) ScanState = state_to_descend;
-
-		// We determine if we carry on scanning based on the data_value obtained, or forcing conditions:
-		// Forcing conditions:
-		//if (!admissible || Force_Descent(whichDSF, ScanState, AveragingState, type_required_here, iKmod, Chem_Pot))
-		////data_value = 1.01 * running_scan_threshold/ph_cost; // force for all types of desc
-		//data_value = 1.01 * running_scan_threshold; // only force for no new ph pairs
-		////data_value = 1.0; // force for all types of desc
-		// If we're sitting out of the iKmin & iKmax window, stop:
-		//if (iKmin != iKmax && (ScanState.iK - AveragingState.iK > iKmax || ScanState.iK - AveragingState.iK < iKmin)) data_value = 0.0;
-
-		//if (abs(data_value) * (type_required_here != 2 ? 1.0 : ph_cost) > running_scan_threshold
-		//if ((abs(data_value) > running_scan_threshold
-		//|| Nr_ph_Recombinations_Possible (ScanState.label, BaseScanState, type_required_here) > 0)
-
-		//DP expected_abs_data_value = abs(data_value)/pow(ph_cost, DP(Nr_ph_Recombinations_Possible (ScanState.label, BaseScanState, type_required_here)));
-		DP expected_abs_data_value = abs(data_value);
-
-		//++G_7 logic
-		if ((type_required_here == 14 || type_required_here == 8 || type_required_here == 7 || type_required_here == 6)
-		    && Expect_ph_Recombination_iK_Up (ScanState.label, BaseScanState)) expected_abs_data_value /= ph_cost;
-		if (type_required_here == 12 || type_required_here == 2 || type_required_here == 1 || type_required_here == 0)
-		  expected_abs_data_value *= ph_cost;
-		if ((type_required_here == 11 || type_required_here == 8 || type_required_here == 5 || type_required_here == 2)
-		    && Expect_ph_Recombination_iK_Down (ScanState.label, BaseScanState)) expected_abs_data_value /= ph_cost;
-		if (type_required_here == 9 || type_required_here == 6 || type_required_here == 3 || type_required_here == 0)
-		  expected_abs_data_value *= ph_cost;
-
-		//paused_thread_set.Include_Thread (expected_abs_data_value, ScanState.label, type_required_here);
 		{
 #pragma omp critical
-		  //cout << "\tIncluding thread " << ScanState.label << "\t" << type_required_here << "\tdata_value " << data_value << "\texpected abs data value " << expected_abs_data_value << endl;
-		  paused_thread_data.Include_Thread (expected_abs_data_value, ScanState.label, type_required_here);
+		  RAW_outfile << rawfile_entry.str();
+		  if (iKexc >= iKmin && iKexc <= iKmax) {
+		    scan_info_this_ithread.Ndata++;
+		    scan_info_this_ithread.Ndata_conv++;
+		    scan_info_this_ithread.sumrule_obtained += data_value*sumrule_factor;
+		  }
 		}
-		//cout << "\tDone including thread." << endl;
-	      } // for type_required_here
 
-	      //cout << "\tFinished with descendent " << idesc << " out of " << desc_label.size() << " with label " << desc_label[idesc] << endl;
-	      //cout << "\tfrom state with label " << label_here << endl;
-	    } // for idesc
+		// Uncomment line below if .stat file is desired:
+		//STAT_outfile << setw(20) << label_here << "\t" << setw(5) << type_required << "\t" << setw(16) << std::scientific << running_scan_threshold << "\t" << setw(20) << ScanState.label << "\t" << setw(16) << data_value << "\t" << setw(16) << std::fixed << setprecision(8) << data_value/running_scan_threshold << endl;
 
-	    //cout << "Finished Descend on state " << endl << ScanState.label << endl;
+	      } // if (ScanState.conv)
+	      else {
+		if (nconv0++ < 1000)
+		  CONV0_outfile << setw(25) << ScanState.label << setw(25)
+				<< ScanState.diffsq << setw(5) << ScanState.Check_Rapidities()
+				<< setw(25) << ScanState.String_delta() << endl;
+		scan_info_this_ithread.Ndata++;
+		scan_info_this_ithread.Ndata_conv0++;
+	      }
+	    } // if (admissible)
+
+	    else {
+	      if (ninadm++ < 1000000) INADM_outfile << ScanState.label << endl;
+	      scan_info_this_ithread.Ndata++;
+	      scan_info_this_ithread.Ninadm++;
+	    }
+
+	    Tstate state_to_descend;  state_to_descend = ScanState; // saved so each descent type below starts from this state
+
+	    ScanState.Compute_Momentum();
+	    // Put momentum in fundamental window, if possible:
+	    int iKexc = ScanState.iK - AveragingState.iK;
+	    while (iKexc > iKmax && iKexc - iKmod >= iKmin) iKexc -= iKmod;
+	    while (iKexc < iKmin && iKexc + iKmod <= iKmax) iKexc += iKmod;
+
+	    // Momentum-preserving are only descended to momentum-preserving.
+	    // Momentum-increasing are only descended to momentum-preserving and momentum-increasing.
+	    // Momentum-decreasing are only descended to momentum-preserving and momentum-decreasing.
+	    Vect<bool> allowed(false, 15);
+	    if (whichDSF == 'B') {
+	      // We scan over symmetric states. Only types 14 down to 9 are allowed.
+	      if (type_required >= 9 && type_required <= 11) { // iK stepped down on rightIx2; step further up or down
+		allowed[9] = true; allowed[10] = true; allowed[11] = true;
+		allowed[12] = true; allowed[13] = true; allowed[14] = true;
+	      }
+	      else if (type_required >= 12 && type_required <= 14) { // iK stepped up on rightIx2; only step further up
+		allowed[12] = true; allowed[13] = true; allowed[14] = true;
+	      }
+	    }
+	    else {
+	      if (type_required >= 0 && type_required <= 8) { // momentum-preserving
+		allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
+		allowed[9] = false;
+		allowed[12] = false;
+	      }
+	      if (type_required >= 9 && type_required <= 11) { // momentum-decreasing
+		allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
+		allowed[9] = (iKexc > iKmin);
+		allowed[12] = false;
+	      }
+	      if (type_required >= 12 && type_required <= 14) { // momentum-increasing
+		allowed[0] = (iKexc >= iKmin && iKexc <= iKmax);
+		allowed[9] = false;
+		allowed[12] = (iKexc < iKmax);
+	      }
+	      // The others are just copies of the ones above:
+	      allowed[1] = allowed[0]; allowed[2] = allowed[0];
+	      allowed[3] = allowed[0]; allowed[4] = allowed[0];
+	      allowed[5] = allowed[0]; allowed[6] = allowed[0];
+	      allowed[7] = allowed[0]; allowed[8] = allowed[0];
+	      allowed[10] = allowed[9]; allowed[11] = allowed[9];
+	      allowed[13] = allowed[12]; allowed[14] = allowed[12];
+	    }
+
+
+	    for (int type_required_here = 0; type_required_here < 15; ++type_required_here) {
+
+	      if (!allowed[type_required_here]) continue;
+
+	      // Reset ScanState to what it was, in case it was changed on an earlier pass:
+	      if (type_required_here > 0) ScanState = state_to_descend;
+
+	      // Decide whether to carry on scanning based on the data_value obtained, or on forcing conditions:
+	      DP expected_abs_data_value = abs(data_value);
+
+	      //++G_7 logic
+	      if ((type_required_here == 14 || type_required_here == 8
+		   || type_required_here == 7 || type_required_here == 6)
+		  && Expect_ph_Recombination_iK_Up (ScanState.label, BaseScanState))
+		expected_abs_data_value /= ph_cost;
+	      if (type_required_here == 12 || type_required_here == 2
+		  || type_required_here == 1 || type_required_here == 0)
+		expected_abs_data_value *= ph_cost;
+	      if ((type_required_here == 11 || type_required_here == 8
+		   || type_required_here == 5 || type_required_here == 2)
+		  && Expect_ph_Recombination_iK_Down (ScanState.label, BaseScanState))
+		expected_abs_data_value /= ph_cost;
+	      if (type_required_here == 9 || type_required_here == 6
+		  || type_required_here == 3 || type_required_here == 0)
+		expected_abs_data_value *= ph_cost;
+
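+	      // This rescaled value is only used as the expected weight when queueing the descendent
+	      // thread below; it does not enter the computed matrix elements.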
+	      {
+#pragma omp critical
+		paused_thread_data.Include_Thread (expected_abs_data_value, ScanState.label, type_required_here);
+	      }
+	    } // for type_required_here
+
+	  } // for idesc
 
-	    // FINISHED Descend_and_Compute block
+	  // FINISHED Descend_and_Compute block
 
 
-
-	    //cout << "Finished descending." << endl;
-	    //cout << "\tFinished descending ithread = " << ithread << endl;
-
-	    //scan_info_obtained_in_descent = scan_info;
-	    //scan_info_obtained_in_descent -= scan_info_before_descent;
-	    //// Put this info into the appropriate ScanStateList.info
-	    //ScanStateList.Include_Info(scan_info_obtained_in_descent, Extract_Base_Label(threads_to_do[ithread].label));
-
-	    scan_info_this_ithread.TT += omp_get_wtime() - start_time_this_ithread;
+	  scan_info_this_ithread.TT += omp_get_wtime() - start_time_this_ithread;
 
 #pragma omp critical
-	    {
-	      scan_info += scan_info_this_ithread;
-	      //cout << "Including info_ihtread: " << scan_info_this_ithread << endl;
-	      ScanStateList.Include_Info(scan_info_this_ithread, Extract_Base_Label(threads_to_do[ithread].label));
-	    }
-	  } // for ithread
+	  {
+	    scan_info += scan_info_this_ithread;
+	    ScanStateList.Include_Info(scan_info_this_ithread, Extract_Base_Label(threads_to_do[ithread].label));
+	  }
+	} // for ithread
 
-	} // omp parallel region
+      } // omp parallel region
 
-	// Resynchronize all compute threads:
-	//omp1#pragma omp barrier
 
-	//cout << "Done with threads_to_do." << endl;
-
-	//      } // if time
-
-      //start_time_local = clock();
-
-      /*
-      if (!in_parallel)
-	LOG_outfile << "Ndata handled up to now: " << scan_info.Ndata_conv
-		    << ". Threshold level " << paused_thread_data.lowest_il_with_nthreads_neq_0 << " " << setprecision(6) << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0)
-		    << ". " << scan_info.Ndata - Ndata_previous_cycle << " new data points. Number of threads: "
-		    << paused_thread_data.nthreads_total.sum()
-		    << ".  Saturation:  " << setprecision(12) << scan_info.sumrule_obtained << endl;
-      */
-
-      //int tid = omp_get_thread_num();
 #pragma omp master
       {
 	if (!in_parallel)
 	  LOG_outfile << "Master cycling. Ndata_conv " << scan_info.Ndata_conv
-		      << ". Threshold " << paused_thread_data.lowest_il_with_nthreads_neq_0 << " " << setw(9) << setprecision(3) << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0)
+		      << ". Threshold " << paused_thread_data.lowest_il_with_nthreads_neq_0 << " "
+		      << setw(9) << setprecision(3)
+		      << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0)
 		      << ". " << setw(12) << scan_info.Ndata - Ndata_previous_cycle << " new data. Nr of threads: "
 		      << setw(14) << paused_thread_data.nthreads_total.sum()
 		      << ".  Saturation:  " << setprecision(12) << scan_info.sumrule_obtained << endl;
@@ -1154,30 +767,10 @@ namespace ABACUS {
       }
 
 
-      //stop_time_local = clock();
-
-      //current_time = clock();
-
-      //scan_info.CPU_ticks += stop_time_local - start_time_local;
-
-      //#pragma omp critical
-      //scan_info.TT += omp_get_wtime() - start_time_cycle_omp;
-
       current_time_omp = omp_get_wtime();
 
-      //int tid = omp_get_thread_num();
-      //if (tid == 0) cout << "current_time_omp - start_time_omp = " << current_time_omp - start_time_omp << "\t" << Max_Secs_used << endl;
-      //if (current_time_omp - start_time_omp > Max_Secs_used)
-      //cout << "tid " << tid << " exiting." << endl;
-
-
-      //} while (scan_info.CPU_ticks < ((long long int) Max_Secs_used) * ((long long int) CLOCKS_PER_SEC)
-      //} while (current_time - start_time < ((long long int) Max_Secs_used) * ((long long int) CLOCKS_PER_SEC)
     } while (current_time_omp - start_time_omp < Max_Secs_used
 	     && scan_info.sumrule_obtained < target_sumrule
-	     //&& paused_thread_list.Highest_abs_data_value(0.0, 1.0e+10) > 1.0e-30
-	     //&& !(all_threads_zero_previous_cycle && all_threads_zero_current_cycle && !at_least_one_new_flag_raised)
-	     //thresholdremoved && running_scan_threshold > 1.0e-10*MACHINE_EPS
 	     );
-    // This closes the #pragram omp parallel block
+    // This closes the #pragma omp parallel block
 
@@ -1186,8 +779,6 @@ namespace ABACUS {
     CONV0_outfile.close();
     STAT_outfile.close();
 
-
-    //scan_info.CPU_ticks_TOT += scan_info.CPU_ticks;
     scan_info.Save(SRC_Cstr);
 
     Scan_Info scan_info_refine = scan_info;
@@ -1199,19 +790,17 @@ namespace ABACUS {
 	LOG_outfile << endl << "Achieved sumrule saturation of " << scan_info.sumrule_obtained
 		    << "\t(target was " << target_sumrule << ")." << endl << endl;
 
-      //thresholdremoved
-      //if (running_scan_threshold < MACHINE_EPS)
-      //LOG_outfile << endl << "Stopping because threshold lower than machine precision. " << endl << endl;
-
-      //thresholdremoved if (!refine) LOG_outfile << "Main run info: " << scan_info << endl << "Latest running_scan_threshold = " << running_scan_threshold << endl;
       if (!refine) {
 	LOG_outfile << "Main run info: " << scan_info << endl;
-	LOG_outfile << "Latest threshold level " << paused_thread_data.lowest_il_with_nthreads_neq_0 << " " << std::scientific << setprecision(3) << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0) << endl;
+	LOG_outfile << "Latest threshold level " << paused_thread_data.lowest_il_with_nthreads_neq_0
+		    << " " << std::scientific << setprecision(3)
+		    << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0) << endl;
       }
       else if (refine) {
-	//thresholdremoved LOG_outfile << "Refining info: " << scan_info_refine << endl << "Latest running_scan_threshold = " << running_scan_threshold << endl
 	LOG_outfile << "Refining info: " << scan_info_refine << endl;
-	LOG_outfile << "Latest threshold level " << paused_thread_data.lowest_il_with_nthreads_neq_0 << " " << std::scientific << setprecision(3) << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0) << endl;
+	LOG_outfile << "Latest threshold level " << paused_thread_data.lowest_il_with_nthreads_neq_0
+		    << " " << std::scientific << setprecision(3)
+		    << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0) << endl;
 	LOG_outfile << "Resulting info: " << scan_info << endl;
       }
       LOG_outfile << "Code version " << ABACUS_VERSION << ", copyright J.-S. Caux." << endl << endl;
@@ -1220,45 +809,27 @@ namespace ABACUS {
 
     else { // in_parallel
 
-      //thresholdremoved
-      //if (running_scan_threshold < MACHINE_EPS)
-      //LOG_outfile << "rank " << rank << " out of " << nr_processors << " processors: "
-      //	    << "Stopping because threshold lower than machine precision. " << endl << endl;
-
       LOG_outfile << "rank " << rank << " out of " << nr_processors << " processors: "
-	//thresholdremoved   << "run info: " << scan_info << endl << "Latest running_scan_threshold = " << running_scan_threshold << endl;
-		  << "run info: " << scan_info << endl << "Latest threshold = " << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0) << endl;
+		  << "run info: " << scan_info << endl << "Latest threshold = "
+		  << exp(-paused_thread_data.logscale * paused_thread_data.lowest_il_with_nthreads_neq_0) << endl;
     }
 
-    //if (paused_thread_list.dim < 1000000) paused_thread_list.Order_in_abs_data_value();
-    //paused_thread_list.Save(THR_Cstr);
-    //paused_thread_set.Save(THR_Cstr);
     paused_thread_data.Save();
 
     ScanStateList.Order_in_SRC ();
 
-    //cout << "Saving info: " << endl; for (int idef = 0; idef < ScanStateList.ndef; ++idef) cout << ScanStateList.info[idef] << endl;
     ScanStateList.Save_Info (SUM_Cstr);
 
 
     // Evaluate f-sumrule:
-    //if (!fixed_iK && !in_parallel) if (whichDSF != 'q') Evaluate_F_Sumrule (whichDSF, AveragingState, Chem_Pot, RAW_Cstr, FSR_Cstr);
-    //if (iKmin != iKmax && !in_parallel) if (whichDSF != 'q') Evaluate_F_Sumrule (whichDSF, AveragingState, Chem_Pot, iKmin, iKmax, RAW_Cstr, FSR_Cstr);
-    //if (iKmin != iKmax && !in_parallel ) if (whichDSF != 'q') Evaluate_F_Sumrule (prefix_prevparalevel, whichDSF, AveragingState, Chem_Pot, iKmin, iKmax);
-    if (!in_parallel ) if (whichDSF != 'q') Evaluate_F_Sumrule (prefix_prevparalevel, whichDSF, AveragingState, Chem_Pot, iKmin, iKmax);
-
-    // Produce sorted file
-    //if (!in_parallel && whichDSF != 'Z') Sort_RAW_File (RAW_Cstr, 'f', whichDSF);
-    //if (!in_parallel && whichDSF == 'Z') Sort_RAW_File (RAW_Cstr, 'e', whichDSF);
+    if (!in_parallel && whichDSF != 'q')
+      Evaluate_F_Sumrule (prefix_prevparalevel, whichDSF, AveragingState, Chem_Pot, iKmin, iKmax);
 
     return(scan_info);
-
-
   }
 
 
 
-
   //******************************************************
 
   // Functions to initiate scans:
@@ -1267,7 +838,8 @@ namespace ABACUS {
   // General version for equilibrium correlators at generic (possibly finite) temperature:
 
   void Scan_LiebLin (char whichDSF, DP c_int, DP L, int N, int iKmin, int iKmax, DP kBT,
-		     int Max_Secs, DP target_sumrule, bool refine, int paralevel, Vect<int> rank, Vect<int> nr_processors)
+		     int Max_Secs, DP target_sumrule, bool refine,
+		     int paralevel, Vect<int> rank, Vect<int> nr_processors)
   {
 
     // This function scans the Hilbert space of the LiebLin gas,
@@ -1287,8 +859,6 @@ namespace ABACUS {
     // if we refine, read the quantum numbers of the saddle point state (and seed sps) from the sps file:
 
     stringstream SPS_stringstream;  string SPS_string;
-    //SPS_stringstream << "Tgt0_";
-    //Data_File_Name (SPS_stringstream, whichDSF, iKmin, iKmax, kBT, spstate, SeedScanState, "");
     Data_File_Name (SPS_stringstream, whichDSF, c_int, L, N, iKmin, iKmax, kBT, 0.0, "");
     SPS_stringstream << ".sps";
     SPS_string = SPS_stringstream.str();    const char* SPS_Cstr = SPS_string.c_str();
@@ -1331,9 +901,6 @@ namespace ABACUS {
 
     else if (whichDSF == 'o' || whichDSF == 'g') {
       if (!refine) {
-	//SeedScanState = Canonical_Saddle_Point_State (c_int, L, Nscan, kBT);
-	//LiebLin_Bethe_State scanspstate = Canonical_Saddle_Point_State (c_int, L, Nscan, kBT);
-	//SeedScanState = scanspstate;
 	if (whichDSF == 'o') SeedScanState = Remove_Particle_at_Center (spstate);
 	else SeedScanState = Add_Particle_at_Center (spstate);
       }
@@ -1342,8 +909,8 @@ namespace ABACUS {
 	int Nsspsread;
 	spsfile >> Nsspsread;
 	if (Nsspsread != Nscan) {
-	cout << Nsspsread << "\t" << Nscan << endl;
-	ABACUSerror("Wrong number of Ix2 in scan saddle-point state.");
+	  cout << Nsspsread << "\t" << Nscan << endl;
+	  ABACUSerror("Wrong number of Ix2 in scan saddle-point state.");
 	}
 	SeedScanState = LiebLin_Bethe_State (c_int, L, Nscan);
 	for (int i = 0; i < Nscan; ++i) spsfile >> SeedScanState.Ix2[i];
@@ -1366,7 +933,6 @@ namespace ABACUS {
       spsfile << endl << spstate << endl << endl;
       for (int i = 1; i < spstate.N - 2; ++i)
 	spsfile << 0.5 * (spstate.lambdaoc[i] + spstate.lambdaoc[i+1])
-	  //<< "\t" << twoPI/(spstate.L * (spstate.lambdaoc[i+1] - spstate.lambdaoc[i])) << endl;
 		<< "\t" << 1.0/spstate.L * (0.25/(spstate.lambdaoc[i] - spstate.lambdaoc[i-1])
 					    + 0.5/(spstate.lambdaoc[i+1] - spstate.lambdaoc[i])
 					    + 0.25/(spstate.lambdaoc[i+2] - spstate.lambdaoc[i+1]))
@@ -1377,27 +943,30 @@ namespace ABACUS {
     spsfile.close();
 
     // Perform the scan:
-    General_Scan (whichDSF, iKmin, iKmax, 100000000, kBT, spstate, SeedScanState, "", Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    General_Scan (whichDSF, iKmin, iKmax, 100000000, kBT, spstate, SeedScanState, "",
+		  Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
 
     return;
   }
 
   void Scan_LiebLin (char whichDSF, DP c_int, DP L, int N, int iKmin, int iKmax, DP kBT,
-		  int Max_Secs, DP target_sumrule, bool refine)
+		     int Max_Secs, DP target_sumrule, bool refine)
   {
     int paralevel = 0;
     Vect<int> rank(0,1);
     Vect<int> nr_processors(0,1);
 
-    Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    Scan_LiebLin (whichDSF, c_int, L, N, iKmin, iKmax, kBT, Max_Secs, target_sumrule,
+		  refine, paralevel, rank, nr_processors);
 
     return;
   }
 
 
   // Scanning on an excited state defined by a set of Ix2:
-  void Scan_LiebLin (char whichDSF, LiebLin_Bethe_State AveragingState, string defaultScanStatename, int iKmin, int iKmax,
-		  int Max_Secs, DP target_sumrule, bool refine, int paralevel, Vect<int> rank, Vect<int> nr_processors)
+  void Scan_LiebLin (char whichDSF, LiebLin_Bethe_State AveragingState, string defaultScanStatename,
+		     int iKmin, int iKmax, int Max_Secs, DP target_sumrule, bool refine,
+		     int paralevel, Vect<int> rank, Vect<int> nr_processors)
   {
     // This function is as Scan_LiebLin for generic T defined above, except that the
     // averaging is now done on a state defined by AveragingStateIx2
@@ -1409,10 +978,6 @@ namespace ABACUS {
     DP L = AveragingState.L;
     int N = AveragingState.N;
 
-    //LiebLin_Bethe_State GroundState (c_int, L, N);
-    // Make sure the label of AveragingState is properly set to that relative to GS:
-    //AveragingState.Set_Label_from_Ix2 (GroundState.Ix2);
-
     // The label of the Averaging State is by definition the `empty' label
     AveragingState.Set_Label_from_Ix2 (AveragingState.Ix2);
     AveragingState.Compute_All(true);
@@ -1423,15 +988,7 @@ namespace ABACUS {
 
     LiebLin_Bethe_State SeedScanState (c_int, L, Nscan);
     if (whichDSF == 'd' || whichDSF == 'B') SeedScanState.Ix2 = AveragingState.Ix2;
-    // If 'o', remove rightmost and shift quantum numbers by half-integer towards center.
-    // if (whichDSF == 'o') for (int i = 0; i < N-1; ++i) SeedScanState.Ix2[i] = AveragingState.Ix2[i] + 1;
-    // If 'g', add a new particle at the right, after shifting all towards center.
-    //if (whichDSF == 'g') {
-    //for (int i = 0; i < N; ++i) SeedScanState.Ix2[i] = AveragingState.Ix2[i] - 1;
-    //SeedScanState.Ix2[N] = SeedScanState.Ix2[N-1] + 2;
-    //}
     // If 'o', remove midmost and shift quantum numbers by half-integer towards removed one:
-
     if (whichDSF == 'o') {
       for (int i = 0; i < N-1; ++i)
 	SeedScanState.Ix2[i] = AveragingState.Ix2[i + (i >= N/2)] + 1 - 2*(i >= N/2);
@@ -1447,28 +1004,26 @@ namespace ABACUS {
 
     SeedScanState.Set_Label_from_Ix2 (SeedScanState.Ix2);
 
-    //cout << "which DSF = " << whichDSF << endl;
-    //cout << "AveragingState Ix2: " << endl << AveragingState.Ix2 << endl;
-    //cout << "SeedScanState Ix2: " << endl << SeedScanState.Ix2 << endl;
-
     DP kBT = 0.0;
 
     // Perform the scan:
-    General_Scan (whichDSF, iKmin, iKmax, 100000000, kBT, AveragingState, SeedScanState, defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    General_Scan (whichDSF, iKmin, iKmax, 100000000, kBT, AveragingState, SeedScanState, defaultScanStatename,
+		  Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
 
     return;
 
   }
 
   // Simplified function call of the above:
-  void Scan_LiebLin (char whichDSF, LiebLin_Bethe_State AveragingState, string defaultScanStatename, int iKmin, int iKmax,
-		  int Max_Secs, DP target_sumrule, bool refine)
+  void Scan_LiebLin (char whichDSF, LiebLin_Bethe_State AveragingState, string defaultScanStatename,
+		     int iKmin, int iKmax, int Max_Secs, DP target_sumrule, bool refine)
   {
     int paralevel = 0;
     Vect<int> rank(0,1);
     Vect<int> nr_processors(0,1);
 
-    Scan_LiebLin (whichDSF, AveragingState, defaultScanStatename, iKmin, iKmax, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    Scan_LiebLin (whichDSF, AveragingState, defaultScanStatename, iKmin, iKmax, Max_Secs,
+		  target_sumrule, refine, paralevel, rank, nr_processors);
 
     return;
   }
@@ -1491,10 +1046,9 @@ namespace ABACUS {
     else if (whichDSF == 'p') SeedScanState = Add_Particle_at_Center (AveragingState);
     else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
-    //cout << "In General_Scan: SeedScanState = " << SeedScanState << endl;
-
     // Now the scan itself
-    General_Scan (whichDSF, iKmin, iKmax, AveragingState.chain.Nsites, 0.0, AveragingState, SeedScanState, defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    General_Scan (whichDSF, iKmin, iKmax, AveragingState.chain.Nsites, 0.0, AveragingState, SeedScanState,
+		  defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
 
   }
 
@@ -1517,7 +1071,8 @@ namespace ABACUS {
     else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
     // Now the scan itself
-    General_Scan (whichDSF, iKmin, iKmax, AveragingState.chain.Nsites, 0.0, AveragingState, SeedScanState, defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    General_Scan (whichDSF, iKmin, iKmax, AveragingState.chain.Nsites, 0.0, AveragingState, SeedScanState,
+		  defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
 
   }
 
@@ -1540,12 +1095,12 @@ namespace ABACUS {
     else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
     // Now the scan itself
-    General_Scan (whichDSF, iKmin, iKmax, AveragingState.chain.Nsites, 0.0, AveragingState, SeedScanState, defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+    General_Scan (whichDSF, iKmin, iKmax, AveragingState.chain.Nsites, 0.0, AveragingState, SeedScanState,
+		  defaultScanStatename, Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
 
   }
 
 
-  //void Scan_Heis (char whichDSF, DP Delta, DP N, int M, bool fixed_iK, int iKneeded,
   void Scan_Heis (char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax,
 		  int Max_Secs, DP target_sumrule, bool refine, int paralevel, Vect<int> rank, Vect<int> nr_processors)
   {
@@ -1582,7 +1137,8 @@ namespace ABACUS {
       else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
       // Now the scan itself
-      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, SeedScanState, "", Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, SeedScanState, "",
+		    Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
 
     }
 
@@ -1600,7 +1156,8 @@ namespace ABACUS {
       else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
       // Now the scan itself
-      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, SeedScanState, "", Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, SeedScanState, "",
+		    Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
     }
 
     else if (Delta > 1.0) {
@@ -1616,7 +1173,8 @@ namespace ABACUS {
       else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
       // Now the scan itself
-      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, SeedScanState, "", Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
+      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, SeedScanState, "",
+		    Max_Secs, target_sumrule, refine, paralevel, rank, nr_processors);
     }
 
     else ABACUSerror("Delta out of range in Heis_Structure_Factor");
@@ -1636,180 +1194,5 @@ namespace ABACUS {
     return;
   }
 
-  /*
-  // Simplified calls:
-
-  void Scan_Heis (char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax, int Max_Secs, bool refine)
-  {
-    //Scan_Heis (whichDSF, Delta, N, M, false, 0, Max_Secs, refine, 0, 1);
-    Scan_Heis (whichDSF, Delta, N, M, iKmin, iKmax, Max_Secs, 1.0e+6, refine, 0, 1);
-  }
-
-  void Scan_Heis (char whichDSF, DP Delta, int N, int M, int iKneeded, int Max_Secs, bool refine)
-  {
-    //Scan_Heis (whichDSF, Delta, N, M, true, iKneeded, Max_Secs, refine, 0, 1);
-    Scan_Heis (whichDSF, Delta, N, M, iKneeded, iKneeded, Max_Secs, 1.0e+6, refine, 0, 1);
-  }
-
-  void Scan_Heis (char whichDSF, DP Delta, int N, int M, int Max_Secs, bool refine)
-  {
-    //Scan_Heis (whichDSF, Delta, N, M, false, 0, Max_Secs, refine, 0, 1);
-    Scan_Heis (whichDSF, Delta, N, M, 0, N, Max_Secs, 1.0e+6, refine, 0, 1);
-  }
-  */
-  /*
-  void Scan_ODSLF (char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax,
-		   int Max_Secs, DP target_sumrule, bool refine, int rank, int nr_processors)
-  {
-    // This function scans the Hilbert space of the spinless fermions related to Heisenberg spin-1/2 chain
-    // for the function identified by whichDSF.
-
-    // whichDSF == 'Z':  canonical partition function
-    // whichDSF == 'm':  S^{-+}
-    // whichDSF == 'z':  S^{zz}
-    // whichDSF == 'p':  S^{+-}
-    // whichDSF == 'a':  < S^z_j S^_{j+1} S^z_l S^z_{l+1} > for RIXS
-
-    Heis_Chain BD1(1.0, Delta, 0.0, N);
-
-    Vect_INT Nrapidities_groundstate(0, BD1.Nstrings);
-
-    Nrapidities_groundstate[0] = M;
-
-    ODSLF_Base baseconfig_groundstate(BD1, Nrapidities_groundstate);
-
-    ODSLF_Ix2_Offsets baseoffsets(baseconfig_groundstate, 0ULL);
-
-    if ((Delta > 0.0) && (Delta < 1.0)) {
-
-      ODSLF_XXZ_Bethe_State GroundState(BD1, baseconfig_groundstate);
-      GroundState.Compute_All(true);
-
-      // The ground state is now fully defined.  Now the scan itself
-      //General_Scan (whichDSF, fixed_iK, iKneeded, N, GroundState, GroundState, Max_Secs, refine, rank, nr_processors);
-      General_Scan (whichDSF, iKmin, iKmax, N, 0.0, GroundState, GroundState, Max_Secs, target_sumrule, refine, rank, nr_processors);
-
-    }
-  */
-    /*
-    else if (Delta == 1.0) {
-
-      XXX_Bethe_State GroundState(BD1, baseconfig_groundstate);
-      GroundState.Compute_All(true);
-
-      // The ground state is now fully defined.  Now the scan itself
-      //General_Scan (whichDSF, fixed_iK, iKneeded, N, GroundState, GroundState, Max_Secs, refine, rank, nr_processors);
-      General_Scan (whichDSF, iKmin, iKmax, N, GroundState, GroundState, Max_Secs, refine, rank, nr_processors);
-    }
-
-    else if (Delta > 1.0) {
-
-      XXZ_gpd_Bethe_State GroundState(BD1, baseconfig_groundstate);
-      GroundState.Compute_All(true);
-
-      // The ground state is now fully defined.  Now the scan itself
-      //General_Scan (whichDSF, fixed_iK, iKneeded, N, GroundState, GroundState, Max_Secs, refine, rank, nr_processors);
-      General_Scan (whichDSF, iKmin, iKmax, N, GroundState, GroundState, Max_Secs, refine, rank, nr_processors);
-    }
-    */
-  /*
-    else ABACUSerror("Delta out of range in ODSLF Structure Factor");
-
-    return;
-  }
-
-  // Simplified calls:
-
-  void Scan_ODSLF (char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax, int Max_Secs, bool refine)
-  {
-    //Scan_Heis (whichDSF, Delta, N, M, false, 0, Max_Secs, refine, 0, 1);
-    Scan_ODSLF (whichDSF, Delta, N, M, iKmin, iKmax, Max_Secs, 1.0e+6, refine, 0, 1);
-  }
-
-  void Scan_ODSLF (char whichDSF, DP Delta, int N, int M, int iKneeded, int Max_Secs, bool refine)
-  {
-    //Scan_Heis (whichDSF, Delta, N, M, true, iKneeded, Max_Secs, refine, 0, 1);
-    Scan_ODSLF (whichDSF, Delta, N, M, iKneeded, iKneeded, Max_Secs, 1.0e+6, refine, 0, 1);
-  }
-
-  void Scan_ODSLF (char whichDSF, DP Delta, int N, int M, int Max_Secs, bool refine)
-  {
-    //Scan_Heis (whichDSF, Delta, N, M, false, 0, Max_Secs, refine, 0, 1);
-    Scan_ODSLF (whichDSF, Delta, N, M, 0, N, Max_Secs, 1.0e+6, refine, 0, 1);
-  }
-
-
-
-  // Geometric quenches
-
-  void Scan_LiebLin_Geometric_Quench (DP c_int, DP L_1, int type_id_1, long long int id_1, DP L_2, int N, int iK_UL,
-				   int Max_Secs, DP target_sumrule, bool refine)
-  {
-    // We decompose the wavefunction of state 1 (living on length L_1) into
-    // the wavefunctions living on length L_2.
-
-    // IMPORTANT ASSUMPTIONS:
-
-    LiebLin_Bethe_State lstate(c_int, L_1, N, iK_UL, type_id_1);
-    lstate.Set_to_id(id_1);
-    lstate.Compute_All(true);
-
-    // We now put the rapidities and norm into a state in length L_2,
-    // which will serve as basis for the scan.
-
-    LiebLin_Bethe_State lstate2(c_int, L_2, N, iK_UL, type_id_1);
-    lstate2.Set_to_id (0LL);
-    lstate2.Compute_All(true);
-
-    char whichDSF = 'q';
-
-    //General_Scan (whichDSF, false, 0, 100000000, lstate, lstate2, Max_Secs, refine, 0, 1);
-    General_Scan (whichDSF, -iK_UL, iK_UL, 100000000, 0.0, lstate, lstate2, Max_Secs, target_sumrule, refine, 0, 1);
-
-    return;
-  }
-
-
-  void Scan_Heis_Geometric_Quench (DP Delta, int N_1, int M, long long int base_id_1, long long int type_id_1, long long int id_1,
-				   int N_2, int iKmin, int iKmax, int Max_Secs, DP target_sumrule, bool refine)
-  {
-    // We decompose the wavefunction of state 1 (living on length L_1) into
-    // the wavefunctions living on length L_2.
-
-    Heis_Chain BD_1(1.0, Delta, 0.0, N_1);
-    Heis_Chain BD_2(1.0, Delta, 0.0, N_2);
-
-
-    if ((Delta > 0.0) && (Delta < 1.0)) {
-      ABACUSerror("Geometric quench not yet implemented for XXZ.");
-    }
-
-    else if (Delta == 1.0) {
-
-      XXX_Bethe_State BasicState_1(BD_1, base_id_1, type_id_1);
-      BasicState_1.Set_to_id (id_1);
-      BasicState_1.Compute_All(true);
-
-      // Ref state for scanning:
-      XXX_Bethe_State BasicState_2(BD_2, M);
-      BasicState_2.Set_to_id (0LL);
-      BasicState_2.Compute_All(true);
-
-      char whichDSF = 'q';
-
-      // The ground state is now fully defined.  Now the scan itself
-      //General_Scan (whichDSF, fixed_iK, iKneeded, N, GroundState, GroundState, Max_Secs, refine, rank, nr_processors);
-      General_Scan (whichDSF, iKmin, iKmax, N_2, 0.0, BasicState_1, BasicState_2, Max_Secs, target_sumrule, refine, 0, 1);
-    }
-
-    else if (Delta > 1.0) {
-      ABACUSerror("Geometric quench not yet implemented for XXZ_gpd.");
-    }
-
-    else ABACUSerror("Delta out of range in Heis_Structure_Factor");
-
-    return;
-  }
-  */
 
 } // namespace ABACUS
diff --git a/src/SCAN/General_Scan_Parallel.cc b/src/SCAN/General_Scan_Parallel.cc
index 62bad54..f71e539 100644
--- a/src/SCAN/General_Scan_Parallel.cc
+++ b/src/SCAN/General_Scan_Parallel.cc
@@ -52,7 +52,8 @@ namespace ABACUS {
 
     Vect<Scan_Thread_Data> thr_data_par(nr_processors_at_newlevel);
     for (int rank = 0; rank < nr_processors_at_newlevel; ++rank)
-      thr_data_par[rank] = Scan_Thread_Data (THRDIRS_stringstream[rank].str(), false); // put refine == false here to avoid loading any deprecated data
+      // put refine == false here to avoid loading any deprecated data
+      thr_data_par[rank] = Scan_Thread_Data (THRDIRS_stringstream[rank].str(), false);
 
     // Transfer all the existing threads into the new ones:
     int rankindex = 0;
@@ -83,59 +84,12 @@ namespace ABACUS {
     return;
   }
 
-  /*
-  void Create_Empty_Files (string prefix, char whichDSF, int nr_processors_at_newlevel)
-  {
-    // This function creates, for convenience, a set of 'empty' files, so a full set of files is available at all paralevels.
-    for (int rank = 0; rank < nr_processors_at_newlevel; ++rank) {
-      stringstream RAW_stringstream;    string RAW_string;
-      stringstream INADM_stringstream;    string INADM_string;
-      stringstream CONV0_stringstream;    string CONV0_string;
-      stringstream LOG_stringstream;    string LOG_string;
-      //stringstream THR_stringstream;    string THR_string;
-      stringstream SRC_stringstream;    string SRC_string;
-      stringstream FSR_stringstream;    string FSR_string;
-      stringstream SUM_stringstream;    string SUM_string;
-
-      RAW_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".raw";
-      INADM_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".inadm";
-      CONV0_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".conv0";
-      LOG_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".log";
-      //THR_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".thr";
-      SRC_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".src";
-      FSR_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".fsr";
-      SUM_stringstream << prefix << "_" << rank << "_" << nr_processors_at_newlevel << ".sum";
-
-      RAW_string = RAW_stringstream.str();    const char* RAW_Cstr = RAW_string.c_str();
-      INADM_string = INADM_stringstream.str();    const char* INADM_Cstr = INADM_string.c_str();
-      CONV0_string = CONV0_stringstream.str();    const char* CONV0_Cstr = CONV0_string.c_str();
-      LOG_string = LOG_stringstream.str();    const char* LOG_Cstr = LOG_string.c_str();
-      //THR_string = THR_stringstream.str();    const char* THR_Cstr = THR_string.c_str();
-      SRC_string = SRC_stringstream.str();    const char* SRC_Cstr = SRC_string.c_str();
-      FSR_string = FSR_stringstream.str();    const char* FSR_Cstr = FSR_string.c_str();
-      SUM_string = SUM_stringstream.str();    const char* SUM_Cstr = SUM_string.c_str();
-
-      // We open and close these files (except for SUM, which we fill with a zero-valued scan_info
-      fstream RAW_file; RAW_file.open(RAW_Cstr); RAW_file.close();
-      fstream INADM_file; INADM_file.open(INADM_Cstr); INADM_file.close();
-      fstream CONV0_file; CONV0_file.open(CONV0_Cstr); CONV0_file.close();
-      fstream LOG_file; LOG_file.open(LOG_Cstr); LOG_file.close();
-      Scan_Info emptyinfo; emptyinfo.Save(SRC_Cstr);
-      fstream FSR_file; FSR_file.open(FSR_Cstr); FSR_file.close();
-      fstream SUM_file; SUM_file.open(SUM_Cstr); SUM_file.close();
-
-    }
-  }
-  */
 
   void Merge_raw_Files (string prefix, char whichDSF, int nr_processors_at_newlevel)
   {
 
     // Open the original raw file:
     stringstream RAW_stringstream;    string RAW_string;
-    //RAW_stringstream << prefix;
-    //if (whichDSF == 'Z') RAW_stringstream << ".dat";
-    //else RAW_stringstream << ".raw";
     RAW_stringstream << prefix << ".raw";
     RAW_string = RAW_stringstream.str();    const char* RAW_Cstr = RAW_string.c_str();
 
@@ -153,23 +107,18 @@ namespace ABACUS {
       ifstream RAW_infile;
       RAW_infile.open(RAW_in_Cstr);
       if (RAW_infile.fail()) {
-	//cout << RAW_in_Cstr << endl;
-	//ABACUSerror ("Could not open file.");
 	continue; // if file isn't there, just continue...
       }
 
       DP omega;
       int iK;
       DP FF;
-      //int conv;
       DP dev;
       string label;
       int nr, nl;
       while (RAW_infile.peek() != EOF) {
-	//RAW_infile >> omega >> iK >> FF >> conv >> label;
 	RAW_infile >> omega >> iK >> FF >> dev >> label;
 	if (whichDSF == '1') RAW_infile >> nr >> nl;
-	//RAW_outfile << endl << omega << "\t" << iK << "\t" << FF << "\t" << conv << "\t" << label;
 	RAW_outfile << endl << omega << "\t" << iK << "\t" << FF << "\t" << dev << "\t" << label;
 	if (whichDSF == '1') RAW_outfile << "\t" << nr << "\t" << nl;
       }
@@ -244,7 +193,6 @@ namespace ABACUS {
     SUM_string = SUM_stringstream.str();    const char* SUM_Cstr = SUM_string.c_str();
 
     // Load the original info:
-    //ScanStateList.Load_Info (SUM_Cstr);  // Not needed anymore: rank 0 has loaded the original info
     if (file_exists(SUM_Cstr)) ScanStateList.Load_Info (SUM_Cstr);  // Needed again!
 
     // Load all other info:
@@ -470,12 +418,9 @@ namespace ABACUS {
   //****************************************************************************//
   // Model-specific functions:
 
-  //void Prepare_Parallel_Scan_LiebLin (char whichDSF, DP c_int, DP L, int N, int iK_UL, bool fixed_iK, int iKneeded,
   void Prepare_Parallel_Scan_LiebLin (char whichDSF, DP c_int, DP L, int N, int iKmin, int iKmax, DP kBT,
-				   string defaultScanStatename,
-				   //int Max_Secs, bool refine, int rank,
-				   int paralevel, Vect<int> rank_lower_paralevels, Vect<int> nr_processors_lower_paralevels,
-				   int nr_processors_at_newlevel)
+				      string defaultScanStatename, int paralevel, Vect<int> rank_lower_paralevels,
+				      Vect<int> nr_processors_lower_paralevels, int nr_processors_at_newlevel)
   {
     // From an existing scan, this function splits the threads into
     // nr_processors_at_newlevel separate files, from which the parallel process
@@ -485,35 +430,21 @@ namespace ABACUS {
 
     // Define file name
     stringstream filenameprefix;
-    //Data_File_Name (filenameprefix, whichDSF, fixed_iK, iKneeded, GroundState, GroundState);
     Data_File_Name (filenameprefix, whichDSF, iKmin, iKmax, kBT, GroundState, GroundState, defaultScanStatename);
     for (int i = 0; i < paralevel - 1; ++i) filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
     string prefix = filenameprefix.str();
 
     Split_thr_Files (prefix, whichDSF, nr_processors_at_newlevel);
 
-    //Create_Empty_Files (prefix, whichDSF, nr_processors_at_newlevel);
-
     return;
   }
 
-  //void Wrapup_Parallel_Scan_LiebLin (char whichDSF, DP c_int, DP L, int N, int iK_UL, bool fixed_iK, int iKneeded,
   void Wrapup_Parallel_Scan_LiebLin (char whichDSF, DP c_int, DP L, int N, int iKmin, int iKmax, DP kBT,
-				  string defaultScanStatename,
-				  //int Max_Secs, bool refine, int rank,
-				  int paralevel, Vect<int> rank_lower_paralevels, Vect<int> nr_processors_lower_paralevels,
-				  int nr_processors_at_newlevel)
+				     string defaultScanStatename, int paralevel, Vect<int> rank_lower_paralevels,
+				     Vect<int> nr_processors_lower_paralevels, int nr_processors_at_newlevel)
   {
-    //DP epsilon = log(L)/L;
-
-    //LiebLin_Bethe_State GroundState (c_int, L, N);
-    //LiebLin_Bethe_State spstate = Canonical_Saddle_Point_State (c_int, L, N, kBT, epsilon);
-    //LiebLin_Bethe_State spstate = Canonical_Saddle_Point_State (c_int, L, N, kBT);
-
     // Read the saddle-point state from the sps file:
     stringstream SPS_stringstream;  string SPS_string;
-    //SPS_stringstream << "Tgt0_";
-    //Data_File_Name (SPS_stringstream, whichDSF, iKmin, iKmax, kBT, spstate, SeedScanState, "");
     Data_File_Name (SPS_stringstream, whichDSF, c_int, L, N, iKmin, iKmax, kBT, 0.0, "");
     SPS_stringstream << ".sps";
     SPS_string = SPS_stringstream.str();    const char* SPS_Cstr = SPS_string.c_str();
@@ -546,14 +477,13 @@ namespace ABACUS {
     if (whichDSF == 'g') Nscan = N + 1;
 
     LiebLin_Bethe_State SeedScanState = spstate;
-    //if (whichDSF == 'o' || whichDSF == 'g') SeedScanState = Canonical_Saddle_Point_State (c_int, L, Nscan, kBT, epsilon);
     if (whichDSF == 'o' || whichDSF == 'g') SeedScanState = Canonical_Saddle_Point_State (c_int, L, Nscan, kBT);
 
     // Define file name
     stringstream filenameprefix;
-    //Data_File_Name (filenameprefix, whichDSF, fixed_iK, iKneeded, GroundState, GroundState);
     Data_File_Name (filenameprefix, whichDSF, iKmin, iKmax, kBT, spstate, SeedScanState, defaultScanStatename);
-    for (int i = 0; i < paralevel - 1; ++i) filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
+    for (int i = 0; i < paralevel - 1; ++i)
+      filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
     string prefix = filenameprefix.str();
 
 
@@ -570,19 +500,7 @@ namespace ABACUS {
     Merge_inadm_conv0_src_stat_log_Files (prefix, whichDSF, nr_processors_at_newlevel);
     // This also puts some digested info in log file.
 
-    // Evaluate f-sumrule:
-    /*
-    stringstream RAW_stringstream;    string RAW_string;
-    RAW_stringstream << prefix << ".raw";
-    RAW_string = RAW_stringstream.str();    const char* RAW_Cstr = RAW_string.c_str();
-
-    stringstream FSR_stringstream;    string FSR_string;
-    FSR_stringstream << prefix << ".fsr";
-    FSR_string = FSR_stringstream.str();    const char* FSR_Cstr = FSR_string.c_str();
-    */
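+    // Evaluate the f-sumrule on the merged raw data (skipped when only a single iK is computed,
+    // or for whichDSF == 'q'):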
     DP Chem_Pot = Chemical_Potential (spstate);
-    //if (!fixed_iK) if (whichDSF != 'q') Evaluate_F_Sumrule (whichDSF, GroundState, Chem_Pot, RAW_Cstr, FSR_Cstr);
-    //if (iKmin != iKmax) if (whichDSF != 'q') Evaluate_F_Sumrule (whichDSF, GroundState, Chem_Pot, iKmin, iKmax, RAW_Cstr, FSR_Cstr);
     if (iKmin != iKmax) if (whichDSF != 'q') Evaluate_F_Sumrule (prefix, whichDSF, spstate, Chem_Pot, iKmin, iKmax);
 
     // ... and we're done.
@@ -595,7 +513,6 @@ namespace ABACUS {
   // Heisenberg:
 
   void Prepare_Parallel_Scan_Heis (char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax,
-				   //int Max_Secs, bool refine, int rank,
 				   int paralevel, Vect<int> rank_lower_paralevels, Vect<int> nr_processors_lower_paralevels,
 				   int nr_processors_at_newlevel)
   {
@@ -611,8 +528,6 @@ namespace ABACUS {
 
     Heis_Base baseconfig_groundstate(BD1, Nrapidities_groundstate);
 
-    //Ix2_Offsets baseoffsets(baseconfig_groundstate, 0ULL);
-
     // Define file name
     stringstream filenameprefix;
 
@@ -639,7 +554,8 @@ namespace ABACUS {
 
     else ABACUSerror("Delta out of range in Prepare_Parallel_Scan_Heis");
 
-    for (int i = 0; i < paralevel - 1; ++i) filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
+    for (int i = 0; i < paralevel - 1; ++i)
+      filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
     string prefix = filenameprefix.str();
 
     Split_thr_Files (prefix, whichDSF, nr_processors_at_newlevel);
@@ -664,8 +580,6 @@ namespace ABACUS {
 
     Heis_Base baseconfig_groundstate(BD1, Nrapidities_groundstate);
 
-    //Ix2_Offsets baseoffsets(baseconfig_groundstate, 0ULL);
-
     // Define file name
     stringstream filenameprefix;
     string prefix;
@@ -681,7 +595,8 @@ namespace ABACUS {
       else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
       Data_File_Name (filenameprefix, whichDSF, iKmin, iKmax, 0.0, GroundState, SeedScanState, "");
-      for (int i = 0; i < paralevel - 1; ++i) filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
+      for (int i = 0; i < paralevel - 1; ++i)
+	filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
       prefix = filenameprefix.str();
 
       // Merge sum files
@@ -715,7 +630,8 @@ namespace ABACUS {
       else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
       Data_File_Name (filenameprefix, whichDSF, iKmin, iKmax, 0.0, GroundState, SeedScanState, "");
-      for (int i = 0; i < paralevel - 1; ++i) filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
+      for (int i = 0; i < paralevel - 1; ++i)
+	filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
       prefix = filenameprefix.str();
 
       // Merge sum files
@@ -748,7 +664,8 @@ namespace ABACUS {
       else ABACUSerror("Unknown whichDSF in Scan_Heis.");
 
       Data_File_Name (filenameprefix, whichDSF, iKmin, iKmax, 0.0, GroundState, SeedScanState, "");
-      for (int i = 0; i < paralevel - 1; ++i) filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
+      for (int i = 0; i < paralevel - 1; ++i)
+	filenameprefix << "_" << rank_lower_paralevels[i] << "_" << nr_processors_lower_paralevels[i];
       prefix = filenameprefix.str();
 
       // Merge sum files
@@ -773,7 +690,7 @@ namespace ABACUS {
     else ABACUSerror("Delta out of range in Prepare_Parallel_Scan_Heis");
 
 
-     // ... and we're done.
+    // ... and we're done.
 
     return;
   }
diff --git a/src/SCAN/Particle_Hole_Excitation_Cost.cc b/src/SCAN/Particle_Hole_Excitation_Cost.cc
index 38fe964..5c9b39f 100644
--- a/src/SCAN/Particle_Hole_Excitation_Cost.cc
+++ b/src/SCAN/Particle_Hole_Excitation_Cost.cc
@@ -24,10 +24,7 @@ namespace ABACUS {
     // Estimates the cost of adding a particle-hole excitation to an intermediate state
     DP ph_cost = 1.0;
 
-    //if (whichDSF == 'd') ph_cost = ABACUS::min(0.1, 1.0/sqrt(AveragingState.c_int));
-    //if (whichDSF == 'd') ph_cost = ABACUS::min(0.1, 1.0/AveragingState.c_int);
     if (whichDSF == 'd') ph_cost = ABACUS::min(0.01, 0.1/AveragingState.c_int);
-    //if (whichDSF == 'd') ph_cost = ABACUS::min(0.001, 0.01/AveragingState.c_int);
     else if (whichDSF == 'o') ph_cost = 0.01;
     else if (whichDSF == 'g') ph_cost = 0.01;
     else if (whichDSF == 'Z') ph_cost = 1.0;
diff --git a/src/SCAN/Scan_Info.cc b/src/SCAN/Scan_Info.cc
index 49fdaea..a554eb1 100644
--- a/src/SCAN/Scan_Info.cc
+++ b/src/SCAN/Scan_Info.cc
@@ -20,13 +20,9 @@ using namespace ABACUS;
 namespace ABACUS {
 
   Scan_Info::Scan_Info() :
-    //sumrule_obtained(0.0), Nfull(0LL), Ninadm(0LL), Ndata(0LL), Ndata_conv(0LL), Ndata_conv0(0LL), CPU_ticks(0LL), CPU_ticks_TOT(0LL)  {}
-    //sumrule_obtained(0.0), Nfull(0.0), Ninadm(0LL), Ndata(0LL), Ndata_conv(0LL), Ndata_conv0(0LL), CPU_ticks(0LL), CPU_ticks_TOT(0LL)  {}
     sumrule_obtained(0.0), Nfull(0.0), Ninadm(0LL), Ndata(0LL), Ndata_conv(0LL), Ndata_conv0(0LL), TT(0.0)  {}
 
-  //Scan_Info::Scan_Info (DP sr, long long int Nf, long long int Ni, long long int Nd, long long int Ndc, long long int Ndc0, long long int t) :
   Scan_Info::Scan_Info (DP sr, DP Nf, long long int Ni, long long int Nd, long long int Ndc, long long int Ndc0, double t) :
-    //sumrule_obtained(sr), Nfull(Nf), Ninadm(Ni), Ndata(Nd), Ndata_conv(Ndc), Ndata_conv0(Ndc0), CPU_ticks(t), CPU_ticks_TOT(t) {}
     sumrule_obtained(sr), Nfull(Nf), Ninadm(Ni), Ndata(Nd), Ndata_conv(Ndc), Ndata_conv0(Ndc0), TT(t) {}
 
   void Scan_Info::Save (const char* outfile_Cstr)
@@ -36,19 +32,17 @@ namespace ABACUS {
     outfile.open(outfile_Cstr);
     if (outfile.fail()) ABACUSerror("Could not open outfile... ");
 
-    //outfile.setf(ios::fixed);
-    //outfile.setf(ios::showpoint);
     outfile.precision(16);
 
     int TT_hr = int(TT/3600);
     int TT_min = int((TT - 3600.0*TT_hr)/60);
 
-    outfile << setw(25) << setprecision(16) << sumrule_obtained << setw(25) << Nfull << setw(16) << Ninadm << setw(16) << Ndata << setw(16) << Ndata_conv << setw(16) << Ndata_conv0
-      //<< "\t" << CPU_ticks/CLOCKS_PER_SEC << "\t" << CPU_ticks_TOT/CLOCKS_PER_SEC << endl;
-      //<< setw(16) << std::fixed << setprecision(3) << TT << endl;
-	    << "\t" << TT_hr << " h " << TT_min << " m " << std::fixed << setprecision(3) << TT - 3600*TT_hr - 60*TT_min << " s" << endl;
-    //outfile << "sumrule_obtained \t Nfull \t Ninadm \t Ndata \t Ndata_conv \t Ndata_conv0 \t T \t TT.";
-    outfile << setw(25) << "sumrule_obtained" << setw(25) << "Nfull" << setw(16) << "Ninadm" << setw(16) << "Ndata" << setw(16) << "Ndata_conv" << setw(16) << "Ndata_conv0" << setw(16) << "TT." << endl;
+    outfile << setw(25) << setprecision(16) << sumrule_obtained << setw(25) << Nfull
+	    << setw(16) << Ninadm << setw(16) << Ndata << setw(16) << Ndata_conv << setw(16) << Ndata_conv0
+	    << "\t" << TT_hr << " h " << TT_min << " m "
+	    << std::fixed << setprecision(3) << TT - 3600*TT_hr - 60*TT_min << " s" << endl;
+    outfile << setw(25) << "sumrule_obtained" << setw(25) << "Nfull" << setw(16) << "Ninadm"
+	    << setw(16) << "Ndata" << setw(16) << "Ndata_conv" << setw(16) << "Ndata_conv0" << setw(16) << "TT." << endl;
     outfile.close();
 
     return;
@@ -67,13 +61,10 @@ namespace ABACUS {
     DP TT_sec;
     char a;
 
-    //infile >> sumrule_obtained >> Nfull >> Ninadm >> Ndata >> Ndata_conv >> Ndata_conv0 >> CPU_ticks >> CPU_ticks_TOT;
-    //infile >> sumrule_obtained >> Nfull >> Ninadm >> Ndata >> Ndata_conv >> Ndata_conv0 >> TT;
-    infile >> sumrule_obtained >> Nfull >> Ninadm >> Ndata >> Ndata_conv >> Ndata_conv0 >> TT_hr >> a >> TT_min >> a >> TT_sec >> a;
+    infile >> sumrule_obtained >> Nfull >> Ninadm >> Ndata >> Ndata_conv >> Ndata_conv0
+	   >> TT_hr >> a >> TT_min >> a >> TT_sec >> a;
 
     TT = 3600.0 * TT_hr + 60.0* TT_min + TT_sec;
-    //CPU_ticks_TOT *= CLOCKS_PER_SEC; // correct for factor in Save function
-    //CPU_ticks = 0;  // reset CPU ticks.
 
     infile.close();
 
@@ -84,12 +75,12 @@ namespace ABACUS {
   {
     s.ios::unsetf(ios::scientific);
     return s << " sr " << setprecision(14) << info.sumrule_obtained
-	     << "\tNfull " << std::fixed << setprecision(0) << info.Nfull << "\t Ninadm " << info.Ninadm << " Ndata " << info.Ndata
+	     << "\tNfull " << std::fixed << setprecision(0) << info.Nfull
+	     << "\t Ninadm " << info.Ninadm << " Ndata " << info.Ndata
 	     << "\t_conv " << info.Ndata_conv << " _conv0 " << info.Ndata_conv0
-      //<< " t " << info.CPU_ticks/CLOCKS_PER_SEC << "s"
-      //<< " TT " << info.CPU_ticks_TOT/CLOCKS_PER_SEC;
-      //<< "\tTT " << std::fixed << setprecision(3) << info.TT;
-	     << "\tTT " << int(info.TT/3600) << " h " << int((info.TT - 3600.0 * int(info.TT/3600))/60) << " m " << std::fixed << setprecision(3) << info.TT - 3600.0 * int(info.TT/3600) - 60.0 * int((info.TT - 3600.0 * int(info.TT/3600))/60) << " s";
+	     << "\tTT " << int(info.TT/3600) << " h " << int((info.TT - 3600.0 * int(info.TT/3600))/60)
+	     << " m " << std::fixed << setprecision(3)
+	     << info.TT - 3600.0 * int(info.TT/3600) - 60.0 * int((info.TT - 3600.0 * int(info.TT/3600))/60) << " s";
   }
 
 
diff --git a/src/SCAN/Scan_Thread_Data.cc b/src/SCAN/Scan_Thread_Data.cc
index ca4d9c2..f75fbcb 100644
--- a/src/SCAN/Scan_Thread_Data.cc
+++ b/src/SCAN/Scan_Thread_Data.cc
@@ -57,15 +57,13 @@ namespace ABACUS {
     }
 
     filename = Vect<string> (nlists);
-    //file = Vect<fstream*> (nlists);
-    //file_is_open = Vect<bool> (nlists);
 
     for (int il = 0; il < nlists; ++il) {
       stringstream filename_strstream;
       filename_strstream << thrdir_name << "/" << il << ".thr";
       filename[il] = filename_strstream.str();
-      if (!refine) remove(filename[il].c_str()); // the file is deleted to make sure we don't interfere with a previous (failed) computation
-      //file_is_open[il] = false;
+      if (!refine) remove(filename[il].c_str());
+      // the file is deleted to make sure we don't interfere with a previous (failed) computation
     }
     if (!refine) {
       // remove the nthreads.dat file
@@ -79,11 +77,6 @@ namespace ABACUS {
 
   Scan_Thread_Data::~Scan_Thread_Data()
   {
-    //for (int il = 0; il < nlists; ++il)
-    //if (file_is_open[il]) {
-    //	(*file[il]).close();
-    //	delete file[il];
-    //}
   }
 
   bool Scan_Thread_Data::Increase_Memory_Size (int il, int nr_to_add)
@@ -118,21 +111,11 @@ namespace ABACUS {
 
   void Scan_Thread_Data::Include_Thread (int il, string label_ref, int type_ref)
   {
-    //cout << "Calling Include_Threads..." << endl;
-
-    if (il < 0 || il > nlists - 1) ABACUSerror("il out of range in Scan_Thread_Data::Include_Thread.");
-
-    //cout << "\t\tIncluding thread " << label_ref << "\t" << type_ref << " in list with il = " << il << endl;
+    if (il < 0 || il > nlists - 1)
+      ABACUSerror("il out of range in Scan_Thread_Data::Include_Thread.");
 
     if (il < lowest_il_with_nthreads_neq_0) lowest_il_with_nthreads_neq_0 = il;
 
-    // append to file
-    //if (!file_is_open[il]) {
-    //file[il] = new fstream(filename[il].c_str(), fstream::out);
-    //file_is_open[il] = true;
-    //}
-    //*file[il] << label_ref << "\t" << type_ref << endl;
-
     // Keep in memory for now:
     if (nthreads_in_memory[il] > dim[il] - 10) {
       (*this).Increase_Memory_Size (il, dim[il]);
@@ -158,23 +141,13 @@ namespace ABACUS {
       label[il] = Vect<string> (dim[il]);
       type[il] = Vect<int> (dim[il]);
     }
-
-    //cout << "\t\tDone including thread." << endl;
-    //char a;
-    //cin >> a;
-    //cout << "OK for Include_Threads..." << endl;
-
   }
 
 
   Vect<Scan_Thread> Scan_Thread_Data::Extract_Next_Scan_Threads ()
   {
-    //cout << "Calling Extract_Next_Scan_Threads..." << endl;
-
     // Returns a vector of threads which are next in line for scanning.
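+    // All nthreads_total[il_used] threads of the lowest non-empty list are returned in one go;
+    // that list's in-memory buffers are then reset, its .thr file is removed, and
+    // lowest_il_with_nthreads_neq_0 is advanced to the next non-empty list.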
 
-    //cout << "Here 1" << endl;
-
     int il_used = lowest_il_with_nthreads_neq_0;
     Vect<Scan_Thread> next_in_line(nthreads_total[il_used]);
 
@@ -206,25 +179,6 @@ namespace ABACUS {
     type[il_used] = Vect<int> (dim[il_used]);
     remove(filename[il_used].c_str());
 
-    /* Moved to Include_Thread
-    // We save the higher-index in-memory threads to files if they are big enough:
-    for (int il = il_used + 1; il < nlists; ++il)
-      //if (nthreads_in_memory[il] > 0) {
-      if (nthreads_in_memory[il] > 1000) {
-	fstream outfile;
-	outfile.open(filename[il].c_str(), fstream::out | fstream::app);
-	for (int it = 0; it < nthreads_in_memory[il]; ++it)
-	  outfile << label[il][it] << "\t" << type[il][it] << endl;
-	outfile.close();
-	nthreads_on_disk[il] += nthreads_in_memory[il];
-
-	// We then reset these memory buffers
-	dim[il] = 100;
-	nthreads_in_memory[il] = 0;
-	label[il] = Vect<string> (dim[il]);
-	type[il] = Vect<int> (dim[il]);
-      }
-    */
     // Find the next non-empty list:
     do {
       lowest_il_with_nthreads_neq_0 += 1;
@@ -234,11 +188,6 @@ namespace ABACUS {
       }
     } while (nthreads_total[lowest_il_with_nthreads_neq_0] == 0);
 
-    //cout << "Set lowest_il_with_nthreads_neq_0 to " << lowest_il_with_nthreads_neq_0 << endl;
-    //cin >> a;
-
-    //cout << "OK for Extract_Next_Scan_Threads." << endl;
-
     return(next_in_line);
   }
 
@@ -258,7 +207,8 @@ namespace ABACUS {
 
   void Scan_Thread_Data::Flush_to_Disk (int il)
   {
-    if (il < 0 || il > nlists - 1) ABACUSerror("il out of range in Scan_Thread_Data::Flush_to_Disk.");
+    if (il < 0 || il > nlists - 1)
+      ABACUSerror("il out of range in Scan_Thread_Data::Flush_to_Disk.");
 
     if (nthreads_in_memory[il] > 0) {
       fstream outfile;
@@ -288,7 +238,8 @@ namespace ABACUS {
     string nthreads_outfile_str = nthreads_outfile_strstream.str();
 
     nthreads_outfile.open(nthreads_outfile_str.c_str());
-    if (nthreads_outfile.fail()) ABACUSerror("Could not open outfile in Scan_Thread_Data::Save... ");
+    if (nthreads_outfile.fail())
+      ABACUSerror("Could not open outfile in Scan_Thread_Data::Save... ");
 
     //cout << "Saving threads: nthreads_tot vector is" << endl;
     for (int il = 0; il < nlists; ++il) {
diff --git a/src/TBA/Root_Density.cc b/src/TBA/Root_Density.cc
index 7aa1625..68c424c 100644
--- a/src/TBA/Root_Density.cc
+++ b/src/TBA/Root_Density.cc
@@ -19,439 +19,347 @@ using namespace ABACUS;
 
 namespace ABACUS {
 
-/************************************/
-/*
-struct Root_Density {
 
-  int Npts;  // how many points are used to describe each function
-  DP lambdamax;  // what the largest rapidity is
-  Vect_DP lambda;  // rapidity vector
-  Vect_DP dlambda; // differential element
-  Vect_DP value;  // the root density itself
-  Vect_DP prev_value;  // results of previous iteration
-  DP diff;   // relative differences with previous iteration
-  bool value_infty_set;  // boolean, true if asymptotic value set
-  DP value_infty;  // asymptotic value, computed analytically
-
-  Root_Density ();
-  Root_Density (int Npts_ref, DP lambdamax_ref);
-
-  Root_Density& operator= (const Root_Density& RefDensity);
-
-  DP Return_Value (DP lambda_ref);  // evaluates the function for any argument using linear interpolation
-  DP Set_Asymptotics (DP value_infty_ref);  // sets value for lambda >= lambdamax
-
-  Root_Density Compress_and_Match_Densities (DP comp_factor);  // returns a Root_Density with fewer points
-};
-*/
-
-Root_Density::Root_Density ()
-  : Npts(1), lambdamax(0.0), lambda(Vect_DP(0.0, Npts)), dlambda(Vect_DP(0.0, Npts)), value(Vect_DP(0.0, Npts)),
-    prev_value(Vect_DP(0.0, Npts)), diff(1.0e+6), value_infty_set(false), value_infty(0.0)
-{
-}
-
-Root_Density::Root_Density (int Npts_ref, DP lambdamax_ref)
-  : Npts(Npts_ref), lambdamax(lambdamax_ref), lambda(Vect_DP(Npts)), dlambda(Vect_DP(0.0, Npts)), value(Vect_DP(0.0, Npts)),
-    prev_value(Vect_DP(0.0, Npts)), diff(1.0e+6), value_infty_set(false), value_infty(0.0)
-{
-  for (int i = 0; i < value.size(); ++i) {
-    lambda[i] = lambdamax * (-(Npts - 1.0) + 2*i)/Npts;
-    dlambda[i] = 2.0 * lambdamax/Npts;
-  }
-}
-
-Root_Density& Root_Density::operator= (const Root_Density& RefDensity)
-{
-  if (this != &RefDensity) {
-    Npts = RefDensity.Npts;
-    lambdamax = RefDensity.lambdamax;
-    lambda = RefDensity.lambda;
-    dlambda = RefDensity.dlambda;
-    value = RefDensity.value;
-    prev_value = RefDensity.prev_value;
-    diff = RefDensity.diff;
-    value_infty_set = RefDensity.value_infty_set;
-    value_infty = RefDensity.value_infty;
-
-  }
-  return(*this);
-}
-
-DP Root_Density::Return_Value (DP lambda_ref)
-{
-  // This function returns a value for epsilon at any real lambda
-  // using simple linear interpolation.
-  // Degree 3 polynomical also programmed in, but commented out:  no improvement.
-
-  DP answer = 0.0;
-  if (fabs(lambda_ref) >= fabs(lambda[0])) {
-    if (value_infty_set) answer = value_infty;
-    else ABACUSerror("Need to set asymptotics of Root_Density !");
+  Root_Density::Root_Density ()
+    : Npts(1), lambdamax(0.0), lambda(Vect_DP(0.0, Npts)), dlambda(Vect_DP(0.0, Npts)), value(Vect_DP(0.0, Npts)),
+      prev_value(Vect_DP(0.0, Npts)), diff(1.0e+6), value_infty_set(false), value_infty(0.0)
+  {
   }
 
-  else { // try to find the i such that lambda[i] <= lambda_ref < lambda[i+1]
+  Root_Density::Root_Density (int Npts_ref, DP lambdamax_ref)
+    : Npts(Npts_ref), lambdamax(lambdamax_ref), lambda(Vect_DP(Npts)), dlambda(Vect_DP(0.0, Npts)), value(Vect_DP(0.0, Npts)),
+      prev_value(Vect_DP(0.0, Npts)), diff(1.0e+6), value_infty_set(false), value_infty(0.0)
+  {
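+    // Build an equispaced rapidity grid: lambda[i] runs from -lambdamax*(Npts - 1.0)/Npts upwards
+    // in steps of dlambda = 2*lambdamax/Npts.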
+    for (int i = 0; i < value.size(); ++i) {
+      lambda[i] = lambdamax * (-(Npts - 1.0) + 2*i)/Npts;
+      dlambda[i] = 2.0 * lambdamax/Npts;
+    }
+  }
 
-    int index = (Npts - 1)/2;
-    int indexstep = (Npts - 1)/4 + 1;
+  Root_Density& Root_Density::operator= (const Root_Density& RefDensity)
+  {
+    if (this != &RefDensity) {
+      Npts = RefDensity.Npts;
+      lambdamax = RefDensity.lambdamax;
+      lambda = RefDensity.lambda;
+      dlambda = RefDensity.dlambda;
+      value = RefDensity.value;
+      prev_value = RefDensity.prev_value;
+      diff = RefDensity.diff;
+      value_infty_set = RefDensity.value_infty_set;
+      value_infty = RefDensity.value_infty;
 
-    while (indexstep >= 1) {
+    }
+    return(*this);
+  }
 
-      if ( // if is "lower": we go up
-	  lambda_ref >= lambda[index + 1]) {
-	index += indexstep;
-      }
+  DP Root_Density::Return_Value (DP lambda_ref)
+  {
+    // This function returns a value for epsilon at any real lambda
+    // using simple linear interpolation.
+    // A degree-3 polynomial interpolation was also implemented at one point, but gave no improvement.
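+    // Outside the tabulated interval the asymptotic value_infty is returned; inside it, the two
+    // bracketing grid points are located by bisection and the value is interpolated linearly.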
 
-      else if ( // if is "higher" or equal:  we go down
-	       lambda[index] > lambda_ref) {
-	index -= indexstep;
-      }
-
-      index = ABACUS::max(0, index);
-      index = ABACUS::min(Npts - 2, index);
-
-      if (indexstep == 1) indexstep--;
-      else indexstep = (indexstep + 1)/2;
-
-    } // while ...
-
-    if (index < 0 || index >= Npts || lambda[index] > lambda_ref || lambda[index + 1] < lambda_ref) {
-      cout << "Seeking index: " << index << "\t" << lambda[index] << "\t <=? " << lambda_ref << "\t<? " << lambda[index + 1] << endl;
-      ABACUSerror("Calculating index wrong in Root_Density::Evaluate.");
+    DP answer = 0.0;
+    if (fabs(lambda_ref) >= fabs(lambda[0])) {
+      if (value_infty_set) answer = value_infty;
+      else ABACUSerror("Need to set asymptotics of Root_Density !");
     }
 
-    //if (index < 1 || index > Npts - 3)
-    answer = ((value[index] * (lambda[index+1] - lambda_ref)
-	       + value[index + 1] * (lambda_ref - lambda[index]))/(lambda[index+1] - lambda[index]));
-    /*
-    else {
-      // Better:  if possible, fit to polynomial going through 4 closest points
-      Vect_DP xa (4);
-      Vect_DP ya (4);
-      DP dy;
-      xa[0] = lambda[index - 1];  xa[1] = lambda[index];  xa[2] = lambda[index + 1];  xa[3] = lambda[index + 2];
-      ya[0] = value[index - 1];  ya[1] = value[index];  ya[2] = value[index + 1];  ya[3] = value[index + 2];
-      polint (xa, ya, lambda_ref, answer, dy);  // sets answer to value at lambda_ref
+    else { // try to find the i such that lambda[i] <= lambda_ref < lambda[i+1]
+
+      int index = (Npts - 1)/2;
+      int indexstep = (Npts - 1)/4 + 1;
+
+      while (indexstep >= 1) {
+
+	if ( // if is "lower": we go up
+	    lambda_ref >= lambda[index + 1]) {
+	  index += indexstep;
+	}
+
+	else if ( // if is "higher" or equal:  we go down
+		 lambda[index] > lambda_ref) {
+	  index -= indexstep;
+	}
+
+	index = ABACUS::max(0, index);
+	index = ABACUS::min(Npts - 2, index);
+
+	if (indexstep == 1) indexstep--;
+	else indexstep = (indexstep + 1)/2;
+
+      } // while ...
+
+      if (index < 0 || index >= Npts || lambda[index] > lambda_ref || lambda[index + 1] < lambda_ref) {
+	cout << "Seeking index: " << index << "\t" << lambda[index] << "\t <=? " << lambda_ref
+	     << "\t<? " << lambda[index + 1] << endl;
+	ABACUSerror("Calculating index wrong in Root_Density::Return_Value.");
+      }
+
+      answer = ((value[index] * (lambda[index+1] - lambda_ref)
+		 + value[index + 1] * (lambda_ref - lambda[index]))/(lambda[index+1] - lambda[index]));
     }
-    */
+
+    return(answer);
   }
 
-  return(answer);
-}
-
-void Root_Density::Set_Asymptotics (DP value_infty_ref)
-{
-  value_infty = value_infty_ref;
-  value_infty_set = true;
-}
-
-Root_Density Root_Density::Compress_and_Match_Densities (DP comp_factor)
-{
-  // Returns a 'compressed' version of the density, using 1/comp_factor as many points.
-
-  // PROBLEM:  this implementation can lead to numerical instabilities.
-
-  //Root_Density compressed_density(Npts/comp_factor, lambdamax);
-
-  // Rather:  use this implementation:
-  int Npts_used = int(2.0 * lambdamax/(dlambda[0] * comp_factor));
-
-  Root_Density compressed_density(Npts_used, lambdamax);
-
-  compressed_density.Set_Asymptotics (value_infty);
-
-  for (int i = 0; i < compressed_density.Npts; ++i)
-    compressed_density.value[i] = (*this).Return_Value (compressed_density.lambda[i]);
-
-  return(compressed_density);
-}
-
-void Root_Density::Save (const char* outfile_Cstr)
-{
-  ofstream outfile;
-  outfile.open(outfile_Cstr);
-  outfile.precision(16);
-
-  for (int i = 0; i < Npts; ++i) {
-    if (i > 0) outfile << endl;
-    outfile << setw(20) << lambda[i] << "\t" << setw(20) << value[i];
+  void Root_Density::Set_Asymptotics (DP value_infty_ref)
+  {
+    value_infty = value_infty_ref;
+    value_infty_set = true;
   }
 
-  outfile.close();
-}
+  Root_Density Root_Density::Compress_and_Match_Densities (DP comp_factor)
+  {
+    // Returns a 'compressed' version of the density, using 1/comp_factor as many points.
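+    // Since dlambda[0] == 2*lambdamax/Npts for an equispaced grid, Npts_used below is essentially
+    // Npts/comp_factor; the compressed values are then filled in by interpolating the current
+    // density at the coarser grid points.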
 
+    int Npts_used = int(2.0 * lambdamax/(dlambda[0] * comp_factor));
 
-/************************************/
-/*
-struct Root_Density_Set {
+    Root_Density compressed_density(Npts_used, lambdamax);
 
-  int ntypes;
-  Vect<Root_Density> epsilon;
-  int Npts_total;  // sum of all Npts of epsilon's
-  DP diff;  // sum of diff's of the epsilon's
+    compressed_density.Set_Asymptotics (value_infty);
 
-  Root_Density_Set ();
-  Root_Density_Set (int ntypes_ref, int Npts_ref, DP lambdamax_ref);
-  Root_Density_Set (int ntypes_ref, Vect_INT Npts_ref, Vect_DP lambdamax_ref);
+    for (int i = 0; i < compressed_density.Npts; ++i)
+      compressed_density.value[i] = (*this).Return_Value (compressed_density.lambda[i]);
 
-  Root_Density_Set& operator= (const Root_Density_Set& RefSet);
-
-  void Insert_new_function (DP asymptotic_value);
-  void Extend_limits (Vect<bool> need_to_extend_limit);
-  void Insert_new_points (Vect<Vect<bool> > need_new_point_around);
-
-  DP Return_Value (int n_ref, DP lambda_ref);  // returns a value, no matter what.
-
-  Root_Density_Set Return_Compressed_and_Matched_Set (DP comp_factor);
-  void Match_Densities (Root_Density_Set& RefSet);
-
-  void Save (const char* outfile_Cstr);
-};
-*/
-Root_Density_Set::Root_Density_Set () : ntypes(1), epsilon(Vect<Root_Density> (ntypes)), Npts_total(0), diff(1.0e+6)
-{
-}
-
-Root_Density_Set::Root_Density_Set (int ntypes_ref, int Npts_ref, DP lambdamax_ref)
-  : ntypes(ntypes_ref), epsilon(Vect<Root_Density> (ntypes_ref)), Npts_total(ntypes_ref * Npts_ref), diff(1.0e+6)
-{
-  for (int n = 0; n < ntypes; ++n) epsilon[n] = Root_Density(Npts_ref, lambdamax_ref);
-}
-
-Root_Density_Set::Root_Density_Set (int ntypes_ref, Vect_INT Npts_ref, Vect_DP lambdamax_ref)
-  : ntypes(ntypes_ref), epsilon(Vect<Root_Density> (ntypes_ref)), Npts_total(Npts_ref.sum()), diff(1.0e+6)
-{
-  if (Npts_ref.size() != ntypes_ref || lambdamax_ref.size() != ntypes_ref) ABACUSerror("Wrong vector sizes in Root_Density_Set.");
-  for (int n = 0; n < ntypes; ++n) epsilon[n] = Root_Density(Npts_ref[n], lambdamax_ref[n]);
-}
-
-Root_Density_Set& Root_Density_Set::operator= (const Root_Density_Set& RefSet)
-{
-  if (this != &RefSet) {
-    ntypes = RefSet.ntypes;
-    epsilon = RefSet.epsilon;
-    Npts_total = RefSet.Npts_total;
-    diff = RefSet.diff;
+    return(compressed_density);
   }
-  return(*this);
-}
 
-void Root_Density_Set::Insert_new_function (DP asymptotic_value)
-{
-  // This function extends a set by adding one epsilon_n function on top
+  void Root_Density::Save (const char* outfile_Cstr)
+  {
+    ofstream outfile;
+    outfile.open(outfile_Cstr);
+    outfile.precision(16);
 
-  Root_Density_Set Updated_Set (ntypes + 1, 10, 10.0);  // last two parameters are meaningless
-  for (int n = 0; n < ntypes; ++n) Updated_Set.epsilon[n] = epsilon[n];
+    for (int i = 0; i < Npts; ++i) {
+      if (i > 0) outfile << endl;
+      outfile << setw(20) << lambda[i] << "\t" << setw(20) << value[i];
+    }
 
-  //Updated_Set.epsilon[ntypes] = Root_Density (epsilon[ntypes - 1].Npts, epsilon[ntypes - 1].lambdamax);
-  Updated_Set.epsilon[ntypes] = Root_Density (50, epsilon[ntypes - 1].lambdamax);
-  Updated_Set.epsilon[ntypes].Set_Asymptotics (asymptotic_value);
+    outfile.close();
+  }
 
-  for (int i = 0; i < Updated_Set.epsilon[ntypes].Npts; ++i)
-    Updated_Set.epsilon[ntypes].value[i] = Updated_Set.epsilon[ntypes].value_infty;
 
-  ntypes = Updated_Set.ntypes;
-  epsilon = Updated_Set.epsilon;
-  Npts_total+= Updated_Set.epsilon[ntypes - 1].Npts;
-}
+  Root_Density_Set::Root_Density_Set () : ntypes(1), epsilon(Vect<Root_Density> (ntypes)), Npts_total(0), diff(1.0e+6)
+  {
+  }
 
-void Root_Density_Set::Extend_limits (Vect<bool> need_to_extend_limit)
-{
-  // Extend the limits of integration at each level, according to boolean
+  Root_Density_Set::Root_Density_Set (int ntypes_ref, int Npts_ref, DP lambdamax_ref)
+    : ntypes(ntypes_ref), epsilon(Vect<Root_Density> (ntypes_ref)), Npts_total(ntypes_ref * Npts_ref), diff(1.0e+6)
+  {
+    for (int n = 0; n < ntypes; ++n) epsilon[n] = Root_Density(Npts_ref, lambdamax_ref);
+  }
 
-  // The function extends the limits by 10% on both sides, putting the
-  // extra values to value_infty.
+  Root_Density_Set::Root_Density_Set (int ntypes_ref, Vect_INT Npts_ref, Vect_DP lambdamax_ref)
+    : ntypes(ntypes_ref), epsilon(Vect<Root_Density> (ntypes_ref)), Npts_total(Npts_ref.sum()), diff(1.0e+6)
+  {
+    if (Npts_ref.size() != ntypes_ref || lambdamax_ref.size() != ntypes_ref)
+      ABACUSerror("Wrong vector sizes in Root_Density_Set.");
+    for (int n = 0; n < ntypes; ++n) epsilon[n] = Root_Density(Npts_ref[n], lambdamax_ref[n]);
+  }
 
-  if (need_to_extend_limit.size() != epsilon.size()) ABACUSerror("Wrong size need_to_extend_limit boolean in Extend_limits.");
+  Root_Density_Set& Root_Density_Set::operator= (const Root_Density_Set& RefSet)
+  {
+    if (this != &RefSet) {
+      ntypes = RefSet.ntypes;
+      epsilon = RefSet.epsilon;
+      Npts_total = RefSet.Npts_total;
+      diff = RefSet.diff;
+    }
+    return(*this);
+  }
 
-  Vect_INT nr_new_points_needed(0, ntypes);
-  int total_nr_new_points_added = 0;
-  DP dlambda_used = 0.0;
-  for (int n = 0; n < ntypes; ++n) {
-    if (need_to_extend_limit[n]) {
+  void Root_Density_Set::Insert_new_function (DP asymptotic_value)
+  {
+    // This function extends a set by adding one epsilon_n function on top
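+    // The new highest level gets 50 points over the same lambdamax as the previous highest level,
+    // and is initialised everywhere to its asymptotic value.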
 
+    Root_Density_Set Updated_Set (ntypes + 1, 10, 10.0);  // last two parameters are meaningless
+    for (int n = 0; n < ntypes; ++n) Updated_Set.epsilon[n] = epsilon[n];
+
+    Updated_Set.epsilon[ntypes] = Root_Density (50, epsilon[ntypes - 1].lambdamax);
+    Updated_Set.epsilon[ntypes].Set_Asymptotics (asymptotic_value);
+
+    for (int i = 0; i < Updated_Set.epsilon[ntypes].Npts; ++i)
+      Updated_Set.epsilon[ntypes].value[i] = Updated_Set.epsilon[ntypes].value_infty;
+
+    ntypes = Updated_Set.ntypes;
+    epsilon = Updated_Set.epsilon;
+    Npts_total += Updated_Set.epsilon[ntypes - 1].Npts;
+  }
+
+  void Root_Density_Set::Extend_limits (Vect<bool> need_to_extend_limit)
+  {
+    // Extend the limits of integration at each level for which need_to_extend_limit is set.
+
+    // Each flagged level has its lambdamax extended by about 10% on each side (Npts/20 extra
+    // grid points per side), and the newly added values are set to value_infty.
+
+    if (need_to_extend_limit.size() != epsilon.size())
+      ABACUSerror("Wrong size need_to_extend_limit boolean in Extend_limits.");
+
+    Vect_INT nr_new_points_needed(0, ntypes);
+    int total_nr_new_points_added = 0;
+    DP dlambda_used = 0.0;
+    for (int n = 0; n < ntypes; ++n) {
+      if (need_to_extend_limit[n]) {
+
+	Root_Density epsilon_n_before_update = epsilon[n];
+
+	// Determine the dlambda to be used:
+	dlambda_used = epsilon[n].dlambda[0];
+
+	// How many new points do we add ?  Say 5\% on each side:
+	nr_new_points_needed[n] = ABACUS::max(1, epsilon[n].Npts/20);
+
+	epsilon[n] = Root_Density(epsilon_n_before_update.Npts + 2* nr_new_points_needed[n],
+				  epsilon_n_before_update.lambdamax + nr_new_points_needed[n] * dlambda_used);
+	epsilon[n].Set_Asymptotics(epsilon_n_before_update.value_infty);
+
+	for (int i = 0; i < nr_new_points_needed[n]; ++i) {
+	  epsilon[n].lambda[i] = epsilon_n_before_update.lambda[0] - (nr_new_points_needed[n] - i) * dlambda_used;
+	  epsilon[n].dlambda[i] = dlambda_used;
+	  epsilon[n].value[i] = epsilon_n_before_update.value_infty;
+	}
+
+	for (int i = 0; i < epsilon_n_before_update.Npts; ++i) {
+	  epsilon[n].lambda[i + nr_new_points_needed[n] ] = epsilon_n_before_update.lambda[i];
+	  epsilon[n].dlambda[i + nr_new_points_needed[n] ] = epsilon_n_before_update.dlambda[i];
+	  epsilon[n].value[i + nr_new_points_needed[n] ] = epsilon_n_before_update.value[i];
+	}
+
+	for (int i = 0; i < nr_new_points_needed[n]; ++i) {
+	  epsilon[n].lambda[i + epsilon_n_before_update.Npts + nr_new_points_needed[n] ]
+	    = epsilon_n_before_update.lambda[epsilon_n_before_update.Npts - 1] + (i+1.0) * dlambda_used;
+	  epsilon[n].dlambda[i + epsilon_n_before_update.Npts + nr_new_points_needed[n] ] = dlambda_used;
+	  epsilon[n].value[i + epsilon_n_before_update.Npts + nr_new_points_needed[n] ] = epsilon_n_before_update.value_infty;
+	}
+
+	total_nr_new_points_added += 2 * nr_new_points_needed[n];
+
+      } // if (need_to_extend_limit[n])
+    } // for n
+
+    Npts_total += total_nr_new_points_added;
+
+    // Done !
+
+    return;
+
+  }
+
+  void Root_Density_Set::Insert_new_points (Vect<Vect<bool> > need_new_point_around)
+  {
+    // need_new_point_around specifies whether a new point needs to be inserted around existing points.
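+    // Each flagged point is replaced by two points at lambda[i] -/+ 0.25*dlambda[i], each carrying
+    // half of the original weight dlambda[i], with values interpolated from the pre-update density.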
+
+    // Count the number of new points needed per type:
+    Vect_INT nr_new_points_needed(0, ntypes);
+    int total_nr_new_points_needed = 0;
+    for (int n = 0; n < ntypes; ++n) {
+      if (need_new_point_around[n].size() != epsilon[n].Npts)
+	ABACUSerror("Wrong size need_new_point_around boolean in Insert_new_points.");
+      for (int i = 0; i < epsilon[n].Npts; ++i)
+	if (need_new_point_around[n][i]) nr_new_points_needed[n]++;
+      total_nr_new_points_needed += nr_new_points_needed[n];
+    }
+
+    // Working version using non-equispaced points
+    // Now update all data via interpolation:
+    for (int n = 0; n < ntypes; ++n) {
       Root_Density epsilon_n_before_update = epsilon[n];
-
-      // Determine the dlambda to be used:
-      dlambda_used = epsilon[n].dlambda[0];
-
-      // How many new points do we add ?  Say 5\% on each side:
-      nr_new_points_needed[n] = ABACUS::max(1, epsilon[n].Npts/20);
-
-      epsilon[n] = Root_Density(epsilon_n_before_update.Npts + 2* nr_new_points_needed[n], epsilon_n_before_update.lambdamax + nr_new_points_needed[n] * dlambda_used);
+      epsilon[n] = Root_Density(epsilon_n_before_update.Npts + nr_new_points_needed[n], epsilon_n_before_update.lambdamax);
       epsilon[n].Set_Asymptotics(epsilon_n_before_update.value_infty);
-
-      for (int i = 0; i < nr_new_points_needed[n]; ++i) {
-	epsilon[n].lambda[i] = epsilon_n_before_update.lambda[0] - (nr_new_points_needed[n] - i) * dlambda_used;
-	epsilon[n].dlambda[i] = dlambda_used;
-	epsilon[n].value[i] = epsilon_n_before_update.value_infty;
-      }
-
+      int nr_pts_added_n = 0;
       for (int i = 0; i < epsilon_n_before_update.Npts; ++i) {
-	epsilon[n].lambda[i + nr_new_points_needed[n] ] = epsilon_n_before_update.lambda[i];
-	epsilon[n].dlambda[i + nr_new_points_needed[n] ] = epsilon_n_before_update.dlambda[i];
-	epsilon[n].value[i + nr_new_points_needed[n] ] = epsilon_n_before_update.value[i];
+	if (!need_new_point_around[n][i]) {
+	  epsilon[n].lambda[i + nr_pts_added_n] = epsilon_n_before_update.lambda[i];
+	  epsilon[n].dlambda[i + nr_pts_added_n] = epsilon_n_before_update.dlambda[i];
+	  epsilon[n].value[i + nr_pts_added_n] = epsilon_n_before_update.value[i];
+	}
+	else if (need_new_point_around[n][i]) {
+	  epsilon[n].lambda[i + nr_pts_added_n] = epsilon_n_before_update.lambda[i] - 0.25 * epsilon_n_before_update.dlambda[i];
+	  epsilon[n].dlambda[i + nr_pts_added_n] = 0.5 * epsilon_n_before_update.dlambda[i];
+	  epsilon[n].value[i + nr_pts_added_n] = epsilon_n_before_update.Return_Value(epsilon[n].lambda[i + nr_pts_added_n]);
+	  nr_pts_added_n++;
+	  epsilon[n].lambda[i + nr_pts_added_n] = epsilon_n_before_update.lambda[i] + 0.25 * epsilon_n_before_update.dlambda[i];
+	  epsilon[n].dlambda[i + nr_pts_added_n] = 0.5 * epsilon_n_before_update.dlambda[i];
+	  epsilon[n].value[i + nr_pts_added_n] = epsilon_n_before_update.Return_Value(epsilon[n].lambda[i + nr_pts_added_n]);
+	}
+      }
+      if (nr_pts_added_n != nr_new_points_needed[n]) {
+	cout << nr_pts_added_n << "\t" << nr_new_points_needed[n] << endl;
+	ABACUSerror("Wrong counting of new points in Insert_new_points.");
       }
 
-      for (int i = 0; i < nr_new_points_needed[n]; ++i) {
-	epsilon[n].lambda[i + epsilon_n_before_update.Npts + nr_new_points_needed[n] ]
-	  = epsilon_n_before_update.lambda[epsilon_n_before_update.Npts - 1] + (i+1.0) * dlambda_used;
-	epsilon[n].dlambda[i + epsilon_n_before_update.Npts + nr_new_points_needed[n] ] = dlambda_used;
-	epsilon[n].value[i + epsilon_n_before_update.Npts + nr_new_points_needed[n] ] = epsilon_n_before_update.value_infty;
-      }
+    } // for n
 
-      total_nr_new_points_added += 2 * nr_new_points_needed[n];
+    Npts_total += total_nr_new_points_needed;
 
-      //cout << "Extending limits at level " << n << " with " << nr_new_points_needed[n] << " points on each side to " << epsilon[n].lambdamax << endl;
+    // Done !
 
-    } // if (need
-  } // for n
-
-  Npts_total += total_nr_new_points_added;
-
-  // Done !
-
-  return;
-
-}
-
-void Root_Density_Set::Insert_new_points (Vect<Vect<bool> > need_new_point_around)
-{
-  // need_new_point_around specifies whether a new point needs to be inserted around existing points.
-
-  // Count the number of new points needed per type:
-  Vect_INT nr_new_points_needed(0, ntypes);
-  int total_nr_new_points_needed = 0;
-  for (int n = 0; n < ntypes; ++n) {
-    if (need_new_point_around[n].size() != epsilon[n].Npts) ABACUSerror("Wrong size need_new_point_around boolean in Insert_new_points.");
-    for (int i = 0; i < epsilon[n].Npts; ++i)
-      if (need_new_point_around[n][i]) nr_new_points_needed[n]++;
-    total_nr_new_points_needed += nr_new_points_needed[n];
+    return;
   }
-  /*
-  // Simplistic version:  always keep equidistant points
-  for (int n = 0; n < ntypes; ++n) {
-    Root_Density epsilon_n_before_update = epsilon[n];
-    epsilon[n] = Root_Density(epsilon_n_before_update.Npts + nr_new_points_needed[n], epsilon_n_before_update.lambdamax);
-    epsilon[n].Set_Asymptotics(epsilon_n_before_update.value_infty);
-    for (int i = 0; i < epsilon[n].Npts; ++i)
-      epsilon[n].value[i] = epsilon_n_before_update.Return_Value(epsilon[n].lambda[i]);
-  }
-  */
 
-  // Working version using non-equispaced points
-  // Now update all data via interpolation:
-  for (int n = 0; n < ntypes; ++n) {
-    Root_Density epsilon_n_before_update = epsilon[n];
-    epsilon[n] = Root_Density(epsilon_n_before_update.Npts + nr_new_points_needed[n], epsilon_n_before_update.lambdamax);
-    epsilon[n].Set_Asymptotics(epsilon_n_before_update.value_infty);
-    //cout << "Check: " << epsilon[n].Npts << " " << epsilon_n_before_update.Npts << endl;
-    int nr_pts_added_n = 0;
-    for (int i = 0; i < epsilon_n_before_update.Npts; ++i) {
-      if (!need_new_point_around[n][i]) {
-	epsilon[n].lambda[i + nr_pts_added_n] = epsilon_n_before_update.lambda[i];
-	epsilon[n].dlambda[i + nr_pts_added_n] = epsilon_n_before_update.dlambda[i];
-	epsilon[n].value[i + nr_pts_added_n] = epsilon_n_before_update.value[i];
-      }
-      else if (need_new_point_around[n][i]) {
-	epsilon[n].lambda[i + nr_pts_added_n] = epsilon_n_before_update.lambda[i] - 0.25 * epsilon_n_before_update.dlambda[i];
-	epsilon[n].dlambda[i + nr_pts_added_n] = 0.5 * epsilon_n_before_update.dlambda[i];
-	epsilon[n].value[i + nr_pts_added_n] = epsilon_n_before_update.Return_Value(epsilon[n].lambda[i + nr_pts_added_n]);
-	nr_pts_added_n++;
-	epsilon[n].lambda[i + nr_pts_added_n] = epsilon_n_before_update.lambda[i] + 0.25 * epsilon_n_before_update.dlambda[i];
-	epsilon[n].dlambda[i + nr_pts_added_n] = 0.5 * epsilon_n_before_update.dlambda[i];
-	epsilon[n].value[i + nr_pts_added_n] = epsilon_n_before_update.Return_Value(epsilon[n].lambda[i + nr_pts_added_n]);
-      }
-    }
-    if (nr_pts_added_n != nr_new_points_needed[n]) {
-      cout << nr_pts_added_n << "\t" << nr_new_points_needed[n] << endl;
-      ABACUSerror("Wrong counting of new points in Insert_new_points.");
+  DP Root_Density_Set::Return_Value (int n_ref, DP lambda_ref)
+  {
+    // Returns a value for any level n_ref, including n_ref >= ntypes.
+
+    if (n_ref < ntypes) return(epsilon[n_ref].Return_Value(lambda_ref));
+
+    else // assume asymptotic form of epsilon, proportional to n
+      return(epsilon[ntypes - 1].Return_Value(lambda_ref) * n_ref/(ntypes - 1.0));
+
+  }
+
+  Root_Density_Set Root_Density_Set::Return_Compressed_and_Matched_Set (DP comp_factor)
+  {
+    // Returns a set with 1/comp_factor as many points at each level
+
+    if (comp_factor >= 2.0)
+      ABACUSerror("Compression factor too large in Return_Compressed_and_Matched_Set, numerical instability will occur.");
+
+    Vect_INT nrpts_comp (ntypes);
+    Vect_DP lambdamax_comp (ntypes);
+    for (int n = 0; n < ntypes; ++n) {
+      nrpts_comp[n] = int(2.0 * epsilon[n].lambdamax/(epsilon[n].dlambda[0] * comp_factor));
+      lambdamax_comp[n] = epsilon[n].lambdamax;
     }
 
-    // Check:
-    //for (int i = 0; i < epsilon[n].Npts - 1; ++i)
-    //if (fabs(epsilon[n].lambda[i] + 0.5 *(epsilon[n].dlambda[i] + epsilon[n].dlambda[i+1]) - epsilon[n].lambda[i+1]) > 1.0e-13)
-	//{
-    // cout << "Error at level " << n << "\ti " << i << "\t" << epsilon[n].lambda[i] << "\t" << epsilon[n].dlambda[i]
-    //       << "\t" << epsilon[n].lambda[i+1] << "\t" << epsilon[n].dlambda[i+1]
-    //       << "\t" << epsilon[n].lambda[i] + 0.5 *(epsilon[n].dlambda[i] + epsilon[n].dlambda[i+1]) - epsilon[n].lambda[i+1] << endl;
-    //  ABACUSerror("...");
-    //}
+    Root_Density_Set Compressed_and_Matched_Set (ntypes, nrpts_comp, lambdamax_comp);
 
-  } // for n
+    for (int n = 0; n < ntypes; ++n)
+      Compressed_and_Matched_Set.epsilon[n] = (*this).epsilon[n].Compress_and_Match_Densities (comp_factor);
 
-
-  //cout << "need_new_pt_above " << need_new_point_above[0] << endl << endl;
-  //cout << "epsilon[0].lambda = " << epsilon[0].lambda << endl << endl;
-  //cout << "epsilon[0].dlambda = " << epsilon[0].dlambda << endl << endl;
-  //cout << "epsilon[0].value = " << epsilon[0].value << endl << endl;
-
-  Npts_total += total_nr_new_points_needed;
-
-  // Done !
-
-  return;
-}
-
-DP Root_Density_Set::Return_Value (int n_ref, DP lambda_ref)
-{
-  // Returns a value, no matter what !
-
-  if (n_ref < ntypes) return(epsilon[n_ref].Return_Value(lambda_ref));
-
-  else // assume asymptotic form of epsilon, proportional to n
-    return(epsilon[ntypes - 1].Return_Value(lambda_ref) * n_ref/(ntypes - 1.0));
-
-}
-
-Root_Density_Set Root_Density_Set::Return_Compressed_and_Matched_Set (DP comp_factor)
-{  // Returns a set with 1/comp_factor as many points at each level
-
-  if (comp_factor >= 2.0)
-    ABACUSerror("Compression factor too large in Return_Compressed_and_Matched_Set, numerical instability will occur.");
-
-  Vect_INT nrpts_comp (ntypes);
-  Vect_DP lambdamax_comp (ntypes);
-  for (int n = 0; n < ntypes; ++n) {
-    nrpts_comp[n] = int(2.0 * epsilon[n].lambdamax/(epsilon[n].dlambda[0] * comp_factor));
-    lambdamax_comp[n] = epsilon[n].lambdamax;
+    return(Compressed_and_Matched_Set);
   }
 
-  Root_Density_Set Compressed_and_Matched_Set (ntypes, nrpts_comp, lambdamax_comp);
+  void Root_Density_Set::Match_Densities (Root_Density_Set& RefSet)
+  {
+    // Match the densities of this set to those in RefSet.
 
-  for (int n = 0; n < ntypes; ++n)
-    Compressed_and_Matched_Set.epsilon[n] = (*this).epsilon[n].Compress_and_Match_Densities (comp_factor);
-
-  return(Compressed_and_Matched_Set);
-}
-
-void Root_Density_Set::Match_Densities (Root_Density_Set& RefSet)
-{ // matched densities to those in RefSet
-
-  for (int n = 0; n < ntypes; ++n)
-    for (int i = 0; i < epsilon[n].Npts; ++i)
-      epsilon[n].value[i] = RefSet.epsilon[n].Return_Value(epsilon[n].lambda[i]);
-}
-
-void Root_Density_Set::Save (const char* outfile_Cstr)
-{
-  ofstream outfile;
-  outfile.open(outfile_Cstr);
-  outfile.precision(16);
-
-  // Determine what the maximal nr of pts is:
-  int Npts_n_max = 0;
-  for (int n = 0; n < ntypes; ++n) Npts_n_max = ABACUS::max(Npts_n_max, epsilon[n].Npts);
-
-  for (int i = 0; i < Npts_n_max; ++i) {
-    if (i > 0) outfile << endl;
-    for (int n = 0; n < ntypes; ++n) (i < epsilon[n].Npts) ?
-					       (outfile << epsilon[n].lambda[i] << "\t" << epsilon[n].value[i] << "\t")
-					       : (outfile << 0 << "\t" << 0 << "\t");
+    for (int n = 0; n < ntypes; ++n)
+      for (int i = 0; i < epsilon[n].Npts; ++i)
+	epsilon[n].value[i] = RefSet.epsilon[n].Return_Value(epsilon[n].lambda[i]);
   }
 
-  outfile.close();
-}
+  void Root_Density_Set::Save (const char* outfile_Cstr)
+  {
+    ofstream outfile;
+    outfile.open(outfile_Cstr);
+    outfile.precision(16);
+
+    // Determine what the maximal nr of pts is:
+    int Npts_n_max = 0;
+    for (int n = 0; n < ntypes; ++n) Npts_n_max = ABACUS::max(Npts_n_max, epsilon[n].Npts);
+
+    for (int i = 0; i < Npts_n_max; ++i) {
+      if (i > 0) outfile << endl;
+      for (int n = 0; n < ntypes; ++n) (i < epsilon[n].Npts) ?
+					 (outfile << epsilon[n].lambda[i] << "\t" << epsilon[n].value[i] << "\t")
+					 : (outfile << 0 << "\t" << 0 << "\t");
+    }
+
+    outfile.close();
+  }
 
 
 } // namespace ABACUS
diff --git a/src/TBA/TBA_2CBG.cc b/src/TBA/TBA_2CBG.cc
index 09f743a..4aaf64e 100644
--- a/src/TBA/TBA_2CBG.cc
+++ b/src/TBA/TBA_2CBG.cc
@@ -20,1718 +20,1112 @@ using namespace ABACUS;
 namespace ABACUS {
 
 
-/********************* 2CBG specific *******************/
+  /********************* 2CBG specific *******************/
 
 
-DP Asymptotic_2CBG_epsilon (int n, DP Omega, DP kBT)
-{
-  return(2.0 * Omega * n + kBT * log(pow((1.0 - exp(-2.0 * (n + 1.0) * Omega/kBT))
-					 /(1.0 - exp(-2.0 * Omega/kBT)), 2.0) - exp(-2.0 * n * Omega/kBT)));
-}
-
-void Set_2CBG_Asymptotics (Root_Density_Set& TBA_Set, DP mu, DP Omega, DP kBT)
-{
-  //DP epsilon_infty = 0.0;
-  TBA_Set.epsilon[0].Set_Asymptotics (pow(TBA_Set.epsilon[0].lambdamax, 2.0) - mu - Omega);
-  for (int n = 1; n < TBA_Set.ntypes; ++n) {
-    //epsilon_infty = 2.0 * Omega * n + kBT *
-    //log(pow((1.0 - exp(-2.0 * (n + 1.0) * Omega/kBT))/(1.0 - exp(-2.0 * Omega/kBT)), 2.0) - exp(-2.0 * n * Omega/kBT));
-    //TBA_Set.epsilon[n].Set_Asymptotics (epsilon_infty);
-    TBA_Set.epsilon[n].Set_Asymptotics (Asymptotic_2CBG_epsilon(n, Omega, kBT));
-    //cout << "Set asymptotics of " << n << " to " << setprecision(16) << TBA_Set.epsilon[n].value_infty
-    // << "\t" << 2.0 * Omega * n + 2.0 * kBT * log((1.0 - exp(-2.0 * (n + 1) * Omega/kBT))/(1.0 - exp(-2.0 * Omega/kBT))) - TBA_Set.epsilon[n].value_infty << endl;
+  DP Asymptotic_2CBG_epsilon (int n, DP Omega, DP kBT)
+  {
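+    // Asymptotic (|lambda| -> infinity) value of epsilon_n:
+    //   epsilon_n^infty = 2 n Omega + kBT ln[ ((1 - e^{-2(n+1)Omega/kBT})/(1 - e^{-2Omega/kBT}))^2
+    //                                          - e^{-2 n Omega/kBT} ]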
+    return(2.0 * Omega * n + kBT * log(pow((1.0 - exp(-2.0 * (n + 1.0) * Omega/kBT))
+					   /(1.0 - exp(-2.0 * Omega/kBT)), 2.0) - exp(-2.0 * n * Omega/kBT)));
   }
-}
 
-void Set_2CBG_deps_dchempot_Asymptotics (int option, const Root_Density_Set& Set, Root_Density_Set& DSet, DP mu, DP Omega, DP kBT)
-{
-  // option == 0:  deps/dmu
-  // option == 1:  deps/dOmega
-
-  //DSet.epsilon[0].Set_Asymptotics (-1.0);
-  DP zeroasymptote = -1.0;
-  DP em2OoT = exp(-2.0 * Omega/kBT);
-  for (int n = 1; n < DSet.ntypes; ++n) {
-    if (option == 0) DSet.epsilon[n].Set_Asymptotics (0.0);
-    else if (option == 1)
-      DSet.epsilon[n].Set_Asymptotics (2.0 * (1.0 - pow(em2OoT, n+1.0))
-				       * (n * (1.0 - pow(em2OoT, n+2.0)) - (n + 2.0) * em2OoT * (1.0 - pow(em2OoT, DP(n))))
-				       /((1.0 - em2OoT) * (1.0 - pow(em2OoT, DP(n))) * (1.0 - pow(em2OoT, n + 2.0))));
-    //cout << "Set asymptotics for option " << option << " at level " << n << " to " << DSet.epsilon[n].value_infty << endl;
-    zeroasymptote += DSet.epsilon[n].value_infty * exp(-Set.epsilon[n].value_infty/kBT)/(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
-  }
-  // For n > nmax sum in RHS of BE for epsilon, assuming epsilon_n = epsilon_n^\infty in those cases:
-  // Remember:  nmax in notes is Set.ntypes - 1.
-  zeroasymptote -= option == 0 ? 0.0 : 2.0 * ((Set.ntypes + 1.0) * exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT)/(1.0 - exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT))
-					      - Set.ntypes * exp(-2.0 * Set.ntypes * Omega/kBT)/(1.0 - exp(-2.0 * Set.ntypes * Omega/kBT)));
-
-  //cout << "Set asymptotics for option " << option << " at level 0 to " << zeroasymptote << endl;
-
-  DSet.epsilon[0].Set_Asymptotics (zeroasymptote);
-
-  return;
-}
-
-void Initiate_2CBG_TBA_Functions (Root_Density_Set& TBA_Set, DP mu, DP Omega)
-{
-  for (int i = 0; i < TBA_Set.epsilon[0].Npts; ++i) {
-    TBA_Set.epsilon[0].value[i] = TBA_Set.epsilon[0].lambda[i] * TBA_Set.epsilon[0].lambda[i] - mu - Omega;
-    TBA_Set.epsilon[0].prev_value[i] = TBA_Set.epsilon[0].value[i];
-  }
-  for (int n = 1; n < TBA_Set.ntypes; ++n) {
-    for (int i = 0; i < TBA_Set.epsilon[n].Npts; ++i)
-      TBA_Set.epsilon[n].value[i] = TBA_Set.epsilon[n].value_infty;
-  }
-}
-
-void Initiate_2CBG_deps_dchempot_Functions (Root_Density_Set& DSet)
-{
-  for (int n = 0; n < DSet.ntypes; ++n) {
-    for (int i = 0; i < DSet.epsilon[n].Npts; ++i)
-      DSet.epsilon[n].value[i] = DSet.epsilon[n].value_infty;
-  }
-}
-
-void Iterate_2CBG_TBAE (Root_Density_Set& Set, Vect<Vect<Vect_DP> >& a_n_dlambda, Vect<Vect<Vect_DP> >& fmin_dlambda,
-			Vect<Vect<Vect_DP> >& fplus_dlambda, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  // Produces a new Root_Density_Set from a previous iteration.
-  // Does NOT add types or change Npts, lambdamax values.
-
-  //DP oneoverc = 1.0/c_int;
-  //DP twoovernc = 2.0/c_int;
-
-  // First define some useful functions:
-  Vect<Vect_DP> Tln1plusemineps(Set.ntypes);
-  Vect_DP Tln1pluseminepsinfty(Set.ntypes);
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-
-    Tln1plusemineps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
-
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      Tln1plusemineps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
-	kBT * (Set.epsilon[n].value[i] < 24.0 * kBT ? log(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) : exp(-Set.epsilon[n].value[i]/kBT))
-	:
-	-Set.epsilon[n].value[i] + kBT * (-Set.epsilon[n].value[i] < 24.0 * kBT ? log (1.0 + exp(Set.epsilon[n].value[i]/kBT)) : exp(Set.epsilon[n].value[i]/kBT));
-      // Keep previous rapidities:
-      Set.epsilon[n].prev_value[i] = Set.epsilon[n].value[i];
+  void Set_2CBG_Asymptotics (Root_Density_Set& TBA_Set, DP mu, DP Omega, DP kBT)
+  {
+    TBA_Set.epsilon[0].Set_Asymptotics (pow(TBA_Set.epsilon[0].lambdamax, 2.0) - mu - Omega);
+    for (int n = 1; n < TBA_Set.ntypes; ++n) {
+      TBA_Set.epsilon[n].Set_Asymptotics (Asymptotic_2CBG_epsilon(n, Omega, kBT));
     }
-    Tln1pluseminepsinfty[n] = kBT * (Set.epsilon[n].value_infty < 24.0 * kBT ? log(1.0 + exp(-Set.epsilon[n].value_infty/kBT)) : exp(-Set.epsilon[n].value_infty/kBT));
-    //Tln1pluseminepsinfty[n] = kBT * log(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
-    //cout << "Check Tln1pluseminepsinfty:  n " << n << " " << Tln1pluseminepsinfty[n] << " " << -kBT * log(1.0 - pow(sinh(Omega/kBT)/sinh((n + 1) * Omega/kBT), 2.0)) << endl;
   }
 
-  // Now do the necessary convolutions for epsilon == epsilon[0].
-  // For each value of lambda, do the convolutions:
-  // Careful:  the lambda's used for lambda (index i) are those of epsilon[0], the lambda' (index j) are for epsilon[n] !!
-  Vect<Vect_DP> a_n_Tln_conv(Set.ntypes);
-  for (int n = 0; n < Set.ntypes; ++n) {
-    a_n_Tln_conv[n] = Vect_DP (0.0, Set.epsilon[0].Npts);
-    Vect_DP f(0.0, Set.epsilon[n].Npts);
+  void Set_2CBG_deps_dchempot_Asymptotics (int option, const Root_Density_Set& Set, Root_Density_Set& DSet,
+					   DP mu, DP Omega, DP kBT)
+  {
+    // option == 0:  deps/dmu
+    // option == 1:  deps/dOmega
 
-    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-      a_n_Tln_conv[n][i] = 0.0;
+    DP zeroasymptote = -1.0;
+    DP em2OoT = exp(-2.0 * Omega/kBT);
+    for (int n = 1; n < DSet.ntypes; ++n) {
+      if (option == 0) DSet.epsilon[n].Set_Asymptotics (0.0);
+      else if (option == 1)
+	DSet.epsilon[n].Set_Asymptotics (2.0 * (1.0 - pow(em2OoT, n+1.0))
+					 * (n * (1.0 - pow(em2OoT, n+2.0)) - (n + 2.0) * em2OoT * (1.0 - pow(em2OoT, DP(n))))
+					 /((1.0 - em2OoT) * (1.0 - pow(em2OoT, DP(n))) * (1.0 - pow(em2OoT, n + 2.0))));
+      zeroasymptote += DSet.epsilon[n].value_infty
+	* exp(-Set.epsilon[n].value_infty/kBT)/(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
+    }
+    // For n > nmax sum in RHS of BE for epsilon, assuming epsilon_n = epsilon_n^\infty in those cases:
+    // Remember:  nmax in notes is Set.ntypes - 1.
+    zeroasymptote -= option == 0 ? 0.0
+      : 2.0 * ((Set.ntypes + 1.0) * exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT)
+	       /(1.0 - exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT))
+	       - Set.ntypes * exp(-2.0 * Set.ntypes * Omega/kBT)/(1.0 - exp(-2.0 * Set.ntypes * Omega/kBT)));
 
-      for (int j = 0; j < Set.epsilon[n].Npts; ++j) a_n_Tln_conv[n][i] += Tln1plusemineps[n][j] * a_n_dlambda[i][n][j];
-      // Add alpha curvature terms:  VERY COSTLY
-      /*
-      for (int j = 1; j < Set.epsilon[n].Npts - 1; ++j)
-	a_n_Tln_conv[n][i] += (1.0/12.0) * pow(Set.epsilon[n].dlambda[j], 3.0)
-	  * ((Tln1plusemineps[n][j+1] * a_n_dlambda[i][n][j+1] - Tln1plusemineps[n][j] * a_n_dlambda[i][n][j])/(Set.epsilon[n].lambda[j+1] - Set.epsilon[n].lambda[j])
-	     - (Tln1plusemineps[n][j] * a_n_dlambda[i][n][j] - Tln1plusemineps[n][j-1] * a_n_dlambda[i][n][j-1])/(Set.epsilon[n].lambda[j] - Set.epsilon[n].lambda[j-1]))
+    DSet.epsilon[0].Set_Asymptotics (zeroasymptote);
+
+    return;
+  }
+
+  void Initiate_2CBG_TBA_Functions (Root_Density_Set& TBA_Set, DP mu, DP Omega)
+  {
+    for (int i = 0; i < TBA_Set.epsilon[0].Npts; ++i) {
+      TBA_Set.epsilon[0].value[i] = TBA_Set.epsilon[0].lambda[i] * TBA_Set.epsilon[0].lambda[i] - mu - Omega;
+      TBA_Set.epsilon[0].prev_value[i] = TBA_Set.epsilon[0].value[i];
+    }
+    for (int n = 1; n < TBA_Set.ntypes; ++n) {
+      for (int i = 0; i < TBA_Set.epsilon[n].Npts; ++i)
+	TBA_Set.epsilon[n].value[i] = TBA_Set.epsilon[n].value_infty;
+    }
+  }
+
+  void Initiate_2CBG_deps_dchempot_Functions (Root_Density_Set& DSet)
+  {
+    for (int n = 0; n < DSet.ntypes; ++n) {
+      for (int i = 0; i < DSet.epsilon[n].Npts; ++i)
+	DSet.epsilon[n].value[i] = DSet.epsilon[n].value_infty;
+    }
+  }
+
+  void Iterate_2CBG_TBAE (Root_Density_Set& Set, Vect<Vect<Vect_DP> >& a_n_dlambda, Vect<Vect<Vect_DP> >& fmin_dlambda,
+			  Vect<Vect<Vect_DP> >& fplus_dlambda, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    // Produces a new Root_Density_Set from a previous iteration.
+    // Does NOT add types or change Npts, lambdamax values.
+
+    // First define some useful functions:
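+    // Tln1plusemineps[n][i] stores kBT*ln(1 + exp(-epsilon_n(lambda_i)/kBT)), computed in an
+    // overflow-safe form: for epsilon < 0 it is rewritten as -epsilon + kBT*ln(1 + exp(epsilon/kBT)),
+    // and once |epsilon| exceeds 24*kBT the logarithm is replaced by its leading exponential.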
+    Vect<Vect_DP> Tln1plusemineps(Set.ntypes);
+    Vect_DP Tln1pluseminepsinfty(Set.ntypes);
+
+    for (int n = 0; n < Set.ntypes; ++n) {
+
+      Tln1plusemineps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
+
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	Tln1plusemineps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
+	  kBT * (Set.epsilon[n].value[i] < 24.0 * kBT ? log(1.0 + exp(-Set.epsilon[n].value[i]/kBT))
+		 : exp(-Set.epsilon[n].value[i]/kBT))
+	  :
+	  -Set.epsilon[n].value[i] + kBT * (-Set.epsilon[n].value[i] < 24.0 * kBT
+					    ? log (1.0 + exp(Set.epsilon[n].value[i]/kBT)) : exp(Set.epsilon[n].value[i]/kBT));
+	// Keep the previous values of epsilon (used below for damping and for the convergence measure):
+	Set.epsilon[n].prev_value[i] = Set.epsilon[n].value[i];
+      }
+      Tln1pluseminepsinfty[n] = kBT * (Set.epsilon[n].value_infty < 24.0 * kBT
+				       ? log(1.0 + exp(-Set.epsilon[n].value_infty/kBT)) : exp(-Set.epsilon[n].value_infty/kBT));
+    }
+
+    // Now do the necessary convolutions for epsilon == epsilon[0].
+    // For each value of lambda, do the convolutions:
+    // Careful:  the lambda values (index i) are those of epsilon[0]; the lambda' values (index j) are those of epsilon[n] !!
+    Vect<Vect_DP> a_n_Tln_conv(Set.ntypes);
+    for (int n = 0; n < Set.ntypes; ++n) {
+      a_n_Tln_conv[n] = Vect_DP (0.0, Set.epsilon[0].Npts);
+      Vect_DP f(0.0, Set.epsilon[n].Npts);
+
+      for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+	a_n_Tln_conv[n][i] = 0.0;
+
+	for (int j = 0; j < Set.epsilon[n].Npts; ++j) a_n_Tln_conv[n][i] += Tln1plusemineps[n][j] * a_n_dlambda[i][n][j];
+	// Add alpha curvature terms:  VERY COSTLY, remove for now
+	/*
+	  for (int j = 1; j < Set.epsilon[n].Npts - 1; ++j)
+	  a_n_Tln_conv[n][i] += (1.0/12.0) * pow(Set.epsilon[n].dlambda[j], 3.0)
+	  * ((Tln1plusemineps[n][j+1] * a_n_dlambda[i][n][j+1]
+	  - Tln1plusemineps[n][j] * a_n_dlambda[i][n][j])/(Set.epsilon[n].lambda[j+1] - Set.epsilon[n].lambda[j])
+	  - (Tln1plusemineps[n][j] * a_n_dlambda[i][n][j]
+	  - Tln1plusemineps[n][j-1] * a_n_dlambda[i][n][j-1])/(Set.epsilon[n].lambda[j] - Set.epsilon[n].lambda[j-1]))
 	  /(Set.epsilon[n].lambda[j+1] - Set.epsilon[n].lambda[j-1]);
-      */
-    } // for (int i ...
+	*/
+      } // for (int i ...
 
-  } // for (int n...    We now have all the a_n * Tln... at our disposal.
+    } // for (int n...    We now have all the a_n * Tln... at our disposal.
 
-  // For n > nmax sum in RHS of BE for epsilon, assuming epsilon_n = epsilon_n^\infty in those cases:
-  // Remember:  nmax = Set.ntypes - 1
-  DP Smaxsum = kBT * log((1.0 - exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT))/(1.0 - exp(-2.0 * Set.ntypes * Omega/kBT)));
-  /*
-  // Check of convolutions, for lambda around 0:
-  Vect_DP a_n_Tln_conv_0_integ (0.0, Set.ntypes);
-  Vect_DP integ_target(0.0, Set.ntypes);
-  //for (int n = 1; n < Set.ntypes; ++n) {
-  for (int n = Set.ntypes - 1; n < Set.ntypes; ++n) {
-    for (int j = 0; j < Set.epsilon[n].Npts; ++j) a_n_Tln_conv_0_integ[n] += Tln1plusemineps[n][j] * a_n_dlambda[Set.epsilon[0].Npts/2][n][j];
-    // Add asymptotic parts:  not necessary
-    a_n_Tln_conv_0_integ[n] += Tln1pluseminepsinfty[n]
-	* (1.0 - (atan((Set.epsilon[n].lambdamax - Set.epsilon[0].lambda[Set.epsilon[0].Npts/2])/(0.5 * n * c_int))
-		  + atan((Set.epsilon[n].lambdamax + Set.epsilon[0].lambda[Set.epsilon[0].Npts/2])/(0.5 * n * c_int)))/PI);
-    // Prediction based on value_infty:
-    integ_target[n] = -kBT * log(1.0 - pow(sinh(Omega/kBT)/sinh((n + 1.0) * Omega/kBT), 2.0));
-    integ_target[n] = -kBT * log(1.0 - pow((1.0 - exp(-2.0*Omega/kBT))/(1.0 - exp(-2.0*(n + 1.0)*Omega/kBT)), 2.0) * exp(-2.0 * n * Omega/kBT));
-    cout << n << " " << Set.epsilon[n].value[0] << " " << Set.epsilon[n].value_infty << " " << " " << Set.epsilon[n].value[Set.epsilon[n].Npts/2]
-	 << " " << Tln1plusemineps[n][0] << " " << Tln1plusemineps[n][Set.epsilon[n].Npts/2] << " " << Tln1pluseminepsinfty[n]
-	 << " " << a_n_Tln_conv_0_integ[n] << " " << integ_target[n] << "\t";
-  }
-  cout << endl;
-  //ABACUSerror("Stop...");
-  */
-  // Reconstruct the epsilon[0] function:
-  for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-    Set.epsilon[0].value[i] = pow(Set.epsilon[0].lambda[i], 2.0) - mu - Omega;
+    // For n > nmax sum in RHS of BE for epsilon, assuming epsilon_n = epsilon_n^\infty in those cases:
+    // Remember:  nmax = Set.ntypes - 1
+    DP Smaxsum = kBT * log((1.0 - exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT))/(1.0 - exp(-2.0 * Set.ntypes * Omega/kBT)));
 
-    // Add the convolutions:
-    for (int n = 0; n < Set.ntypes; ++n)
-      //if (n <= 1) // REMOVE
+    // Reconstruct the epsilon[0] function:
+    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+      Set.epsilon[0].value[i] = pow(Set.epsilon[0].lambda[i], 2.0) - mu - Omega;
+
+      // Add the convolutions:
+      for (int n = 0; n < Set.ntypes; ++n)
 	Set.epsilon[0].value[i] -= a_n_Tln_conv[n][i];
 
-    // Add the asymptotic parts of convolutions:
-    for (int n = 1; n < Set.ntypes; ++n)
-      //if (n <= 1) // REMOVE
-      Set.epsilon[0].value[i] -= Tln1pluseminepsinfty[n]
-	* (1.0 - (atan((Set.epsilon[n].lambdamax - Set.epsilon[0].lambda[i])/(0.5 * n * c_int))
-		  + atan((Set.epsilon[n].lambdamax + Set.epsilon[0].lambda[i])/(0.5 * n * c_int)))/PI);
+      // Add the asymptotic parts of convolutions:
+      for (int n = 1; n < Set.ntypes; ++n)
+	Set.epsilon[0].value[i] -= Tln1pluseminepsinfty[n]
+	  * (1.0 - (atan((Set.epsilon[n].lambdamax - Set.epsilon[0].lambda[i])/(0.5 * n * c_int))
+		    + atan((Set.epsilon[n].lambdamax + Set.epsilon[0].lambda[i])/(0.5 * n * c_int)))/PI);
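+      // (The factor in parentheses above is the weight of the convolution kernel lying outside the
+      //  sampled interval [-lambdamax, lambdamax]:  for a Lorentzian of half-width 0.5 * n * c_int,
+      //  as integrated cell-by-cell in Build_a_n_dlambda, the integral over |lambda'| < lambdamax is
+      //  (atan(...) + atan(...))/PI, while the full-line integral is 1.)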
 
-    // Add the leftover summation for species n > nmax, assuming epsilon_n = epsilon_n^\infty in those cases:
-    Set.epsilon[0].value[i] -= Smaxsum;
+      // Add the leftover summation for species n > nmax, assuming epsilon_n = epsilon_n^\infty in those cases:
+      Set.epsilon[0].value[i] -= Smaxsum;
 
-    //cout << "i " << i << "\tlambda " << Set.epsilon[0].lambda[i] << "\te[0][i] " << Set.epsilon[0].value[i] << "\tprev " << Set.epsilon[0].prev_value[i]
-    // << "\tlambda^2 " << pow(Set.epsilon[0].lambda[i], 2.0) << "\ta_n_Tln_conv[0] " << a_n_Tln_conv[0][i] << "\ta_n_Tln_conv[1] " << a_n_Tln_conv[1][i]
-    // << endl;
-    //cout << a_n_dlambda[i][1] << endl << endl;
+      // Include some damping:
+      Set.epsilon[0].value[i] = 0.1 * Set.epsilon[0].prev_value[i] + 0.9 * Set.epsilon[0].value[i];
+      // No need to force boundaries here, epsilon[0] is inherently stable.
+    }
+    // epsilon[0] is now fully iterated.
 
-    // Include some damping:
-    Set.epsilon[0].value[i] = 0.1 * Set.epsilon[0].prev_value[i] + 0.9 * Set.epsilon[0].value[i];
-    // No need to force boundaries here, epsilon[0] is inherently stable.
+
+    // Now do the remaining epsilons:
+
+    for (int n = 1; n < Set.ntypes; ++n) {
+
+      Vect_DP f_Tln_conv_min (0.0, Set.epsilon[n].Npts);  // 'down' convolution
+      Vect_DP f_Tln_conv_plus (0.0, Set.epsilon[n].Npts); // 'up' convolution
+
+      Vect_DP fmin(0.0, Set.epsilon[n-1].Npts);
+      Vect_DP fplus(0.0, Set.epsilon[ABACUS::min(n+1, Set.ntypes - 1)].Npts);
+
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	f_Tln_conv_min[i] = 0.0;
+	f_Tln_conv_plus[i] = 0.0;
+
+	// 'down' convolutions
+	if (n == 1)
+	  for (int j = 0; j < Set.epsilon[0].Npts; ++j)
+	    f_Tln_conv_min[i] += Tln1plusemineps[0][j] * fmin_dlambda[n][i][j];
+
+	else for (int j = 0; j < Set.epsilon[n - 1].Npts; ++j)
+	       f_Tln_conv_min[i] += (Set.epsilon[n-1].prev_value[j] - Set.epsilon[n-1].value_infty
+				     + Tln1plusemineps[n-1][j] - Tln1pluseminepsinfty[n-1])
+		 * fmin_dlambda[n][i][j];
+
+	// 'up' convolutions
+	if (n < Set.ntypes - 1)
+	  for (int j = 0; j < Set.epsilon[n+1].Npts; ++j)
+	    f_Tln_conv_plus[i] += (Set.epsilon[n+1].prev_value[j] - Set.epsilon[n+1].value_infty
+				   + Tln1plusemineps[n+1][j] - Tln1pluseminepsinfty[n+1])
+	      * fplus_dlambda[n][i][j];
+
+	else f_Tln_conv_plus[i] = 0.0;
+
+	// Do some damping:
+	Set.epsilon[n].value[i] = 0.1 * Set.epsilon[n].prev_value[i]
+	  + 0.9 * (Set.epsilon[n].value_infty + f_Tln_conv_min[i] + f_Tln_conv_plus[i]);
+	// Force the boundary values toward the asymptote, blending over the outermost 10 points on each side:
+	if (i < 10)
+	  Set.epsilon[n].value[i] = (1.0 - 0.1 * i) * Set.epsilon[n].value_infty + 0.1 * i * Set.epsilon[n].value[i];
+	if (i > Set.epsilon[n].Npts - 11)
+	  Set.epsilon[n].value[i] = (1.0 - 0.1 * (Set.epsilon[n].Npts-1 - i)) * Set.epsilon[n].value_infty
+	    + 0.1 * (Set.epsilon[n].Npts-1 - i) * Set.epsilon[n].value[i];
+
+      } // for (int i = 0...
+
+    } // for (int n = 1...
+
+    // All functions have now been iterated.
+
+    // Now calculate diff:
+
+    DP eps0i = 0.0;
+    DP eps1i = 0.0;
+
+    Set.diff = 0.0;
+
+    for (int n = 0; n < Set.ntypes; ++n) {
+      Set.epsilon[n].diff = 0.0;
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	// Measure based on delta f/delta epsilon:
+	if (n == 0)
+	  Set.epsilon[n].diff += Set.epsilon[n].dlambda[i] *
+	    (Set.epsilon[0].value[i] > 0.0 ?
+	     exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT)) : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)))
+	    * fabs(Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i]);
+	else {
+	  eps0i = Set.epsilon[0].Return_Value(Set.epsilon[n].lambda[i]);
+	  eps1i = Set.epsilon[1].Return_Value(Set.epsilon[n].lambda[i]);
+
+	  Set.epsilon[n].diff += Set.epsilon[n].dlambda[i] *
+	    // Logic:  simple 1/2 cascade
+	    (eps0i > 0.0 ? exp(-eps0i/kBT)/(1.0 + exp(-eps0i/kBT)) : 1.0/(1.0 + exp(eps0i/kBT)))
+	    * pow(0.5, n) //* (exp(-eps1i/kBT)/(1.0 + exp(-eps1i/kBT)))
+	    * fabs(Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i]);
+	}
+      }
+      Set.diff += Set.epsilon[n].diff;
+    }
+
+    return;
   }
-  // epsilon[0] is now fully iterated.
 
-  //cout << "Here 1" << endl;
+  void Iterate_and_Extrapolate_2CBG_TBAE (Root_Density_Set& Set, Vect<Root_Density_Set>& IterSet,
+					  Vect<Vect<Vect_DP> >& a_n_dlambda, Vect<Vect<Vect_DP> >& fmin_dlambda,
+					  Vect<Vect<Vect_DP> >& fplus_dlambda, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    int nfit = IterSet.size();
 
-  // Now do the remaining epsilons:
+    for (int ifit = 0; ifit < nfit; ++ifit) {
+      Iterate_2CBG_TBAE (Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
+      IterSet[ifit] = Set;
+    }
 
-  for (int n = 1; n < Set.ntypes; ++n) {
+    // Now extrapolate each value to infinite nr of iterations:
+    Vect_DP density(nfit);
+    Vect_DP oneoverP(nfit);
+    DP deltalambda = 0.0;
+    for (int ifit = 0; ifit < nfit; ++ifit) oneoverP[ifit] = 1.0/(1.0 + ifit*ifit);
+    for (int n = 0; n < Set.ntypes; ++n)
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	for (int ifit = 0; ifit < nfit; ++ifit) density[ifit] = IterSet[ifit].epsilon[n].value[i];
+	polint (oneoverP, density, 0.0, Set.epsilon[n].value[i], deltalambda);
+      }
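+    // (polint performs polynomial interpolation of value versus 1/(1 + k^2), k being the iteration
+    //  index within this window; evaluating at 0 extrapolates to k -> infinity.  deltalambda is
+    //  presumably polint's error estimate and is not used further.)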
 
-    Vect_DP f_Tln_conv_min (0.0, Set.epsilon[n].Npts);  // 'down' convolution
-    Vect_DP f_Tln_conv_plus (0.0, Set.epsilon[n].Npts); // 'up' convolution
+    // Now iterate a few times to stabilize:
+    for (int iint = 0; iint < 2; ++iint)
+      Iterate_2CBG_TBAE(Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
 
-    // For n = ntypes, need:
-    //DP Tln1pluseepsntypesinfty = 2.0 * Omega * Set.ntypes
-    //+ 2.0 * kBT *
-    //((Set.ntypes * Omega/kBT < 24.0 ?  log(1.0 - exp(-2.0 * (Set.ntypes + 1) * Omega/kBT)) : - exp(-2.0 * (Set.ntypes + 1) * Omega/kBT))
-    // - (2.0 * Omega/kBT < 24.0 ? log(1.0 - exp(-2.0 * Omega/kBT)) : - exp(-2.0 * Omega/kBT)));
+    return;
+  }
 
-    Vect_DP fmin(0.0, Set.epsilon[n-1].Npts);
-    Vect_DP fplus(0.0, Set.epsilon[ABACUS::min(n+1, Set.ntypes - 1)].Npts);
+  DP Refine_2CBG_Set (Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT, DP refine_fraction)
+  {
+    // This function replaces Set by a new set with more points, where
+    // Tln(...) needs to be evaluated more precisely.
 
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      f_Tln_conv_min[i] = 0.0;
-      f_Tln_conv_plus[i] = 0.0;
+    // The return value is the sum over all points of |delta_tni| * dlambda, used as a running precision estimate.
 
-      // 'down' convolutions
-      if (n == 1)
-	for (int j = 0; j < Set.epsilon[0].Npts; ++j)
-	  f_Tln_conv_min[i] += Tln1plusemineps[0][j] * fmin_dlambda[n][i][j];
+    // First, calculate the needed Tln...
+    Vect<Vect_DP> Tln1plusemineps(Set.ntypes);
+    Vect_DP Tln1pluseminepsinfty(Set.ntypes);
 
-      else for (int j = 0; j < Set.epsilon[n - 1].Npts; ++j)
-	     f_Tln_conv_min[i] += (Set.epsilon[n-1].prev_value[j] - Set.epsilon[n-1].value_infty + Tln1plusemineps[n-1][j] - Tln1pluseminepsinfty[n-1])
-	       * fmin_dlambda[n][i][j];
+    for (int n = 0; n < Set.ntypes; ++n) {
 
-      // 'up' convolutions
-      if (n < Set.ntypes - 1)
+      Tln1plusemineps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
+
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	Tln1plusemineps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
+	  kBT * (Set.epsilon[n].value[i] < 24.0 * kBT
+		 ? log(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) : exp(-Set.epsilon[n].value[i]/kBT))
+	  : -Set.epsilon[n].value[i] + kBT * log (1.0 + exp(Set.epsilon[n].value[i]/kBT));
+      }
+      Tln1pluseminepsinfty[n] = kBT * (Set.epsilon[n].value_infty < 24.0 * kBT ?
+				       log(1.0 + exp(-Set.epsilon[n].value_infty/kBT)) : exp(-Set.epsilon[n].value_infty/kBT));
+    }
+
+    // Now find the achieved delta_tni
+    DP max_delta_tni_dlambda = 0.0;
+    DP max_delta_tni_dlambda_toplevel = 0.0;
+    DP sum_delta_tni_dlambda = 0.0;
+
+    Vect<Vect_DP> tni(Set.ntypes);
+    Vect<Vect_DP> tni_ex(Set.ntypes);
+
+    DP measure_factor = 0.0;
+    DP eps0i = 0.0;
+    DP eps1i = 0.0;
+
+    for (int n = 0; n < Set.ntypes; ++n) {
+
+      tni[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
+      tni_ex[n] = Vect_DP (0.0, Set.epsilon[n].Npts);  // extrapolation from adjacent points, to compare to obtained value
+
+      for (int i = 1; i < Set.epsilon[n].Npts - 1; ++i) {
+	if (n == 0) {
+	  // Measure based on delta f/delta epsilon:
+	  measure_factor = (Set.epsilon[0].value[i] > 0.0
+			    ? exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
+			    : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
+	}
+	else {
+	  // Measure based on delta f/delta epsilon:
+	  eps0i = Set.epsilon[0].Return_Value(Set.epsilon[n].lambda[i]);
+	  eps1i = Set.epsilon[1].Return_Value(Set.epsilon[n].lambda[i]);
+
+	  measure_factor = (eps0i > 0.0 ? exp(-eps0i/kBT)/(1.0 + exp(-eps0i/kBT)) : 1.0/(1.0 + exp(eps0i/kBT)))
+	    // Logic:  simple 1/2 per level cascade down
+	    * pow(0.5, n);
+
+	}
+
+	tni[n][i] = measure_factor * Set.epsilon[n].value[i];
+	tni_ex[n][i] = measure_factor * (Set.epsilon[n].value[i-1] *  (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
+					 + Set.epsilon[n].value[i+1] * (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
+	  /(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
+
+	max_delta_tni_dlambda = ABACUS::max(max_delta_tni_dlambda, fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i]);
+	if (n == Set.ntypes - 1)
+	  max_delta_tni_dlambda_toplevel = ABACUS::max(max_delta_tni_dlambda_toplevel,
+							fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i]);
+	sum_delta_tni_dlambda += fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i];
+      }
+    }
+
+
+    // We now determine the locations where we need to add points
+    Vect<Vect<bool> > need_new_point_around(Set.ntypes);
+    Vect<bool> need_to_extend_limit(false, Set.ntypes);
+
+    for (int n = 0; n < Set.ntypes; ++n) {
+
+      need_new_point_around[n] = Vect<bool> (false, Set.epsilon[n].Npts);
+
+      for (int i = 1; i < Set.epsilon[n].Npts - 1; ++i) {
+	if (fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i] > (1.0 - refine_fraction) * max_delta_tni_dlambda) {
+	  need_new_point_around[n][i] = true;
+	  // Do also the symmetric ones...  Require need...[n][i] = need...[n][Npts - 1 - i]
+	  need_new_point_around[n][Set.epsilon[n].Npts - 1 - i] = true;
+	}
+      }
+
+      // Check boundary values;  if too different from value_infty, extend limits
+      if (n == 0) {
+	// Measure based on delta f/delta epsilon:
+	if (exp(-Set.epsilon[0].value[0]/kBT) > 0.001 * max_delta_tni_dlambda)
+	  need_to_extend_limit[0] = true;
+      }
+      else
+	// Measure deviation from asymptote for 10th element, since we smoothly put the i<10 ones to the asymptote when damping:
+	if (fabs(Set.epsilon[n].value[10] - Set.epsilon[n].value_infty) * Set.epsilon[0].dlambda[10] > max_delta_tni_dlambda)
+	  need_to_extend_limit[n] = true;
+    }
+
+    // Check if we need to add a level
+    bool need_new_epsilon_n_function = false;
+
+    // We add new levels if the integral a_n * Tln1plusemineps at the highest level differs too much from
+    // the asymptotic value.  Since such integrals appear for each point of the epsilon[0] function, these
+    // errors should be compared to the individual delta_tni factors.
+    DP a_2_Tln_conv_0_integ = 0.0;
+    DP oneoverpi = 1.0/PI;
+    DP twoovernc = 2.0/((Set.ntypes - 1) * c_int);
+    int i0 = Set.epsilon[0].Npts/2;
+    for (int j = 0; j < Set.epsilon[Set.ntypes - 1].Npts; ++j)
+      a_2_Tln_conv_0_integ += (Tln1plusemineps[Set.ntypes - 1][j] - Tln1pluseminepsinfty[Set.ntypes - 1])
+	* oneoverpi * (atan(twoovernc * (Set.epsilon[0].lambda[i0] - Set.epsilon[Set.ntypes - 1].lambda[j]
+					 + 0.5 * Set.epsilon[Set.ntypes - 1].dlambda[j]))
+		       - atan(twoovernc * (Set.epsilon[0].lambda[i0] - Set.epsilon[Set.ntypes - 1].lambda[j]
+					   - 0.5 * Set.epsilon[Set.ntypes - 1].dlambda[j])));
+
+    // Compare to prediction for this integral based on value_infty, which is simply 0.
+    // Count this difference Set.ntypes times over, since it cascades down all levels
+    if (fabs(a_2_Tln_conv_0_integ) * Set.ntypes > max_delta_tni_dlambda) need_new_epsilon_n_function = true;
+
+    // Additionally, if the highest level needs updating, we automatically add new functions:
+    for (int i = 0; i < Set.epsilon[Set.ntypes - 1].Npts; ++i)
+      if (need_new_point_around[Set.ntypes - 1][i] || need_to_extend_limit[Set.ntypes - 1]) need_new_epsilon_n_function = true;
+
+
+    // Now insert the new points between existing points:
+    Set.Insert_new_points (need_new_point_around);
+
+    // Now extend the integration limits if needed:
+    Set.Extend_limits (need_to_extend_limit);
+
+    // If we add a level, we do it here:
+    if (need_new_epsilon_n_function) {
+      // Insert more than one function per cycle...
+      Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));
+      Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
+      Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
+      Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
+      Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
+    }
+
+    return(sum_delta_tni_dlambda);
+  }
+
+  DP Calculate_Gibbs_Free_Energy (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    // Computes the Gibbs free energy, assuming that epsilon[0] is symmetric.
+
+    // WORKING VERSION
+    DP sum_f = 0.0;
+    Vect_DP f(0.0, Set.epsilon[0].Npts);
+    DP sum_f_check = 0.0;
+    Vect_DP fcheck(0.0, Set.epsilon[0].Npts);
+    DP sum_g_check = 0.0;
+    Vect_DP gcheck(0.0, Set.epsilon[0].Npts);
+    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+      f[i] = (Set.epsilon[0].value[i] > 0.0 ? kBT* log(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
+	      : -Set.epsilon[0].value[i] + kBT * log(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
+      sum_f += Set.epsilon[0].dlambda[i] * f[i];
+      fcheck[i] = kBT * log(1.0 + exp(-(Set.epsilon[0].lambda[i] * Set.epsilon[0].lambda[i] - mu - Omega)/kBT));
+      sum_f_check += Set.epsilon[0].dlambda[i] * fcheck[i];
+      gcheck[i] = exp(-(Set.epsilon[0].lambda[i] * Set.epsilon[0].lambda[i])/kBT);
+      sum_g_check += Set.epsilon[0].dlambda[i] * gcheck[i];
+    }
+
+    // Now add alpha curvature terms:
+    for (int i = 1; i < Set.epsilon[0].Npts - 1; ++i)
+      sum_f += pow(Set.epsilon[0].dlambda[i], 3.0) * ((f[i+1] - f[i])/(Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i])
+						      - (f[i] - f[i-1])/(Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1]))
+	/(12.0 * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i-1]));
+
+    // Now add alpha curvature terms:
+    DP sum_gcorralphacheck = 0.0;
+    Vect_DP gcorr_alpha_check(0.0, Set.epsilon[0].Npts);
+    for (int i = 1; i < Set.epsilon[0].Npts - 1; ++i) {
+      gcorr_alpha_check[i] = (1.0/12.0) * pow(Set.epsilon[0].dlambda[i], 3.0)
+	* ((gcheck[i+1] - gcheck[i]) * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1])
+	   - (gcheck[i] - gcheck[i-1]) * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i]))
+	/((Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i-1]) * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i])
+	  * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1]));
+      sum_gcorralphacheck += gcorr_alpha_check[i];
+    }
+
+    // Testing:
+    int Npts_test = Set.epsilon[0].Npts;
+    DP lambdamax_test = Set.epsilon[0].lambdamax;
+    DP testsum = 0.0;
+    DP testgauss = 0.0;
+    DP lambda;
+    DP dlambda;
+    for (int i = 0; i < Npts_test; ++i) {
+      lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
+      dlambda = lambdamax_test * 2.0/Npts_test;
+      testsum += kBT * log(1.0 + exp(-(lambda*lambda - mu - Omega)/kBT)) * dlambda;
+      testgauss += exp(-lambda*lambda/kBT) * dlambda;
+    }
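+    // (The *_check and test* sums above are consistency checks against the free-particle limit,
+    //  built from the bare dispersion lambda^2 - mu - Omega and a plain Gaussian; they do not enter
+    //  the returned value, which only involves sum_f.)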
+
+    return(-sum_f/twoPI);
+  }
+
+  DP Calculate_dGibbs_dchempot (const Root_Density_Set& DSet, const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    // This calculates the derivative of the Gibbs free energy with respect to either of the two chemical potentials,
+    // given the fundamental set Set for eps and DSet for either deps_dmu or deps_dOmega.
+
+    DP sum_f = 0.0;
+    Vect_DP f(0.0, Set.epsilon[0].Npts);
+
+    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+      f[i] = DSet.epsilon[0].value[i] * (Set.epsilon[0].value[i] > 0.0
+					 ? exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
+					 : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
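+      // (Both branches above are equal to 1/(1 + exp(epsilon[0]/kBT)); the split form only avoids
+      //  overflowing the exponential for large |epsilon[0]|.)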
+      sum_f += DSet.epsilon[0].dlambda[i] * f[i];
+    }
+
+    // Now add alpha curvature terms:
+    for (int i = 1; i < DSet.epsilon[0].Npts - 1; ++i)
+      sum_f += pow(DSet.epsilon[0].dlambda[i], 3.0)
+	* ((f[i+1] - f[i])/(DSet.epsilon[0].lambda[i+1] - DSet.epsilon[0].lambda[i])
+	   - (f[i] - f[i-1])/(DSet.epsilon[0].lambda[i] - DSet.epsilon[0].lambda[i-1]))
+	/(12.0 * (DSet.epsilon[0].lambda[i+1] - DSet.epsilon[0].lambda[i-1]));
+
+    return(sum_f/twoPI);
+  }
+
+  Vect<Vect<Vect_DP> > Build_a_n_dlambda (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    DP oneoverpi = 1.0/PI;
+    DP oneoverc = 1.0/c_int;
+    DP twoovernc = 2.0/c_int;
+
+    Vect<Vect<Vect_DP> > a_n_dlambda(Set.epsilon[0].Npts);
+    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+      a_n_dlambda[i] = Vect<Vect_DP>(Set.ntypes);
+      for (int n = 0; n < Set.ntypes; ++n) {
+	a_n_dlambda[i][n] = Vect_DP(0.0, Set.epsilon[n].Npts);
+      }
+    }
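+    // a_n_dlambda[i][n][j] will hold the exact integral of the Lorentzian kernel
+    //   a_n(lambda) = (1/PI) (n c_int/2) / (lambda^2 + (n c_int/2)^2)      (half-width c_int for n == 0)
+    // over the j-th cell of epsilon[n], centred on epsilon[0]'s lambda_i, so that convolutions
+    // a_n * g reduce to plain sums over j of these weights times g_j.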
+
+    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+
+      // Do n == 0 separately:
+      for (int j = 0; j < Set.epsilon[0].Npts; ++j)
+	a_n_dlambda[i][0][j] = oneoverpi
+	  * (atan(oneoverc * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[j] + 0.5 * Set.epsilon[0].dlambda[j]))
+	     - atan(oneoverc * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[j] - 0.5 * Set.epsilon[0].dlambda[j])));
+
+      // Now do n > 0:
+      for (int n = 1; n < Set.ntypes; ++n) {
+	twoovernc = 2.0/(n * c_int);
+	for (int j = 0; j < Set.epsilon[n].Npts; ++j) {
+	  a_n_dlambda[i][n][j] = oneoverpi
+	    * (atan(twoovernc * (Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[j] + 0.5 * Set.epsilon[n].dlambda[j]))
+	       - atan(twoovernc * (Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[j] - 0.5 * Set.epsilon[n].dlambda[j])));
+	}
+      } // for (int n
+    } // for (int i
+
+    return(a_n_dlambda);
+  }
+
+  Vect<Vect<Vect_DP> >  Build_fmin_dlambda (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    DP oneoverpi = 1.0/PI;
+    DP pioverc = PI/c_int;
+    DP twopioverc = 2.0*PI/c_int;
+    DP piovertwoc = 0.5 * pioverc;
+
+    Vect<Vect<Vect_DP> > fmin_dlambda(Set.ntypes);
+    for (int n = 0; n < Set.ntypes; ++n) {
+      fmin_dlambda[n] = Vect<Vect_DP> (Set.epsilon[n].Npts);
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i)
+	fmin_dlambda[n][i] = Vect_DP (0.0, Set.epsilon[ABACUS::max(n-1, 0)].Npts);
+    }
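+    // fmin_dlambda[n][i][j] will hold the exact integral of the kernel 1/(2 c_int cosh(PI lambda/c_int))
+    // over the j-th cell of epsilon[n-1], centred on epsilon[n]'s lambda_i:  the atan/sinh expression
+    // below equals (1/PI) [atan(tanh(PI (x + dl/2)/(2 c))) - atan(tanh(PI (x - dl/2)/(2 c)))], rewritten
+    // with exponentials so that no cosh overflows at large |lambda_i - lambda_j|.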
+
+    for (int n = 1; n < Set.ntypes; ++n) {
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+
+	for (int j = 0; j < Set.epsilon[n-1].Npts; ++j)
+	  fmin_dlambda[n][i][j] = oneoverpi
+	    * atan(exp(-pioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[j]))
+		   * 2.0 * sinh(piovertwoc * Set.epsilon[n-1].dlambda[j])
+		   /(1.0 + exp(-twopioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[j]))));
+      } // for i
+    } // for n
+
+    return(fmin_dlambda);
+  }
+
+  Vect<Vect<Vect_DP> > Build_fplus_dlambda (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    DP oneoverpi = 1.0/PI;
+    DP pioverc = PI/c_int;
+    DP twopioverc = 2.0*PI/c_int;
+    DP piovertwoc = 0.5 * pioverc;
+
+    Vect<Vect<Vect_DP> > fplus_dlambda(Set.ntypes);
+    for (int n = 0; n < Set.ntypes; ++n) {
+      fplus_dlambda[n] = Vect<Vect_DP> (Set.epsilon[n].Npts);
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i)
+	fplus_dlambda[n][i] = Vect_DP (0.0, Set.epsilon[ABACUS::min(n+1, Set.ntypes - 1)].Npts);
+    }
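+    // Same cell-integrated 1/(2 c_int cosh(PI lambda/c_int)) kernel as in Build_fmin_dlambda,
+    // here coupling level n to level n + 1.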
+
+    for (int n = 0; n < Set.ntypes - 1; ++n) {
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
 	for (int j = 0; j < Set.epsilon[n+1].Npts; ++j)
-	  f_Tln_conv_plus[i] += (Set.epsilon[n+1].prev_value[j] - Set.epsilon[n+1].value_infty + Tln1plusemineps[n+1][j] - Tln1pluseminepsinfty[n+1])
-	    * fplus_dlambda[n][i][j];
+	  fplus_dlambda[n][i][j] = oneoverpi
+	    * atan(exp(-pioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[j]))
+		   * 2.0 * sinh(piovertwoc * Set.epsilon[n+1].dlambda[j])
+		   /(1.0 + exp(-twopioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[j]))));
+      }
+    }
 
-      else f_Tln_conv_plus[i] = 0.0;
+    return(fplus_dlambda);
+  }
 
-      // Do some damping:
-      Set.epsilon[n].value[i] = 0.1 * Set.epsilon[n].prev_value[i]
-	+ 0.9 * (Set.epsilon[n].value_infty + f_Tln_conv_min[i] + f_Tln_conv_plus[i]);
+
+  Root_Density_Set Solve_2CBG_TBAE_via_refinements (DP c_int, DP mu, DP Omega, DP kBT, int Max_Secs,
+						    ofstream& LOG_outfile, bool Save_data)
+  {
+    // This solves the 2CBG TBAE as accurately as possible within the given time constraint.
+
+    clock_t StartTime = clock();
+
+    int Max_CPU_ticks = 98 * (Max_Secs - 0) * CLOCKS_PER_SEC/100;  // use 98% of the allotted time, assuming we time to about 2% accuracy (no separate wrap-up reserve is subtracted).
+
+    // Set basic precision needed:
+    DP running_prec = 1.0;
+
+    DP refine_fraction = 0.5; // fraction of points targeted for refinement in each cycle
+
+    // Set basic number of types needed:
+    int ntypes_needed = int(kBT * log(kBT/1.0e-14)/Omega);
+    int ntypes = ABACUS::max(ntypes_needed, 1);
+    ntypes = ABACUS::min(ntypes, 10);
+
+    if (Save_data)
+      if (ntypes >= 10) LOG_outfile << "WARNING:  ntypes needs to be quite high for c_int = " << c_int
+				    << " mu = " << mu << " Omega = " << Omega
+				    << " kBT = " << kBT << ".  Set to " << ntypes << ", ideally needed: "
+				    << ntypes_needed << ".  Accuracy might be incorrectly evaluated." << endl;
+
+    DP lambdamax = 10.0 + sqrt(ABACUS::max(1.0, kBT * 36.0 + mu + Omega));
+    // such that exp(-(lambdamax^2 - mu - Omega)/T) <~ machine_eps
+    int Npts = 50;
+    Vect_INT Npts_init(Npts, ntypes);
+    if (Save_data) LOG_outfile << "Npts (basic) set to " << Npts_init << endl;
+
+    Vect_DP lambdamax_init(lambdamax, ntypes);  // such that exp(-pi *lambdamax/c) <~ machine_eps
+    Npts_init[0] = 1 * Npts;  // multiplier for the lowest level (increase it to give that level more precision)
+    lambdamax_init[0] = 10.0 + sqrt(ABACUS::max(1.0, kBT * 36.0 + mu + Omega));
+    // such that exp(-(lambdamax^2 - mu - Omega)/T) <~ machine_eps
+    Root_Density_Set TBA_Set (ntypes, Npts_init, lambdamax_init);
+
+    // Set the asymptotics of the TBA_fns:
+    Set_2CBG_Asymptotics (TBA_Set, mu, Omega, kBT);
+
+    // Initiate the functions:
+    Initiate_2CBG_TBA_Functions (TBA_Set, mu, Omega);
+
+    clock_t StopTime = clock();
+    clock_t Cycle_StartTime, Cycle_StopTime;
+
+    int CPU_ticks = StopTime - StartTime;
+
+    int ncycles = 0;
+    int niter_tot = 0;
+
+    do {
+
+      StartTime = clock();
+
+      Cycle_StartTime = clock();
+
+      // The running precision is an estimate of the accuracy of the free energy integral.
+      // Refine_2CBG_Set returns sum_delta_tni_dlambda, which is used directly as the running precision estimate.
+      running_prec = Refine_2CBG_Set (TBA_Set, c_int, mu, Omega, kBT, refine_fraction);
+
+      Vect<Vect<Vect_DP> > a_n_dlambda = Build_a_n_dlambda (TBA_Set, c_int, mu, Omega, kBT);
+      Vect<Vect<Vect_DP> > fmin_dlambda = Build_fmin_dlambda (TBA_Set, c_int, mu, Omega, kBT);
+      Vect<Vect<Vect_DP> > fplus_dlambda = Build_fplus_dlambda (TBA_Set, c_int, mu, Omega, kBT);
+
+      StopTime = clock();
+
+      CPU_ticks += StopTime - StartTime;
+
+      int niter = 0;
+      int niter_max = 300;  // same iteration cap for the first and for subsequent cycles
+
+      // For extrapolations:
+      Vect<Root_Density_Set> IterSet(4);
+
+      do {
+
+	StartTime = clock();
+
+	if (niter <= 10 || niter > 100) {
+	  Iterate_2CBG_TBAE (TBA_Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
+	  niter++;
+	}
+	else {
+	  Iterate_and_Extrapolate_2CBG_TBAE (TBA_Set, IterSet, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
+	  niter += 6;
+	}
+
+	StopTime = clock();
+	CPU_ticks += StopTime - StartTime;
+
+      } while (niter < 5 || (niter < niter_max && TBA_Set.diff > 0.1 * running_prec && CPU_ticks < Max_CPU_ticks));
+
+      if (Save_data) {
+	LOG_outfile << "ncycles = " << ncycles << "\trunning_prec = " << running_prec << "\t niter = " << niter
+		    << "\tntypes = " << TBA_Set.ntypes << "\tNpts ";
+	for (int n = 0; n < TBA_Set.ntypes; ++n) LOG_outfile << TBA_Set.epsilon[n].Npts << " ";
+	LOG_outfile << "\tNpts_total = " << TBA_Set.Npts_total << endl
+		    << "\tdiff = " << TBA_Set.diff
+		    << "\tGSE = " << Calculate_Gibbs_Free_Energy (TBA_Set, c_int, mu, Omega, kBT) << endl;
+      }
+
+      ncycles++;
+      niter_tot += niter;
+
+      if (niter == niter_max) {
+	if (Save_data) LOG_outfile << "Not able to improve functions enough after " << niter_max << " iterations." << endl;
+      }
+
+      Cycle_StopTime = clock();
+
+    } while (CPU_ticks < Max_CPU_ticks - 2.0*(Cycle_StopTime - Cycle_StartTime));
+    // Allow a new cycle only if there is time, assuming new cycle time < 2* last one
+
+    if (Save_data) {
+      LOG_outfile << "c_int " << c_int << "\tmu " << mu << "\tOmega " << Omega << "\tkBT " << kBT
+		  << "\tncycles = " << ncycles << "\trunning_prec = " << running_prec << "\t niter_tot = " << niter_tot
+		  << "\tntypes = " << TBA_Set.ntypes << "\tdiff = " << TBA_Set.diff << endl << "\tNpts ";
+      for (int n = 0; n < TBA_Set.ntypes; ++n) LOG_outfile << TBA_Set.epsilon[n].Npts << " ";
+      LOG_outfile << "\tNpts_total = " << TBA_Set.Npts_total << endl;
+    }
+
+    return(TBA_Set);
+  }
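+  // Minimal usage sketch (illustrative only; the parameter values are placeholders):
+  //
+  //   ofstream logfile ("tba_2cbg.log");
+  //   Root_Density_Set Set = Solve_2CBG_TBAE_via_refinements (c_int, mu, Omega, kBT, Max_Secs, logfile, true);
+  //   DP gibbs = Calculate_Gibbs_Free_Energy (Set, c_int, mu, Omega, kBT);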
+
+
+  // Iterative procedures for deps/dmu or /dOmega:
+  void Iterate_2CBG_deps_dchempot (int option, Root_Density_Set& DSet, const Root_Density_Set& Set,
+				   Vect<Vect<Vect_DP> >& a_n_dlambda, Vect<Vect<Vect_DP> >& fmin_dlambda,
+				   Vect<Vect<Vect_DP> >& fplus_dlambda, DP c_int, DP mu, DP Omega, DP kBT)
+  {
+    // Produces a new Root_Density_Set for depsilon/dmu (option == 0) or depsilon/dOmega (option == 1) from a previous iteration.
+    // Does NOT add types or change Npts, lambdamax values.
+
+    // First define some useful functions:
+    Vect<Vect_DP> depsover1plusemineps(Set.ntypes);
+    Vect<Vect_DP> depsover1plusepluseps(Set.ntypes);
+    Vect_DP depsover1pluseminepsinfty(Set.ntypes);
+    Vect_DP depsover1pluseplusepsinfty(Set.ntypes);
+
+    for (int n = 0; n < Set.ntypes; ++n) {
+
+      depsover1plusemineps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
+      depsover1plusepluseps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
+
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	depsover1plusemineps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
+	  DSet.epsilon[n].value[i]/(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) :
+	  DSet.epsilon[n].value[i] * exp(Set.epsilon[n].value[i]/kBT)/(1.0 + exp(Set.epsilon[n].value[i]/kBT));
+	depsover1plusepluseps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
+	  DSet.epsilon[n].value[i] * exp(-Set.epsilon[n].value[i]/kBT)/(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) :
+	  DSet.epsilon[n].value[i]/(1.0 + exp(Set.epsilon[n].value[i]/kBT));
+
+	// Keep the previous values of deps (used below for damping):
+	DSet.epsilon[n].prev_value[i] = DSet.epsilon[n].value[i];
+
+      }
+      depsover1pluseminepsinfty[n] = DSet.epsilon[n].value_infty/(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
+      depsover1pluseplusepsinfty[n] = DSet.epsilon[n].value_infty * exp(-Set.epsilon[n].value_infty/kBT)
+	/(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
+    }
+
+
+    // Now do the necessary convolutions for epsilon == epsilon[0].
+    // For each value of lambda, do the convolutions:
+    // Careful:  the lambda values (index i) are those of epsilon[0]; the lambda' values (index j) are those of epsilon[n] !!
+    Vect<Vect_DP> a_n_depsover_conv(Set.ntypes);
+    for (int n = 0; n < Set.ntypes; ++n) {
+      a_n_depsover_conv[n] = Vect_DP (0.0, Set.epsilon[0].Npts);
+      Vect_DP f(0.0, Set.epsilon[n].Npts);
+
+      for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
+	a_n_depsover_conv[n][i] = 0.0;
+
+	for (int j = 0; j < Set.epsilon[n].Npts; ++j) {
+	  f[j] = depsover1plusepluseps[n][j] * a_n_dlambda[i][n][j];
+	  a_n_depsover_conv[n][i] += f[j];
+	}
+      }
+    } // for (int n...    We now have all the a_n * deps... at our disposal.
+
+    // For n > nmax sum in RHS of BE for epsilon, assuming epsilon_n = epsilon_n^\infty in those cases:
+    // Remember: nmax = Set.ntypes - 1
+    DP Smaxsum = option == 0 ? 0.0 : 2.0 * ((Set.ntypes + 1.0) * exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT)
+					    /(1.0 - exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT))
+					    - Set.ntypes * exp(-2.0 * Set.ntypes * Omega/kBT)
+					    /(1.0 - exp(-2.0 * Set.ntypes * Omega/kBT)));
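+    // (For option == 1 this equals d/dOmega of the Smaxsum used in Iterate_2CBG_TBAE,
+    //  kBT ln[(1 - q^{ntypes+1})/(1 - q^{ntypes})] with q = exp(-2 Omega/kBT); for option == 0
+    //  that quantity is independent of mu, hence the zero.)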
+
+    // Reconstruct the epsilon[0] function:
+    for (int i = 0; i < DSet.epsilon[0].Npts; ++i) {
+      DSet.epsilon[0].value[i] = -1.0;
+
+      // Add the convolutions:
+      for (int n = 0; n < Set.ntypes; ++n)
+	DSet.epsilon[0].value[i] += a_n_depsover_conv[n][i];
+      // Add the asymptotic parts of the convolutions:  the n == 0 part vanishes because of the 1 + exp(epsilon[0]/kBT) in its denominator
+      for (int n = 1; n < Set.ntypes; ++n)
+	DSet.epsilon[0].value[i] += depsover1pluseplusepsinfty[n]
+	  * (1.0 - (atan((DSet.epsilon[n].lambdamax - DSet.epsilon[0].lambda[i])/(0.5 * n * c_int))
+		    + atan((DSet.epsilon[n].lambdamax + DSet.epsilon[0].lambda[i])/(0.5 * n * c_int)))/PI);
+
+      // Add the leftover summation for species n > nmax, assuming epsilon_n = epsilon_n^\infty in those cases:
+      DSet.epsilon[0].value[i] -= Smaxsum;
+      // Include some damping:
+      DSet.epsilon[0].value[i] = 0.1 * DSet.epsilon[0].prev_value[i] + 0.9 * DSet.epsilon[0].value[i];
       // Force boundary values to asymptotes:  force boundary 10 points on each side
       if (i < 10)
-	Set.epsilon[n].value[i] = (1.0 - 0.1 * i) * Set.epsilon[n].value_infty + 0.1 * i * Set.epsilon[n].value[i];
-      if (i > Set.epsilon[n].Npts - 11)
-	Set.epsilon[n].value[i] = (1.0 - 0.1 * (Set.epsilon[n].Npts-1 - i)) * Set.epsilon[n].value_infty + 0.1 * (Set.epsilon[n].Npts-1 - i) * Set.epsilon[n].value[i];
-      /*
-      if (i == 0 && n < 2) {
-	cout << "epsilon[" << n << "][0]: " << Set.epsilon[n].value[i] << "\t" << Set.epsilon[n].prev_value[i] << "\t"
-	     << Set.epsilon[n].value_infty << "\t" << f_Tln_conv_min[i] << "\t" << f_Tln_conv_plus[i]
-	  //<< "\tepsilon[" << n << "][1]: " << Set.epsilon[n].value[1] << "\t" << Set.epsilon[n].prev_value[1] << "\t"
-	  // << Set.epsilon[n].value_infty << "\t" << f_Tln_conv_min[1] << "\t" << f_Tln_conv_plus[1]
-	     << "\tepsilon[" << n << "][10]: " << Set.epsilon[n].value[10] << "\t" << Set.epsilon[n].prev_value[10] << "\t"
-	     << Set.epsilon[n].value_infty << "\t" << f_Tln_conv_min[10] << "\t" << f_Tln_conv_plus[10]
-	     << endl;
-      }
-      */
-      /*
-      if (n == 1) cout << setprecision(8) << Set.epsilon[n].lambda[i] << "\t" << Set.epsilon[n].prev_value[i] << "\t" << Set.epsilon[n].value[i] << "\t"
-		       << setprecision(16) << f_Tln_conv_min[i] << "\t" << f_Tln_conv_plus[i] << "\t"
-		       << fplus_dlambda[n][i][Set.epsilon[n+1].Npts - 1]
-		       << endl;
-      */
-      /*
-      if (fabs(1.0 - Set.epsilon[n].value[i]/Set.epsilon[n].prev_value[i]) > 0.1) {
-	cout << n << "\t" << i << "\t" << setprecision(8) << Set.epsilon[n].lambda[i] << "\t" << Set.epsilon[n].prev_value[i] << "\t" << Set.epsilon[n].value[i] << "\t"
-	     << Set.epsilon[n].value_infty << "\t" << setprecision(16) << f_Tln_conv_min[i] << "\t" << f_Tln_conv_plus[i]
-	     << endl;
-	cout << Set.epsilon[n+1].prev_value << endl << endl << Tln1plusemineps[n+1] << endl << endl;
-	cout << "dlambda: " << Set.epsilon[n+1].dlambda << endl << endl;
-	for (int j = 0; j < Set.epsilon[n+1].Npts; ++j)
-	  cout << fplus_dlambda[n][i][j]/Set.epsilon[n+1].dlambda[j] << " ";
-	cout << endl << endl;
-	for (int j = 0; j < Set.epsilon[n+1].Npts; ++j)
-	  cout << Set.epsilon[n+1].prev_value[j] - Set.epsilon[n+1].value_infty + Tln1plusemineps[n+1][j] - Tln1pluseminepsinfty[n+1] << " ";
-	cout << endl << endl;
-      }
-      */
+	DSet.epsilon[0].value[i] = (1.0 - 0.1 * i) * DSet.epsilon[0].value_infty + 0.1 * i * DSet.epsilon[0].value[i];
+      if (i > DSet.epsilon[0].Npts - 11)
+	DSet.epsilon[0].value[i] = (1.0 - 0.1 * (DSet.epsilon[0].Npts-1 - i)) * DSet.epsilon[0].value_infty
+	  + 0.1 * (DSet.epsilon[0].Npts-1 - i) * DSet.epsilon[0].value[i];
 
-    } // for (int i = 0...
-
-  } // for (int n = 1...
-
-  // All functions have now been iterated.
-
-  // Now calculate diff:
-
-  DP eps0i = 0.0;
-  DP eps1i = 0.0;
-
-  Set.diff = 0.0;
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-    Set.epsilon[n].diff = 0.0;
-    //sum_N += Set.epsilon[n].Npts;
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      //Set.epsilon[n].diff += pow((Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i])
-      //			   /ABACUS::max(1.0, fabs(Set.epsilon[n].value[i] + Set.epsilon[n].prev_value[i])), 2.0);
-      //Set.epsilon[n].diff += fabs((Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i])
-      //			   /ABACUS::max(1.0, fabs(Set.epsilon[n].value[i] + Set.epsilon[n].prev_value[i])));
-      //Set.epsilon[n].diff += fabs(kBT * log((1.0 + exp(-fabs(Set.epsilon[n].value[i])/kBT))/(1.0 + exp(-fabs(Set.epsilon[n].prev_value[i])/kBT))));
-      /*
-      // This one was used in working version before delta f measure:
-      Set.epsilon[n].diff += Set.epsilon[n].dlambda[i] *
-	fabs(Set.epsilon[n].value[i] > 0.0 ? kBT * log((1.0 + exp(-Set.epsilon[n].value[i]/kBT))/(1.0 + exp(-Set.epsilon[n].prev_value[i]/kBT)))
-	     : (-Set.epsilon[n].value[i] + kBT * log(1.0 + exp(Set.epsilon[n].value[i]/kBT)))
-	     - (-Set.epsilon[n].prev_value[i] + kBT * log(1.0 + exp(Set.epsilon[n].prev_value[i]/kBT))));
-      */
-      // Measure based on delta f/delta epsilon:
-      if (n == 0)
-	Set.epsilon[n].diff += Set.epsilon[n].dlambda[i] *
-	  //(Set.epsilon[0].value[i] > 0.0 ? exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-	  // : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)))
-	  //pow(Set.epsilon[0].value[i] > 0.0 ?
-	  //  exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT)) : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)), 2.0)
-	  //* fabs(Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i]);
-	  (Set.epsilon[0].value[i] > 0.0 ?
-	   exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT)) : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)))
-	  * fabs(Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i]);
-      else {
-	eps0i = Set.epsilon[0].Return_Value(Set.epsilon[n].lambda[i]);
-	eps1i = Set.epsilon[1].Return_Value(Set.epsilon[n].lambda[i]);
-
-	Set.epsilon[n].diff += Set.epsilon[n].dlambda[i] *
-	  // Logic:  simple 1/2 cascade
-	  (eps0i > 0.0 ? exp(-eps0i/kBT)/(1.0 + exp(-eps0i/kBT)) : 1.0/(1.0 + exp(eps0i/kBT)))
-	  * pow(0.5, n) //* (exp(-eps1i/kBT)/(1.0 + exp(-eps1i/kBT)))
-	  * fabs(Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i]);
-      }
-      //if (n == 0) cout << i << "\t" << Set.epsilon[n].value[i] << "\t" << Set.epsilon[n].prev_value[i]
-      //	       << "\t" << Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i] << "\t";
-      //if (n == 0 && i == Set.epsilon[n].Npts - 1) cout << endl;
-	/*
-	fabs(kBT * ((Set.epsilon[n].value[i] < 24.0 * kBT ? log(1.0 + exp(-Set.epsilon[n].value[i]/kBT))
-		     : exp(-fabs(Set.epsilon[n].value[i])/kBT))
-		    - (fabs(Set.epsilon[n].prev_value[i]) < 24.0 * kBT ? log(1.0 + exp(-fabs(Set.epsilon[n].prev_value[i])/kBT))
-		       : exp(-fabs(Set.epsilon[n].prev_value[i])/kBT))));
-	*/
     }
-    //Set.epsilon[n].diff /= Set.epsilon[n].Npts;
-    Set.diff += Set.epsilon[n].diff;
-    //cout << n << " " << Set.epsilon[n].diff << "\t";
-  }
-  //Set.diff /= Set.ntypes;
-  //cout << endl;
+    // epsilon[0] is now fully iterated.
 
-  return;
-}
+    // Now do the remaining epsilons:
 
-void Iterate_and_Extrapolate_2CBG_TBAE (Root_Density_Set& Set, Vect<Root_Density_Set>& IterSet, Vect<Vect<Vect_DP> >& a_n_dlambda,
-					Vect<Vect<Vect_DP> >& fmin_dlambda, Vect<Vect<Vect_DP> >& fplus_dlambda, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  //
-
-  int nfit = IterSet.size();
-
-  for (int ifit = 0; ifit < nfit; ++ifit) {
-    Iterate_2CBG_TBAE (Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
-    IterSet[ifit] = Set;
-  }
-
-  // Now extrapolate each value to infinite nr of iterations:
-  Vect_DP density(nfit);
-  Vect_DP oneoverP(nfit);
-  DP deltalambda = 0.0;
-  for (int ifit = 0; ifit < nfit; ++ifit) oneoverP[ifit] = 1.0/(1.0 + ifit*ifit);
-  for (int n = 0; n < Set.ntypes; ++n)
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      for (int ifit = 0; ifit < nfit; ++ifit) density[ifit] = IterSet[ifit].epsilon[n].value[i];
-      polint (oneoverP, density, 0.0, Set.epsilon[n].value[i], deltalambda);
-    }
-
-  // Now iterate a few times to stabilize:
-  for (int iint = 0; iint < 2; ++iint) Iterate_2CBG_TBAE(Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
-
-  return;
-}
-
-DP Refine_2CBG_Set (Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT, DP refine_fraction)
-{
-  // This function replaces Set by a new set with more points, where
-  // Tln(...) needs to be evaluated more precisely.
-
-  // The return value is the max of delta_tni found.
-
-  // First, calculate the needed Tln...
-  Vect<Vect_DP> Tln1plusemineps(Set.ntypes);
-  Vect_DP Tln1pluseminepsinfty(Set.ntypes);
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-
-    Tln1plusemineps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
-
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      Tln1plusemineps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
-	kBT * (Set.epsilon[n].value[i] < 24.0 * kBT ? log(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) : exp(-Set.epsilon[n].value[i]/kBT))
-	: -Set.epsilon[n].value[i] + kBT * log (1.0 + exp(Set.epsilon[n].value[i]/kBT));
-    }
-    Tln1pluseminepsinfty[n] = kBT * (Set.epsilon[n].value_infty < 24.0 * kBT ?
-				     log(1.0 + exp(-Set.epsilon[n].value_infty/kBT)) : exp(-Set.epsilon[n].value_infty/kBT));
-  }
-
-  // Now find the achieved delta_tni
-  DP max_delta_tni_dlambda = 0.0;
-  DP max_delta_tni_dlambda_toplevel = 0.0;
-  DP sum_delta_tni_dlambda = 0.0;
-  //DP tni = 0.0;
-
-  Vect<Vect_DP> tni(Set.ntypes);
-  Vect<Vect_DP> tni_ex(Set.ntypes);
-
-  //Vect_DP delta_tni_dlambda(0.0, Set.Npts_total);
-  //int delta_tni_counter = 0;
-
-  DP measure_factor = 0.0;
-  DP eps0i = 0.0;
-  DP eps1i = 0.0;
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-
-    tni[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
-    tni_ex[n] = Vect_DP (0.0, Set.epsilon[n].Npts);  // extrapolation from adjacent points, to compare to obtained value
-
-    for (int i = 1; i < Set.epsilon[n].Npts - 1; ++i) {
-      //tni[n][i] = Tln1plusemineps[n][i] - Tln1pluseminepsinfty[n];
-      //tni_ex[n][i] = ((Tln1plusemineps[n][i-1] - Tln1pluseminepsinfty[n]) * (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
-      //	      + (Tln1plusemineps[n][i+1] - Tln1pluseminepsinfty[n]) * (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
-      ///(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
-      if (n == 0) {
-	/*
-	tni[n][i] = Set.epsilon[n].value[i] - pow(Set.epsilon[n].lambda[i], 2.0) + mu + Omega + Tln1plusemineps[n][i] - Tln1pluseminepsinfty[n];
-	tni_ex[n][i] = ((Set.epsilon[n].value[i-1] - pow(Set.epsilon[n].lambda[i-1], 2.0) + mu + Omega +
-			 Tln1plusemineps[n][i-1] - Tln1pluseminepsinfty[n]) * (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
-			+ (Set.epsilon[n].value[i+1] - pow(Set.epsilon[n].lambda[i+1], 2.0) + mu + Omega +
-			   Tln1plusemineps[n][i+1] - Tln1pluseminepsinfty[n]) * (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
-	  /(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
-	*/
-	/*
-	// Working version:
-	tni[n][i] = Tln1plusemineps[n][i];
-	tni_ex[n][i] = (Tln1plusemineps[n][i-1] * (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
-			+ Tln1plusemineps[n][i+1] * (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
-	  /(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
-	*/
-	// Measure based on delta f/delta epsilon:
-	//measure_factor = Set.epsilon[0].value[i] > 0.0 ?
-	//exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT)) : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT));
-	//measure_factor = pow(Set.epsilon[0].value[i] > 0.0 ? exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-	//	     : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)), 2.0);
-	measure_factor = (Set.epsilon[0].value[i] > 0.0 ? exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-			  : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
-      }
-      else {
-	/*
-	// This is the more natural choice, since delta(epsilon[n]) gets transferred more or less linearly to delta(epsilon[0]):
-	tni[n][i] = Set.epsilon[n].value[i] - Set.epsilon[n].value_infty + Tln1plusemineps[n][i] - Tln1pluseminepsinfty[n];
-	tni_ex[n][i] = ((Set.epsilon[n].value[i-1] - Set.epsilon[n].value_infty + Tln1plusemineps[n][i-1] - Tln1pluseminepsinfty[n])
-			* (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
-			+ (Set.epsilon[n].value[i+1] - Set.epsilon[n].value_infty + Tln1plusemineps[n][i+1] - Tln1pluseminepsinfty[n])
-			* (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
-	  /(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
-	*/
-	/*
-	tni[n][i] = Tln1plusemineps[n][i] - Tln1pluseminepsinfty[n];
-	tni_ex[n][i] = ((Tln1plusemineps[n][i-1] - Tln1pluseminepsinfty[n]) * (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
-			+ (Tln1plusemineps[n][i+1] - Tln1pluseminepsinfty[n]) * (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
-	  /(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
-	*/
-	// Measure based on delta f/delta epsilon:
-	eps0i = Set.epsilon[0].Return_Value(Set.epsilon[n].lambda[i]);
-	eps1i = Set.epsilon[1].Return_Value(Set.epsilon[n].lambda[i]);
-
-        measure_factor = (eps0i > 0.0 ? exp(-eps0i/kBT)/(1.0 + exp(-eps0i/kBT)) : 1.0/(1.0 + exp(eps0i/kBT)))
-	  // Logic:  simple 1/2 per level cascade down
-	  //* (exp(-eps1i/kBT)/(1.0 + exp(-eps1i/kBT)))
-	  * pow(0.5, n);
-
-      }
-
-      tni[n][i] = measure_factor * Set.epsilon[n].value[i];
-      tni_ex[n][i] = measure_factor * (Set.epsilon[n].value[i-1] *  (Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i])
-				       + Set.epsilon[n].value[i+1] * (Set.epsilon[n].lambda[i] - Set.epsilon[n].lambda[i-1]))
-	/(Set.epsilon[n].lambda[i+1] - Set.epsilon[n].lambda[i-1]);
-
-      max_delta_tni_dlambda = ABACUS::max(max_delta_tni_dlambda, fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i]);
-      if (n == Set.ntypes - 1)
-	max_delta_tni_dlambda_toplevel = ABACUS::max(max_delta_tni_dlambda_toplevel, fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i]);
-      sum_delta_tni_dlambda += fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i];
-    }
-  }
-
-  //cout << "Before sort: " << endl << delta_tni_dlambda << endl;
-  //delta_tni_dlambda.QuickSort();
-  //cout << "After sort: " << endl << delta_tni_dlambda << endl;
-  //max_delta_tni_dlambda = delta_tni_dlambda[int(delta_tni_dlambda.size() * (1.0 - refine_fraction))];
-  //cout << "max_delta_tni_dlambda = " << max_delta_tni_dlambda << endl;//"\tindex = " << int(delta_tni_dlambda.size() * (1.0 - refine_fraction)) << endl;
-
-  // We now determine the locations where we need to add points
-  Vect<Vect<bool> > need_new_point_around(Set.ntypes);
-  Vect<bool> need_to_extend_limit(false, Set.ntypes);
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-
-    need_new_point_around[n] = Vect<bool> (false, Set.epsilon[n].Npts);
-
-    for (int i = 1; i < Set.epsilon[n].Npts - 1; ++i) {
-      if (fabs(tni[n][i] - tni_ex[n][i]) * Set.epsilon[n].dlambda[i] > (1.0 - refine_fraction) * max_delta_tni_dlambda) {
-	need_new_point_around[n][i] = true;
-	// Do also the symmetric ones...  Require need...[n][i] = need...[n][Npts - 1 - i]
-	need_new_point_around[n][Set.epsilon[n].Npts - 1 - i] = true;
-      }
-    }
-
-    // Check boundary values;  if too different from value_infty, extend limits
-    if (n == 0) {
-      //if (fabs(Tln1plusemineps[0][0]/Tln1pluseminepsinfty[n] - 1.0) * Set.epsilon[0].dlambda[0] > (1.0 - refine_fraction) * max_delta_tni_dlambda)
-      //if (fabs(Tln1plusemineps[0][0]/Tln1pluseminepsinfty[n] - 1.0) > (1.0 - refine_fraction) * max_delta_tni_dlambda)
-      //if (Tln1plusemineps[0][0] > (1.0 - refine_fraction) * max_delta_tni_dlambda)
-      // Used in working version:
-      //if (10000.0 * Tln1plusemineps[0][0] > max_delta_tni_dlambda/Set.epsilon[0].Npts)
-      //need_to_extend_limit[0] = true;
-      // Measure based on delta f/delta epsilon:
-      if (exp(-Set.epsilon[0].value[0]/kBT) > 0.001 * max_delta_tni_dlambda)
-	need_to_extend_limit[0] = true;
-    }
-    else
-      //if (fabs(Set.epsilon[n].value[0]/Set.epsilon[n].value_infty - 1.0) * Set.epsilon[n].dlambda[0] > (1.0 - refine_fraction) * max_delta_tni_dlambda)
-      //if (fabs(Set.epsilon[n].value[0]/Set.epsilon[n].value_infty - 1.0) > (1.0 - refine_fraction) * max_delta_tni_dlambda)
-      // Used in working version:
-      //if (10000.0 * fabs(Tln1plusemineps[n][0] - Tln1pluseminepsinfty[n]) > max_delta_tni_dlambda/Set.epsilon[n].Npts)
-      // Measure deviation from asymptote for 10th element, since we smoothly put the i < 10 ones to the asymptote when damping:
-      if (fabs(Set.epsilon[n].value[10] - Set.epsilon[n].value_infty) * Set.epsilon[0].dlambda[10] > max_delta_tni_dlambda)
-	need_to_extend_limit[n] = true;
-  }
-
-  // Check if we need to add a level
-  bool need_new_epsilon_n_function = false;
-
-  // We add new levels if the integral a_n * Tln1plusemineps at the highest level differs too much from
-  // the asymptotic value.  Since such integrals appear for each point of the epsilon[0] function, these
-  // errors should be compared to the individual delta_tni factors.
-  DP a_2_Tln_conv_0_integ = 0.0;
-  DP oneoverpi = 1.0/PI;
-  DP twoovernc = 2.0/((Set.ntypes - 1) * c_int);
-  int i0 = Set.epsilon[0].Npts/2;
-  for (int j = 0; j < Set.epsilon[Set.ntypes - 1].Npts; ++j)
-    a_2_Tln_conv_0_integ += (Tln1plusemineps[Set.ntypes - 1][j] - Tln1pluseminepsinfty[Set.ntypes - 1])
-      * oneoverpi * (atan(twoovernc * (Set.epsilon[0].lambda[i0] - Set.epsilon[Set.ntypes - 1].lambda[j] + 0.5 * Set.epsilon[Set.ntypes - 1].dlambda[j]))
-		     - atan(twoovernc * (Set.epsilon[0].lambda[i0] - Set.epsilon[Set.ntypes - 1].lambda[j] - 0.5 * Set.epsilon[Set.ntypes - 1].dlambda[j])));
-  // Add asymptotic parts:  not necessary, identically 0
-  //a_2_Tln_conv_0_integ += Tln1pluseminepsinfty[Set.ntypes - 1]
-  //* (1.0 - (atan((Set.epsilon[Set.ntypes - 1].lambdamax - Set.epsilon[0].lambda[i0])/(0.5 * (Set.ntypes - 1) * c_int))
-  //      + atan((Set.epsilon[Set.ntypes - 1].lambdamax + Set.epsilon[0].lambda[i0])/(0.5 * (Set.ntypes - 1) * c_int)))/PI);
-
-  // Compare to prediction for this integral based on value_infty, which is simply 0.
-  // Count this difference Set.ntypes times over, since it cascades down all levels
-  if (fabs(a_2_Tln_conv_0_integ) * Set.ntypes > max_delta_tni_dlambda) need_new_epsilon_n_function = true;
-
-  //cout << "Toplevel check:  fabs(integ)* ntypes = " << fabs(a_2_Tln_conv_0_integ) * Set.ntypes
-  //   << "\tmax tni = " << max_delta_tni_dlambda << "\tbool: " << (fabs(a_2_Tln_conv_0_integ) * Set.ntypes > max_delta_tni_dlambda) << endl;
-
-  // Additionally, if the highest level needs updating, we automatically add new functions:
-  for (int i = 0; i < Set.epsilon[Set.ntypes - 1].Npts; ++i)
-    if (need_new_point_around[Set.ntypes - 1][i] || need_to_extend_limit[Set.ntypes - 1]) need_new_epsilon_n_function = true;
-
-  // Finally, we also add functions if epsilon[n] itself is too different from epsilon[n](infty),
-  // based on delta f/delta epsilon_n measure:
-  //if (fabs(Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2] - Set.epsilon[Set.ntypes - 1].value_infty)
-      // Next expression: replaced by following one
-      //* exp(-(Set.epsilon[0].value[Set.epsilon[0].Npts/2] + Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2])/kBT)
-      ///(1.0 + exp(-Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2]/kBT))
-  //* exp(-Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2]/kBT)
-  //  /((1.0 + exp(Set.epsilon[0].value[Set.epsilon[0].Npts/2]/kBT)) * (1.0 + exp(-Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2]/kBT)))
-      // Logic:  simple 1/2 factor cascade
-      //* pow(0.5, Set.ntypes)
-      //* exp(-Set.epsilon[1].value[Set.epsilon[1].Npts/2]/kBT)
-      ///((1.0 + exp(Set.epsilon[0].value[Set.epsilon[0].Npts/2]/kBT)) * (1.0 + exp(-Set.epsilon[1].value[Set.epsilon[1].Npts/2]/kBT)))
-  //   > max_delta_tni_dlambda) need_new_epsilon_n_function = true;
-  /*
-  cout << "New level ? " << fabs(Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2] - Set.epsilon[Set.ntypes - 1].value_infty)
-       << "\t" << pow(0.5, Set.ntypes) << "\t" << fabs(Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2] - Set.epsilon[Set.ntypes - 1].value_infty)
-    * pow(0.5, Set.ntypes) << "\t" << max_delta_tni_dlambda
-       << "\t" <<  (fabs(Set.epsilon[Set.ntypes - 1].value[Set.epsilon[Set.ntypes - 1].Npts/2] - Set.epsilon[Set.ntypes - 1].value_infty)
-		    * pow(0.5, Set.ntypes)  > max_delta_tni_dlambda)
-       << "\t" << need_new_epsilon_n_function
-       << "\t"
-    //<< endl;
-    ;
-  */
-  //if (max_delta_tni_dlambda_toplevel > pow(0.5, Set.ntypes) *  max_delta_tni_dlambda && Set.ntypes < 100) need_new_epsilon_n_function = true;
-  //cout << "New level above top level ? " << max_delta_tni_dlambda_toplevel << "\t" << max_delta_tni_dlambda << "\t"
-  //   << (max_delta_tni_dlambda_toplevel > 0.01 * max_delta_tni_dlambda) << endl;
-
-  // Now insert the new points between existing points:
-  Set.Insert_new_points (need_new_point_around);
-
-  // Now extend the integration limits if needed:
-  Set.Extend_limits (need_to_extend_limit);
-
-  // If we add a level, we do it here:
-  if (need_new_epsilon_n_function) {
-    // Insert more than one function per cycle...
-    Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));
-    Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
-    Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
-    Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated;
-    Set.Insert_new_function(Asymptotic_2CBG_epsilon(Set.ntypes, Omega, kBT));  // CAREFUL !!  ntypes is already updated
-  }
-
-  //return(max_delta_tni_dlambda);
-  return(sum_delta_tni_dlambda);
-}
-
-DP Calculate_Gibbs_Free_Energy (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  // Computes the Gibbs free energy, assuming that epsilon[0] is symmetric.
-
-  // WORKING VERSION
-  DP sum_f = 0.0;
-  Vect_DP f(0.0, Set.epsilon[0].Npts);
-  DP sum_f_check = 0.0;
-  Vect_DP fcheck(0.0, Set.epsilon[0].Npts);
-  DP sum_g_check = 0.0;
-  Vect_DP gcheck(0.0, Set.epsilon[0].Npts);
-  for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-    f[i] = (Set.epsilon[0].value[i] > 0.0 ? kBT* log(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-	    : -Set.epsilon[0].value[i] + kBT * log(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
-    sum_f += Set.epsilon[0].dlambda[i] * f[i];
-    fcheck[i] = kBT * log(1.0 + exp(-(Set.epsilon[0].lambda[i] * Set.epsilon[0].lambda[i] - mu - Omega)/kBT));
-    sum_f_check += Set.epsilon[0].dlambda[i] * fcheck[i];
-    gcheck[i] = exp(-(Set.epsilon[0].lambda[i] * Set.epsilon[0].lambda[i])/kBT);
-    sum_g_check += Set.epsilon[0].dlambda[i] * gcheck[i];
-  }
-
-  // Now add alpha curvature terms:
-  for (int i = 1; i < Set.epsilon[0].Npts - 1; ++i)
-    sum_f += pow(Set.epsilon[0].dlambda[i], 3.0) * ((f[i+1] - f[i])/(Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i])
-						    - (f[i] - f[i-1])/(Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1]))
-      /(12.0 * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i-1]));
-
-  // Now add alpha curvature terms:
-  DP sum_gcorralphacheck = 0.0;
-  Vect_DP gcorr_alpha_check(0.0, Set.epsilon[0].Npts);
-  for (int i = 1; i < Set.epsilon[0].Npts - 1; ++i) {
-    //gcorr_alpha_check[i] = (pow(Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i], 3.0) - pow(Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1], 3.0))
-    //* ((gcheck[i+1] - gcheck[i]) * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1])
-    // - (gcheck[i] - gcheck[i-1]) * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i]))
-    ///(24.0 * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i-1]) * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i])
-    //* (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1]));
-    gcorr_alpha_check[i] = (1.0/12.0) * pow(Set.epsilon[0].dlambda[i], 3.0)
-      * ((gcheck[i+1] - gcheck[i]) * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1])
-	 - (gcheck[i] - gcheck[i-1]) * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i]))
-      /((Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i-1]) * (Set.epsilon[0].lambda[i+1] - Set.epsilon[0].lambda[i])
-	* (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[i-1]));
-    sum_gcorralphacheck += gcorr_alpha_check[i];
-  }
-  //sum_f += sum_fcorr + sum_fcorralpha;
-  //cout << sum_f << "\t" << sum_fcorr << "\t" << sum_fcorralpha << "\tRelative corr: " << sum_fcorr/sum_f << "\t" << sum_fcorralpha/sum_f << endl;
-
-  /*
-  // Original version:
-  DP sum_f = 0.0;
-  for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-    sum_f += Set.epsilon[0].dlambda[i] * (Set.epsilon[0].value[i] > 0.0 ? kBT* log(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-					  : -Set.epsilon[0].value[i] + kBT * log(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
-    //cout << Set.epsilon[0].lambda[i] << "\t" << Set.epsilon[0].value[i] << "\t" << (Set.epsilon[0].value[i] > 0.0 ? kBT * log(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-    //: -Set.epsilon[0].value[i] + kBT * log(1.0 + exp(Set.epsilon[0].value[i]/kBT))) << endl;
-  }
-  */
-
-  // Testing:
-  int Npts_test = Set.epsilon[0].Npts;
-  DP lambdamax_test = Set.epsilon[0].lambdamax;
-  DP testsum = 0.0;
-  DP testgauss = 0.0;
-  DP lambda;
-  DP dlambda;
-  for (int i = 0; i < Npts_test; ++i) {
-    lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    dlambda = lambdamax_test * 2.0/Npts_test;
-    testsum += kBT * log(1.0 + exp(-(lambda*lambda - mu - Omega)/kBT)) * dlambda;
-    testgauss += exp(-lambda*lambda/kBT) * dlambda;
-  }
-
-  //cout << "Test1: " << Npts_test << "\t" << lambdamax_test << "\t" << -testsum/twoPI << "\t" << testgauss/sqrt(PI * kBT) << endl;
-  /*
-  cout << setprecision(16) << "Npts = " << Set.epsilon[0].Npts << "\tlambdamax = " << Set.epsilon[0].lambdamax
-       << "\tf = " << -sum_f/twoPI << "\tGaussian: " << sum_g_check/sqrt(PI * kBT) << "\ttestGauss = " << testgauss/sqrt(PI * kBT) << endl;
-  cout << setprecision(16) << Set.epsilon[0].dlambda.sum() << "\t" << Set.epsilon[0].lambdamax * 2.0 << endl;
-  cout << gcheck << endl << endl;
-  */
-  /*
-  // Output a test file:
-  ofstream outfile_test;
-  outfile_test.open("Test_Gauss.dat");
-  outfile_test.precision(16);
-  DP gauss1 = 0.0;
-  DP gauss1beta = 0.0;
-  DP gauss1alpha = 0.0;
-  DP gauss2 = 0.0;
-  for (int i = 0; i < Npts_test; ++i) {
-    lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    gauss1 += Set.epsilon[0].dlambda[i] * gcheck[i];
-    //gauss1beta += Set.epsilon[0].dlambda[i] * gcheck[i] + gcorr_check[i];
-    gauss1alpha += Set.epsilon[0].dlambda[i] * gcheck[i] + gcorr_alpha_check[i];
-    gauss2 += dlambda * exp(-lambda*lambda/kBT);
-    outfile_test << Set.epsilon[0].lambda[i] << "\t" << Set.epsilon[0].dlambda[i] << "\t" << gcheck[i] << "\t" << gauss1 << "\t" << gauss1alpha
-		 << "\t" << lambda << "\t" << exp(-lambda*lambda/kBT) << "\t" << gauss2 << endl;
-  }
-  outfile_test.close();
-  char a;
-  //cout << "Done..." << endl;
-  //cin >> a;
-  */
-  /*
-  Npts_test = Set.epsilon[0].Npts * 2;
-  lambdamax_test = Set.epsilon[0].lambdamax;
-  testsum = 0.0;
-  testgauss = 0.0;
-
-  for (int i = 0; i < Npts_test; ++i) {
-    lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    dlambda = lambdamax_test * 2.0/Npts_test;
-    testsum += kBT * log(1.0 + exp(-(lambda*lambda - mu - Omega)/kBT)) * dlambda;
-    testgauss += exp(-lambda*lambda/kBT) * dlambda;
-  }
-
-  //cout << "Test2: " << Npts_test << "\t" << lambdamax_test << "\t" << -testsum/twoPI << "\t" << testgauss/sqrt(PI * kBT) << endl;
-
-  Npts_test = Set.epsilon[0].Npts * 2;
-  lambdamax_test = Set.epsilon[0].lambdamax;
-  testsum = 0.0;
-  testgauss = 0.0;
-
-  for (int i = 0; i < Npts_test; ++i) {
-    lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    dlambda = lambdamax_test * 2.0/Npts_test;
-    testsum += kBT * log(1.0 + exp(-(lambda*lambda - mu - Omega)/kBT)) * dlambda;
-    testgauss += exp(-lambda*lambda/kBT) * dlambda;
-  }
-
-  //cout << "Test3: " << Npts_test << "\t" << lambdamax_test << "\t" << -testsum/twoPI << "\t" << testgauss/sqrt(PI * kBT) << endl;
-
-  Npts_test = Set.epsilon[0].Npts * 2;
-  lambdamax_test = Set.epsilon[0].lambdamax;
-  testsum = 0.0;
-  testgauss = 0.0;
-
-  for (int i = 0; i < Npts_test; ++i) {
-    lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    dlambda = lambdamax_test * (-Npts_test + 1.0 + 2*i + 2.0)/Npts_test - lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    testsum += kBT * log(1.0 + exp(-(lambda*lambda - mu - Omega)/kBT)) * dlambda;
-    testgauss += exp(-lambda*lambda/kBT) * dlambda;
-  }
-
-  //cout << "Test4: " << Npts_test << "\t" << lambdamax_test << "\t" << -testsum/twoPI << "\t" << testgauss/sqrt(PI * kBT) << endl;
-
-  Npts_test = Set.epsilon[0].Npts * 4;
-  lambdamax_test = Set.epsilon[0].lambdamax * 1;
-  testsum = 0.0;
-  testgauss = 0.0;
-
-  for (int i = 0; i < Npts_test; ++i) {
-    lambda = lambdamax_test * (-Npts_test + 1.0 + 2*i)/Npts_test;
-    dlambda = lambdamax_test * 2.0/Npts_test;
-    testsum += kBT * log(1.0 + exp(-(lambda*lambda - mu - Omega)/kBT)) * dlambda;
-    testgauss += exp(-lambda*lambda/kBT) * dlambda;
-  }
-
-  //cout << "Test5: " << Npts_test << "\t" << lambdamax_test << "\t" << -testsum/twoPI << "\t" << testgauss/sqrt(PI * kBT) << endl;
-  */
-
-  return(-sum_f/twoPI);
-}
-
-DP Calculate_dGibbs_dchempot (const Root_Density_Set& DSet, const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  // This calculates the derivative of the Gibbs free energy with respect to either of the two chemical potential,
-  // given the fundametal set Set for eps and DSet for either deps_du or deps_dOmega.
-
-  DP sum_f = 0.0;
-  Vect_DP f(0.0, Set.epsilon[0].Npts);
-
-  for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-    f[i] = DSet.epsilon[0].value[i] * (Set.epsilon[0].value[i] > 0.0 ? exp(-Set.epsilon[0].value[i]/kBT)/(1.0 + exp(-Set.epsilon[0].value[i]/kBT))
-				       : 1.0/(1.0 + exp(Set.epsilon[0].value[i]/kBT)));
-    sum_f += DSet.epsilon[0].dlambda[i] * f[i];
-  }
-
-  // Now add alpha curvature terms:
-  for (int i = 1; i < DSet.epsilon[0].Npts - 1; ++i)
-    sum_f += pow(DSet.epsilon[0].dlambda[i], 3.0) * ((f[i+1] - f[i])/(DSet.epsilon[0].lambda[i+1] - DSet.epsilon[0].lambda[i])
-						    - (f[i] - f[i-1])/(DSet.epsilon[0].lambda[i] - DSet.epsilon[0].lambda[i-1]))
-      /(12.0 * (DSet.epsilon[0].lambda[i+1] - DSet.epsilon[0].lambda[i-1]));
-
-  return(sum_f/twoPI);
-}
-
-//void Build_a_n_dlambda (Vect<Vect<Vect_DP> >& a_n_dlambda, const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-Vect<Vect<Vect_DP> > Build_a_n_dlambda (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  DP oneoverpi = 1.0/PI;
-  DP oneoverc = 1.0/c_int;
-  //DP oneovertwoc = 0.5/c_int;
-  //DP oneovercsq = oneoverc * oneoverc;
-  DP twoovernc = 2.0/c_int;
-  //DP oneovernc;
-  //DP oneoverncsq;
-
-  // First define some useful functions:
-  //Vect<Vect_DP> midlambdaleft(Set.ntypes); // 0.5 * (lambda[n][j-1] + lambda[n][j])
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-
-    //midlambdaleft[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
-
-    for (int i = 1; i < Set.epsilon[n].Npts; ++i) {
-      ////midlambdaleft[n][i] = 0.5 * (Set.epsilon[n].lambda[i-1] + Set.epsilon[n].lambda[i]);
-      //midlambdaleft[n][i] = Set.epsilon[n].lambda[i] - 0.5 * Set.epsilon[n].dlambda[i]);
-    }
-  } // for (int n = 0...)
-
-  //Vect<Vect<Vect_DP> > a_n_dlambda(Set.epsilon[0].Npts);
-
-  Vect<Vect<Vect_DP> > a_n_dlambda(Set.epsilon[0].Npts);
-  for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-    a_n_dlambda[i] = Vect<Vect_DP>(Set.ntypes);
-    for (int n = 0; n < Set.ntypes; ++n) {
-      a_n_dlambda[i][n] = Vect_DP(0.0, Set.epsilon[n].Npts);
-    }
-  }
-
-  for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-    //a_n_dlambda[i] = Vect<Vect_DP> (Set.ntypes);
-
-    // Do n == 0 separately:
-    //a_n_dlambda[i][0] = Vect_DP (0.0, Set.epsilon[0].Npts);
-    /*
-    a_n_dlambda[i][0][0] = Set.epsilon[0].dlambda[0] * oneoverpi * c_int/(pow(Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[0], 2.0) + c_int*c_int);
-    a_n_dlambda[i][0][Set.epsilon[0].Npts - 1] = Set.epsilon[0].dlambda[Set.epsilon[0].Npts - 1] * oneoverpi
-      * c_int/(pow(Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[Set.epsilon[0].Npts - 1], 2.0) + c_int*c_int);
-    for (int j = 1; j < Set.epsilon[0].Npts - 1; ++j)
-      //a_n_dlambda[i][0][j] = oneoverpi * (atan((Set.epsilon[0].lambda[i] - midlambdaleft[0][j]) * oneoverc)
-      //			  - atan((Set.epsilon[0].lambda[i] - midlambdaleft[0][j+1]) * oneoverc));
-      a_n_dlambda[i][0][j] = oneoverpi * atan(oneovertwoc * (Set.epsilon[0].lambda[j+1] - Set.epsilon[0].lambda[j-1])
-					      /(1.0 + oneovercsq * (Set.epsilon[0].lambda[i] - midlambdaleft[0][j])
-						* (Set.epsilon[0].lambda[i] - midlambdaleft[0][j+1])));
-						*/
-    for (int j = 0; j < Set.epsilon[0].Npts; ++j)
-      // CANNOT USE THE NEXT FORM !!  a_n_dlambda is then possibly negative.
-      //a_n_dlambda[i][0][j] = oneoverpi * atan(oneoverc * Set.epsilon[0].dlambda[j]
-      //				      /(1.0 + oneovercsq * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[j] - 0.5 * Set.epsilon[0].dlambda[j])
-      //					* (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[j] + 0.5 * Set.epsilon[0].dlambda[j])));
-      a_n_dlambda[i][0][j] = oneoverpi * (atan(oneoverc * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[j] + 0.5 * Set.epsilon[0].dlambda[j]))
-					  - atan(oneoverc * (Set.epsilon[0].lambda[i] - Set.epsilon[0].lambda[j] - 0.5 * Set.epsilon[0].dlambda[j])));
-
-    //if (fabs(a_n_dlambda[i][0].sum() - 1.0) > 1.0e-12) cout << "Error in sum a_n for n = " << 0 << " i = " << i << "\tsum = "
-    //						    << a_n_dlambda[i][0].sum() << endl << endl << a_n_dlambda[i][0] << endl << endl;
-
-    // Now do n > 0:
     for (int n = 1; n < Set.ntypes; ++n) {
-      //a_n_dlambda[i][n] = Vect_DP (0.0, Set.epsilon[n].Npts);
 
-      //a_n_dlambda[i][n][0] = Set.epsilon[n].dlambda[0] * 0.5 * n * oneoverpi * c_int
-      ///(pow(Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[0], 2.0) + 0.25 * n * n * c_int*c_int);
-      //a_n_dlambda[i][n][Set.epsilon[n].Npts - 1] = Set.epsilon[n].dlambda[Set.epsilon[n].Npts - 1] * 0.5 * n * oneoverpi * c_int
-      //  /(pow(Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[Set.epsilon[n].Npts - 1], 2.0) + 0.25 * n * n * c_int*c_int);
-      twoovernc = 2.0/(n * c_int);
-      //oneovernc = 1.0/(n * c_int);
-      //oneoverncsq = oneovernc * oneovernc;
-      //for (int j = 1; j < Set.epsilon[n].Npts - 1; ++j) {
-      for (int j = 0; j < Set.epsilon[n].Npts; ++j) {
-	//a_n_dlambda[i][n][j] = oneoverpi * (atan(twoovernc * (Set.epsilon[0].lambda[i] - midlambdaleft[n][j]))
-	//		    - atan(twoovernc * (Set.epsilon[0].lambda[i] - midlambdaleft[n][j+1])));
-	// CANNOT USE THE NEXT FORM !!  a_n_dlambda is then possibly negative.
-	//a_n_dlambda[i][n][j] = oneoverpi * atan(oneovernc * (Set.epsilon[n].lambda[j+1] - Set.epsilon[n].lambda[j-1])
-	//				/(1.0 + oneoverncsq * (2.0 * Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[j-1] - Set.epsilon[n].lambda[j])
-	//				  * (2.0 * Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[j+1] - Set.epsilon[n].lambda[j])));
-	a_n_dlambda[i][n][j] = oneoverpi * (atan(twoovernc * (Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[j] + 0.5 * Set.epsilon[n].dlambda[j]))
-				    - atan(twoovernc * (Set.epsilon[0].lambda[i] - Set.epsilon[n].lambda[j] - 0.5 * Set.epsilon[n].dlambda[j])));
+      Vect_DP f_depsover_conv_min (0.0, Set.epsilon[n].Npts);  // 'down' convolution
+      Vect_DP f_depsover_conv_plus (0.0, Set.epsilon[n].Npts); // 'up' convolution
+
+      Vect_DP fmin(0.0, Set.epsilon[n-1].Npts);
+      Vect_DP fplus(0.0, Set.epsilon[ABACUS::min(n+1, Set.ntypes - 1)].Npts);
+
+      for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
+	f_depsover_conv_min[i] = 0.0;
+	f_depsover_conv_plus[i] = 0.0;
+
+	// 'down' convolutions
+	if (n == 1) {
+
+	  for (int j = 0; j < Set.epsilon[0].Npts; ++j) {
+	    fmin[j] = depsover1plusepluseps[0][j]
+	      * fmin_dlambda[n][i][j];
+	    f_depsover_conv_min[i] -= fmin[j]; // Careful ! - sign here
+	  }
+	} // if (n == 1)
+
+	else { // if (n != 1)
+
+	  for (int j = 0; j < Set.epsilon[n - 1].Npts; ++j) {
+	    fmin[j] = (depsover1plusemineps[n-1][j] - depsover1pluseminepsinfty[n-1])
+	      * fmin_dlambda[n][i][j];
+	    f_depsover_conv_min[i] += fmin[j];
+	  }
+	} // if (n != 1)
+
+
+	// 'up' convolutions
+	if (n < Set.ntypes - 1) {
+
+	  for (int j = 0; j < Set.epsilon[n+1].Npts; ++j) {
+	    fplus[j] = (depsover1plusemineps[n+1][j] - depsover1pluseminepsinfty[n+1])
+	      * fplus_dlambda[n][i][j];
+	    f_depsover_conv_plus[i] += fplus[j];
+	  }
+
+	} // if (n < Set.ntypes - 1...
+
+	// otherwise (top level): epsilon[n+1] is replaced by its asymptotic value, so the 'up' convolution (1/2 times that value, minus the same) vanishes:
+	else f_depsover_conv_plus[i] = 0.0;
+
+	// Do some damping:
+	DSet.epsilon[n].value[i] = 0.1 * DSet.epsilon[n].prev_value[i]
+	  + 0.9 * (DSet.epsilon[n].value_infty + f_depsover_conv_min[i] + f_depsover_conv_plus[i]);
+	// Force boundary values to the asymptote: blend the outermost 10 points on each side
+	if (i < 10)
+	  DSet.epsilon[n].value[i] = (1.0 - 0.1 * i) * DSet.epsilon[n].value_infty + 0.1 * i * DSet.epsilon[n].value[i];
+	if (i > DSet.epsilon[n].Npts - 11)
+	  DSet.epsilon[n].value[i] = (1.0 - 0.1 * (DSet.epsilon[n].Npts-1 - i)) * DSet.epsilon[n].value_infty
+	    + 0.1 * (DSet.epsilon[n].Npts-1 - i) * DSet.epsilon[n].value[i];
+
+      } // for (int i = 0...
+
+    } // for (int n = 1...
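The update applied inside the loop above mixes 10% of the previous value with 90% of the freshly iterated one, then pulls the 10 outermost points on each side linearly towards the asymptotic value. A minimal, self-contained sketch of that rule (the helper name and the use of std::vector are illustrative only, not ABACUS API):

#include <vector>

// Sketch of the damping / boundary-forcing step used in Iterate_2CBG_deps_dchempot:
// 'iterated' holds the undamped new value (asymptote + 'down' + 'up' convolutions).
void damp_and_pin_boundaries (std::vector<double>& value,
                              const std::vector<double>& prev_value,
                              const std::vector<double>& iterated,
                              double value_infty)
{
  int Npts = int(value.size());
  for (int i = 0; i < Npts; ++i) {
    value[i] = 0.1 * prev_value[i] + 0.9 * iterated[i];                  // damping
    if (i < 10)                                                          // left boundary blend
      value[i] = (1.0 - 0.1 * i) * value_infty + 0.1 * i * value[i];
    if (i > Npts - 11)                                                   // right boundary blend
      value[i] = (1.0 - 0.1 * (Npts - 1 - i)) * value_infty + 0.1 * (Npts - 1 - i) * value[i];
  }
}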
+
+    // All functions have now been iterated.
+
+    // Now calculate diff:
+
+    DSet.diff = 0.0;
+
+    for (int n = 0; n < DSet.ntypes; ++n) {
+      DSet.epsilon[n].diff = 0.0;
+      for (int i = 0; i < DSet.epsilon[n].Npts; ++i) {
+	DSet.epsilon[n].diff += DSet.epsilon[n].dlambda[i] *
+	  fabs((DSet.epsilon[n].value[i] - DSet.epsilon[n].prev_value[i]) * depsover1plusepluseps[n][i]);
       }
-    } // for (int n
-  } // for (int i
-
-  return(a_n_dlambda);
-}
-
-//void  Build_fmin_dlambda (Vect<Vect<Vect_DP> >& fmin_dlambda, const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-Vect<Vect<Vect_DP> >  Build_fmin_dlambda (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  DP oneoverpi = 1.0/PI;
-  //DP oneoverc = 1.0/c_int;
-  //DP twoovernc = 2.0/c_int;
-  DP pioverc = PI/c_int;
-  DP twopioverc = 2.0*PI/c_int;
-  DP piovertwoc = 0.5 * pioverc;
-
-  Vect<Vect<Vect_DP> > fmin_dlambda(Set.ntypes);
-  for (int n = 0; n < Set.ntypes; ++n) {
-    fmin_dlambda[n] = Vect<Vect_DP> (Set.epsilon[n].Npts);
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i)
-      fmin_dlambda[n][i] = Vect_DP (0.0, Set.epsilon[ABACUS::max(n-1, 0)].Npts);
-  }
-
-  for (int n = 1; n < Set.ntypes; ++n) {
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      /*
-      fmin_dlambda[n][i][0] = (Set.epsilon[n-1].dlambda[0]/c_int)
-	* (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[0] > 0.0 ?
-	   exp(-pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[0]))
-	   /(1.0 + exp(-twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[0])))
-	   :
-	   exp(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[0]))
-	   /(1.0 + exp(twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[0]))));
-      //cosh(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[0])));
-
-      fmin_dlambda[n][i][Set.epsilon[n-1].Npts - 1] = (Set.epsilon[n-1].dlambda[Set.epsilon[n-1].Npts - 1]/c_int)
-	* (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[Set.epsilon[n-1].Npts - 1] > 0.0 ?
-	   exp(-pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[Set.epsilon[n-1].Npts - 1]))
-	   /(1.0 + exp(-twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[Set.epsilon[n-1].Npts - 1])))
-	   :
-	   exp(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[Set.epsilon[n-1].Npts - 1]))
-	   /(1.0 + exp(twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[Set.epsilon[n-1].Npts - 1]))));
-      ///(c_int * cosh(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[Set.epsilon[n-1].Npts - 1])));
-      for (int j = 1; j < Set.epsilon[n-1].Npts - 1; ++j) {
-	if (Set.epsilon[n].lambda[i] >= Set.epsilon[n-1].lambda[j])
-	  fmin_dlambda[n][i][j] = oneoverpi * atan((exp(pioverc * (-Set.epsilon[n].lambda[i] + 0.5 * (Set.epsilon[n-1].lambda[j] + Set.epsilon[n-1].lambda[j+1])))
-						    - exp(pioverc * (-Set.epsilon[n].lambda[i] + 0.5 * (Set.epsilon[n-1].lambda[j] + Set.epsilon[n-1].lambda[j-1]))))
-						   /(1.0 + exp(pioverc * (-2.0 * Set.epsilon[n].lambda[i] + Set.epsilon[n-1].lambda[j]
-									  + 0.5 * (Set.epsilon[n-1].lambda[j-1] + Set.epsilon[n-1].lambda[j+1])))));
-	else
-	  fmin_dlambda[n][i][j] = oneoverpi * atan((exp(pioverc * (Set.epsilon[n].lambda[i] - 0.5 * (Set.epsilon[n-1].lambda[j] + Set.epsilon[n-1].lambda[j-1])))
-						    - exp(pioverc * (Set.epsilon[n].lambda[i] - 0.5 * (Set.epsilon[n-1].lambda[j] + Set.epsilon[n-1].lambda[j+1]))))
-						   /(1.0 + exp(pioverc * (2.0 * Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[j]
-									  - 0.5 * (Set.epsilon[n-1].lambda[j-1] + Set.epsilon[n-1].lambda[j+1])))));
-      }
-
-
-      // Boundary points:  to ensure stability, make sure that \int_{-\infty}^{\infty} fmin_dlambda(., ., lambda) = 1/2.
-      // For j == 0, assume lambda[j-1] = -infinity:
-      int j = 0;
-      fmin_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (-Set.epsilon[n].lambda[i] + 0.5 * (Set.epsilon[n-1].lambda[j] + Set.epsilon[n-1].lambda[j+1]))));
-      //fmin_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (-Set.epsilon[n].lambda[i] + Set.epsilon[n-1].lambda[j] + 0.5 * Set.epsilon[n-1].dlambda[j])));
-      // For j == Npts - 1, assume lambda[Npts] = infinity:
-      j = Set.epsilon[n-1].Npts - 1;
-      fmin_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (Set.epsilon[n].lambda[i] - 0.5 * (Set.epsilon[n-1].lambda[j] + Set.epsilon[n-1].lambda[j-1]))));
-      //fmin_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[j] - 0.5 * Set.epsilon[n-1].dlambda[j])));
-      */
-
-      for (int j = 0; j < Set.epsilon[n-1].Npts; ++j)
-	fmin_dlambda[n][i][j] = oneoverpi * atan(exp(-pioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[j]))
-						 * 2.0 * sinh(piovertwoc * Set.epsilon[n-1].dlambda[j])
-						 /(1.0 + exp(-twopioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n-1].lambda[j]))));
-
-    } // for i
-  } // for n
-
-  return(fmin_dlambda);
-}
-
-//void Build_fplus_dlambda (Vect<Vect<Vect_DP> >& fplus_dlambda, const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-Vect<Vect<Vect_DP> > Build_fplus_dlambda (const Root_Density_Set& Set, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  DP oneoverpi = 1.0/PI;
-  //DP oneoverc = 1.0/c_int;
-  //DP twoovernc = 2.0/c_int;
-  DP pioverc = PI/c_int;
-  DP twopioverc = 2.0*PI/c_int;
-  DP piovertwoc = 0.5 * pioverc;
-
-  Vect<Vect<Vect_DP> > fplus_dlambda(Set.ntypes);
-  for (int n = 0; n < Set.ntypes; ++n) {
-    fplus_dlambda[n] = Vect<Vect_DP> (Set.epsilon[n].Npts);
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i)
-      fplus_dlambda[n][i] = Vect_DP (0.0, Set.epsilon[ABACUS::min(n+1, Set.ntypes - 1)].Npts);
-  }
-
-  for (int n = 0; n < Set.ntypes - 1; ++n) {
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      /*
-      fplus_dlambda[n][i][0] = (Set.epsilon[n+1].dlambda[0]/c_int)
-	* (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[0] > 0.0 ?
-	   exp(-pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[0]))
-	   /(1.0 + exp(-twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[0])))
-	   :
-	   exp(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[0]))
-	   /(1.0 + exp(twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[0]))));
-      //cosh(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[0])));
-      fplus_dlambda[n][i][Set.epsilon[n+1].Npts - 1] = (Set.epsilon[n+1].dlambda[Set.epsilon[n+1].Npts - 1]/c_int)
-	* (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[Set.epsilon[n+1].Npts - 1] > 0.0 ?
-	   exp(-pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[Set.epsilon[n+1].Npts - 1]))
-	   /(1.0 + exp(-twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[Set.epsilon[n+1].Npts - 1])))
-	   :
-	   exp(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[Set.epsilon[n+1].Npts - 1]))
-	   /(1.0 + exp(twopioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[Set.epsilon[n+1].Npts - 1]))));
-      ///(c_int * cosh(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[Set.epsilon[n+1].Npts - 1])));
-      for (int j = 1; j < Set.epsilon[n+1].Npts - 1; ++j) {
-	if (Set.epsilon[n].lambda[i] >= Set.epsilon[n+1].lambda[j])
-	  fplus_dlambda[n][i][j] = oneoverpi * atan((exp(pioverc * (-Set.epsilon[n].lambda[i] + 0.5 * (Set.epsilon[n+1].lambda[j] + Set.epsilon[n+1].lambda[j+1])))
-						     - exp(pioverc * (-Set.epsilon[n].lambda[i] + 0.5 * (Set.epsilon[n+1].lambda[j] + Set.epsilon[n+1].lambda[j-1]))))
-						    /(1.0 + exp(pioverc * (-2.0 * Set.epsilon[n].lambda[i] + Set.epsilon[n+1].lambda[j]
-									   + 0.5 * (Set.epsilon[n+1].lambda[j-1] + Set.epsilon[n+1].lambda[j+1])))));
-	else
-	  fplus_dlambda[n][i][j] = oneoverpi * atan((exp(pioverc * (Set.epsilon[n].lambda[i] - 0.5 * (Set.epsilon[n+1].lambda[j] + Set.epsilon[n+1].lambda[j-1])))
-						     - exp(pioverc * (Set.epsilon[n].lambda[i] - 0.5 * (Set.epsilon[n+1].lambda[j] + Set.epsilon[n+1].lambda[j+1]))))
-						    /(1.0 + exp(pioverc * (2.0 * Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[j]
-									   - 0.5 * (Set.epsilon[n+1].lambda[j-1] + Set.epsilon[n+1].lambda[j+1])))));
-      }
-
-      // Boundary points:  to ensure stability, make sure that \int_{-\infty}^{\infty} fmin_dlambda(., ., lambda) = 1/2.
-      // For j == 0, assume lambda[j-1] = -infinity:
-      int j = 0;
-      fplus_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (-Set.epsilon[n].lambda[i] + 0.5 * (Set.epsilon[n+1].lambda[j] + Set.epsilon[n+1].lambda[j+1]))));
-      //fplus_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (-Set.epsilon[n].lambda[i] + Set.epsilon[n+1].lambda[j] + 0.5 * Set.epsilon[n+1].dlambda[j])));
-      // For j == Npts - 1, assume lambda[Npts] = infinity:
-      j = Set.epsilon[n+1].Npts - 1;
-      fplus_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (Set.epsilon[n].lambda[i] - 0.5 * (Set.epsilon[n+1].lambda[j] + Set.epsilon[n+1].lambda[j-1]))));
-      //fplus_dlambda[n][i][j] = oneoverpi * atan(exp(pioverc * (Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[j] - 0.5 * Set.epsilon[n+1].dlambda[j])));
-      */
-      for (int j = 0; j < Set.epsilon[n+1].Npts; ++j)
-	fplus_dlambda[n][i][j] = oneoverpi * atan(exp(-pioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[j]))
-						 * 2.0 * sinh(piovertwoc * Set.epsilon[n+1].dlambda[j])
-						 /(1.0 + exp(-twopioverc * fabs(Set.epsilon[n].lambda[i] - Set.epsilon[n+1].lambda[j]))));
+      DSet.diff += DSet.epsilon[n].diff;
     }
+
+    return;
   }
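In formula form, the convergence measure accumulated at the end of Iterate_2CBG_deps_dchempot is a dlambda-weighted L1 norm of the change in the derivative functions, each point weighted by its thermal occupation factor (here epsilon_n is the TBA function in Set, Deps_n its derivative in DSet; the weight is evaluated with the pre-iteration values, since depsover1plusepluseps is built before the update):

\mathrm{diff}_n \;=\; \sum_i d\lambda_i \left| \bigl(D\varepsilon_n(\lambda_i) - D\varepsilon_n^{\mathrm{prev}}(\lambda_i)\bigr)\,
\frac{D\varepsilon_n^{\mathrm{prev}}(\lambda_i)}{1 + e^{\varepsilon_n(\lambda_i)/k_B T}} \right|,
\qquad
\mathrm{diff} \;=\; \sum_n \mathrm{diff}_n .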
 
-  return(fplus_dlambda);
-}
+  Root_Density_Set Solve_2CBG_deps_dchempot (int option, Root_Density_Set& TBA_Set, DP c_int, DP mu, DP Omega, DP kBT,
+					     int Max_Secs, ofstream& LOG_outfile, bool Save_data)
+  {
+    // This solves the 2CBG deps/dmu (option == 0) or deps/dOmega (option == 1).
 
-//void Solve_2CBG_TBAE_via_refinements (Root_Density_Set& TBA_Set, DP c_int, DP mu, DP Omega, DP kBT, int Max_Secs, ofstream& LOG_outfile)
-Root_Density_Set Solve_2CBG_TBAE_via_refinements (DP c_int, DP mu, DP Omega, DP kBT, int Max_Secs, ofstream& LOG_outfile, bool Save_data)
-{
-  // This solves the 2CBG TBAE as best as possible given the time constraint.
+    clock_t StartTime, StopTime;
 
-  clock_t StartTime = clock();
+    int Max_CPU_ticks = 98 * (Max_Secs - 10) * CLOCKS_PER_SEC/100;  // reserve 10 seconds to wrap up, assume we time to 2% accuracy.
 
-  //int maxcycles = 5;
+    // Set basic precision needed:
+    DP running_prec = TBA_Set.diff;
 
-  int Max_CPU_ticks = 98 * (Max_Secs - 0) * CLOCKS_PER_SEC/100;  // give 30 seconds to wrap up, assume we time to 2% accuracy.
+    // We start by converging simplified sets, with fewer points:
 
-  // Set basic precision needed:
-  DP running_prec = 1.0;
+    Root_Density_Set TBA_Set_comp = TBA_Set.Return_Compressed_and_Matched_Set(1.0);
 
-  DP refine_fraction = 0.5; // value fraction of points to be refined
+    Root_Density_Set DSet = TBA_Set;  // this will be the function's return value
+    Root_Density_Set DSet_comp = TBA_Set_comp;
 
-  // Set basic number of types needed:
-  int ntypes_needed = int(kBT * log(kBT/1.0e-14)/Omega);
-  int ntypes = ABACUS::max(ntypes_needed, 1);
-  ntypes = ABACUS::min(ntypes, 10);
+    // Set the asymptotics of the TBA_fns:
+    Set_2CBG_deps_dchempot_Asymptotics (option, TBA_Set_comp, DSet_comp, mu, Omega, kBT);
+    Set_2CBG_deps_dchempot_Asymptotics (option, TBA_Set, DSet, mu, Omega, kBT);
 
-  //cout << "ntypes = " << ntypes << endl;
+    // Now, start by 'converging' the comp set:
 
-  if (Save_data)
-    if (ntypes >= 10) LOG_outfile << "WARNING:  ntypes needs to be quite high for c_int = " << c_int << " mu = " << mu << " Omega = " << Omega
-				  << " kBT = " << kBT << ".  Set to " << ntypes << ", ideally needed: "
-				  << ntypes_needed << ".  Accuracy might be incorrectly evaluated." << endl;
+    // Initiate the functions:
+    Initiate_2CBG_deps_dchempot_Functions (DSet_comp);
 
-  DP lambdamax = 10.0 + sqrt(ABACUS::max(1.0, kBT * 36.0 + mu + Omega));  // such that exp(-(lambdamax^2 - mu - Omega)/T) <~ machine_eps
-  int Npts = 50;
-  //int Npts = ABACUS::max(200, int(2 * lambdamax)); // For stability, we need *all* dlambda < 2.  Choose max(dlambda) = 1 here for safety.
-  //if (Npts < int(2 * lambdamax))
-  //LOG_outfile << "WARNING:  dlambda = " << 2.0 * lambdamax/Npts << " might be too large to ensure numerical stability of iterations." << endl;
-  Vect_INT Npts_init(Npts, ntypes);
-  //Vect_DP lambdamax_init(c_int * 10.0, ntypes);  // such that exp(-pi *lambdamax/c) <~ machine_eps
-  // We let the number of points fall off with increasing level n:
-  //for (int n = 1; n < ntypes; ++n) Npts_init[n] = ABACUS::max(100, 2*int(Npts/(n+1.0)));  // DON'T !!  Unstable.
-  if (Save_data) LOG_outfile << "Npts (basic) set to " << Npts_init << endl;
+    Vect<Vect<Vect_DP> > a_n_dlambda_comp = Build_a_n_dlambda (TBA_Set_comp, c_int, mu, Omega, kBT);
+    Vect<Vect<Vect_DP> > fmin_dlambda_comp = Build_fmin_dlambda (TBA_Set_comp, c_int, mu, Omega, kBT);
+    Vect<Vect<Vect_DP> > fplus_dlambda_comp = Build_fplus_dlambda (TBA_Set_comp, c_int, mu, Omega, kBT);
 
-  Vect_DP lambdamax_init(lambdamax, ntypes);  // such that exp(-pi *lambdamax/c) <~ machine_eps
-  Npts_init[0] = 1 * Npts;  // give more precision to lowest level
-  lambdamax_init[0] = 10.0 + sqrt(ABACUS::max(1.0, kBT * 36.0 + mu + Omega));  // such that exp(-(lambdamax^2 - mu - Omega)/T) <~ machine_eps
-  Root_Density_Set TBA_Set (ntypes, Npts_init, lambdamax_init);
-  //TBA_Set = Root_Density_Set(ntypes, Npts, lambdamax);
+    int CPU_ticks = 0;
 
-  // Set the asymptotics of the TBA_fns:
+    int niter_comp = 0;
 
-  Set_2CBG_Asymptotics (TBA_Set, mu, Omega, kBT);
+    do {
 
-  // Initiate the functions:
+      StartTime = clock();
+      Iterate_2CBG_deps_dchempot (option, DSet_comp, TBA_Set_comp,
+				  a_n_dlambda_comp, fmin_dlambda_comp, fplus_dlambda_comp, c_int, mu, Omega, kBT);
+      niter_comp++;
+      StopTime = clock();
+      CPU_ticks += StopTime - StartTime;
 
-  Initiate_2CBG_TBA_Functions (TBA_Set, mu, Omega);
+    } while (CPU_ticks < Max_CPU_ticks/2 && (niter_comp < 100 || DSet_comp.diff > running_prec));
+    // use at most half the time, keep rest for later.
 
-  clock_t StopTime = clock();
-  clock_t Cycle_StartTime, Cycle_StopTime;
-
-  int CPU_ticks = StopTime - StartTime;
-
-  int ncycles = 0;
-  int niter_tot = 0;
-
-  do {
-
-    StartTime = clock();
-
-    Cycle_StartTime = clock();
-
-    //TBA_Set.Save("Test1.dat");
-
-    // The running precision is an estimate of the accuracy of the free energy integral.
-    // Refine... returns sum_delta_tni_dlambda, so running prec is estimated as...
-    running_prec = Refine_2CBG_Set (TBA_Set, c_int, mu, Omega, kBT, refine_fraction);
-
-    //TBA_Set.Save("Test2.dat");
-
-    //cout << "Waiting..." << endl;
-    //char a;    cin >> a;
+    DSet.Match_Densities(DSet_comp);
 
     Vect<Vect<Vect_DP> > a_n_dlambda = Build_a_n_dlambda (TBA_Set, c_int, mu, Omega, kBT);
     Vect<Vect<Vect_DP> > fmin_dlambda = Build_fmin_dlambda (TBA_Set, c_int, mu, Omega, kBT);
     Vect<Vect<Vect_DP> > fplus_dlambda = Build_fplus_dlambda (TBA_Set, c_int, mu, Omega, kBT);
 
-    StopTime = clock();
-
-    CPU_ticks += StopTime - StartTime;
-
     int niter = 0;
-    int niter_max = ncycles == 0 ? 300 : 300;
-
-    // For extrapolations:
-    Vect<Root_Density_Set> IterSet(4);
 
     do {
-
-      //TBA_Set.Save("Test2.dat");
-
       StartTime = clock();
-      //if (ncycles <= 100) {
-      //if (ncycles <= 0) {
-      if (niter <= 10 || niter > 100) {
-	//Iterate_2CBG_TBAE (TBA_Set, c_int, mu, Omega, kBT);
-	Iterate_2CBG_TBAE (TBA_Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
-	//cout << "iteration " << niter << "\tepsilon[0](0) = " << TBA_Set.epsilon[0].value[TBA_Set.epsilon[0].Npts/2]
-	// << "\tdelta epsilon[0](0) = " << TBA_Set.epsilon[0].value[TBA_Set.epsilon[0].Npts/2] - TBA_Set.epsilon[0].prev_value[TBA_Set.epsilon[0].Npts/2]
-	// << "\tdiff[0] = " << TBA_Set.epsilon[0].diff << "\tdiff[1] = " << TBA_Set.epsilon[1].diff
-	// << "\tdiff[ntypes - 1] = " << TBA_Set.epsilon[TBA_Set.ntypes - 1].diff << "\tdiff = " << TBA_Set.diff << endl;
-	niter++;
-      }
-      else {
-	Iterate_and_Extrapolate_2CBG_TBAE (TBA_Set, IterSet, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
-	//cout << "extrap it " << niter << "\tepsilon[0](0) = " << TBA_Set.epsilon[0].value[TBA_Set.epsilon[0].Npts/2]
-	//   << "\tdelta epsilon[0](0) = "
-	//   << TBA_Set.epsilon[0].value[TBA_Set.epsilon[0].Npts/2] - TBA_Set.epsilon[0].prev_value[TBA_Set.epsilon[0].Npts/2]
-	//   << "\tdiff = " << TBA_Set.diff << endl;
-	niter += 6;
-      }
-
-      //TBA_Set.Save("Test3.dat");
-      //cout << "Niter = " << niter << "\tdiff = " << TBA_Set.diff << "\trunning_prec " << running_prec;
-      //for (int n = 0; n < TBA_Set.ntypes; ++n) cout << "\t" << TBA_Set.epsilon[n].diff;
-      //cout << endl;
-      //if (ncycles >= 0) { char b; cin >> b;}
-
+      Iterate_2CBG_deps_dchempot (option, DSet, TBA_Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
+      niter++;
       StopTime = clock();
       CPU_ticks += StopTime - StartTime;
-      //} while (niter < 20 || niter < niter_max && TBA_Set.diff > running_prec && CPU_ticks < Max_CPU_ticks);
-      //} while (niter < 20 || niter < niter_max && TBA_Set.diff > 1.0e-10 * TBA_Set.Npts_total && CPU_ticks < Max_CPU_ticks);
-    } while (niter < 5 || niter < niter_max && TBA_Set.diff > 0.1 * running_prec && CPU_ticks < Max_CPU_ticks);
+    } while (CPU_ticks < Max_CPU_ticks && DSet_comp.diff > 1.0e-4 * running_prec);
+
+
+    // We're done !!
 
     if (Save_data) {
-      LOG_outfile << "ncycles = " << ncycles << "\trunning_prec = " << running_prec << "\t niter = " << niter
-		  << "\tntypes = " << TBA_Set.ntypes << "\tNpts ";
-      for (int n = 0; n < TBA_Set.ntypes; ++n) LOG_outfile << TBA_Set.epsilon[n].Npts << " ";
-      //for (int n = 0; n < TBA_Set.ntypes; ++n) LOG_outfile << TBA_Set.epsilon[n].value_infty << " ";
-      LOG_outfile << "\tNpts_total = " << TBA_Set.Npts_total << endl
-		  << "\tdiff = " << TBA_Set.diff << "\tGSE = " << Calculate_Gibbs_Free_Energy (TBA_Set, c_int, mu, Omega, kBT) << endl;
+      LOG_outfile << "deps_dchempot, option == " << option << endl;
+      LOG_outfile << "c_int " << c_int << "\tmu " << mu << "\tOmega " << Omega << "\tkBT " << kBT
+		  << "\trunning_prec " << running_prec << " niter_comp " << niter_comp << " niter_full " << niter
+		  << "\tntypes " << DSet.ntypes << "\tdiff " << DSet.diff << endl;
     }
 
-    ncycles++;
-    niter_tot += niter;
+    return(DSet);
+  }
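Both convergence loops above follow the same CPU-budget pattern: reserve a wrap-up margin, convert the remaining seconds into clock ticks (allowing roughly 2% timing accuracy), and accumulate the ticks spent per iteration until either convergence or the budget is reached. A minimal sketch of that pattern in isolation (names are illustrative, not ABACUS API):

#include <ctime>

// Illustrative budget helper: ticks available after reserving wrap-up seconds.
inline clock_t CPU_tick_budget (int max_secs, int wrapup_secs = 10)
{
  return (clock_t) ((98 * (max_secs - wrapup_secs)) / 100) * CLOCKS_PER_SEC;
}

// Typical use around an iteration loop:
//   clock_t used = 0, budget = CPU_tick_budget(Max_Secs);
//   do {
//     clock_t t0 = clock();
//     /* one iteration */
//     used += clock() - t0;
//   } while (used < budget && not_converged);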
 
-    if (niter == niter_max) {
-      if (Save_data) LOG_outfile << "Not able to improve functions enough after " << niter_max << " iterations." << endl;
-      //break;
+
+  TBA_Data_2CBG Solve_2CBG_TBAE_via_refinements (DP c_int, DP mu, DP Omega, DP kBT, int Max_Secs, bool Save_data)
+  {
+    // This solves the 2CBG TBAE as best as possible given the time constraint.
+
+    stringstream TBA_stringstream;
+    string TBA_string;
+    stringstream Dmu_stringstream;
+    string Dmu_string;
+    stringstream DOmega_stringstream;
+    string DOmega_string;
+    stringstream LOG_stringstream;
+    string LOG_string;
+    stringstream GFE_stringstream;
+    string GFE_string;
+
+    TBA_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega
+		     << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
+    Dmu_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega
+		     << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dmu";
+    DOmega_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega
+			<< "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dOm";
+    LOG_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega
+		     << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".log";
+    GFE_stringstream << "GFE_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega
+		     << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
+
+    TBA_string = TBA_stringstream.str();
+    const char* TBA_Cstr = TBA_string.c_str();
+
+    Dmu_string = Dmu_stringstream.str();
+    const char* Dmu_Cstr = Dmu_string.c_str();
+
+    DOmega_string = DOmega_stringstream.str();
+    const char* DOmega_Cstr = DOmega_string.c_str();
+
+    LOG_string = LOG_stringstream.str();
+    const char* LOG_Cstr = LOG_string.c_str();
+
+    GFE_string = GFE_stringstream.str();
+    const char* GFE_Cstr = GFE_string.c_str();
+
+    ofstream LOG_outfile;
+    ofstream GFE_outfile;
+
+    if (Save_data) {
+      LOG_outfile.open(LOG_Cstr);
+      LOG_outfile.precision(6);
+
+      GFE_outfile.open(GFE_Cstr);
+      GFE_outfile.precision(16);
     }
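For concreteness, the naming scheme produced by the stringstreams above can be reproduced in isolation; the parameter values below are made up purely for illustration:

#include <iostream>
#include <sstream>

int main ()
{
  double c_int = 1.0, mu = 0.5, Omega = 0.1, kBT = 0.25;
  int Max_Secs = 600;
  std::stringstream name;
  name << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega
       << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
  // prints: EPS_2CBG_c_1_mu_0.5_Omega_0.1_kBT_0.25_Secs_600.dat
  // the .dmu, .dOm, .log and GFE_... files follow the same pattern.
  std::cout << name.str() << std::endl;
  return 0;
}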
 
-    Cycle_StopTime = clock();
-
-  } while (CPU_ticks < Max_CPU_ticks - 2.0*(Cycle_StopTime - Cycle_StartTime));
-  // Allow a new cycle only if there is time, assuming new cycle time < 2* last one
-
-  if (Save_data) {
-    LOG_outfile << "c_int " << c_int << "\tmu " << mu << "\tOmega " << Omega << "\tkBT " << kBT
-		<< "\tncycles = " << ncycles << "\trunning_prec = " << running_prec << "\t niter_tot = " << niter_tot
-		<< "\tntypes = " << TBA_Set.ntypes << "\tdiff = " << TBA_Set.diff << endl << "\tNpts ";
-    for (int n = 0; n < TBA_Set.ntypes; ++n) LOG_outfile << TBA_Set.epsilon[n].Npts << " ";
-    //for (int n = 0; n < TBA_Set.ntypes; ++n) LOG_outfile << TBA_Set.epsilon[n].value_infty << " ";
-    LOG_outfile << "\tNpts_total = " << TBA_Set.Npts_total << endl;
-  }
-
-  return(TBA_Set);
-  //return;
-}
-
-
-// Iterative procedures for deps/dmu or /dOmega:
-void Iterate_2CBG_deps_dchempot (int option, Root_Density_Set& DSet, const Root_Density_Set& Set,
-				 Vect<Vect<Vect_DP> >& a_n_dlambda, Vect<Vect<Vect_DP> >& fmin_dlambda,
-				 Vect<Vect<Vect_DP> >& fplus_dlambda, DP c_int, DP mu, DP Omega, DP kBT)
-{
-  // Produces a new Root_Density_Set for depsilon/dmu (option == 0) or depsilon/dOmega (option == 1) from a previous iteration.
-  // Does NOT add types or change Npts, lambdamax values.
-
-  //DP oneoverc = 1.0/c_int;
-  //DP twoovernc = 2.0/c_int;
-
-  // First define some useful functions:
-  Vect<Vect_DP> depsover1plusemineps(Set.ntypes);
-  Vect<Vect_DP> depsover1plusepluseps(Set.ntypes);
-  Vect_DP depsover1pluseminepsinfty(Set.ntypes);
-  Vect_DP depsover1pluseplusepsinfty(Set.ntypes);
-
-  for (int n = 0; n < Set.ntypes; ++n) {
-
-    depsover1plusemineps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
-    depsover1plusepluseps[n] = Vect_DP (0.0, Set.epsilon[n].Npts);
-
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      depsover1plusemineps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
-	DSet.epsilon[n].value[i]/(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) :
-	DSet.epsilon[n].value[i] * exp(Set.epsilon[n].value[i]/kBT)/(1.0 + exp(Set.epsilon[n].value[i]/kBT));
-      depsover1plusepluseps[n][i] = Set.epsilon[n].value[i] > 0.0 ?
-	DSet.epsilon[n].value[i] * exp(-Set.epsilon[n].value[i]/kBT)/(1.0 + exp(-Set.epsilon[n].value[i]/kBT)) :
-	DSet.epsilon[n].value[i]/(1.0 + exp(Set.epsilon[n].value[i]/kBT));
-
-      // Keep previous rapidities:
-      DSet.epsilon[n].prev_value[i] = DSet.epsilon[n].value[i];
+    Root_Density_Set TBA_Set = Solve_2CBG_TBAE_via_refinements (c_int, mu, Omega, kBT, Max_Secs/3, LOG_outfile, Save_data);
 
+    if (Save_data) {
+      // Output the functions:
+      TBA_Set.Save(TBA_Cstr);
     }
-    depsover1pluseminepsinfty[n] = DSet.epsilon[n].value_infty/(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
-    depsover1pluseplusepsinfty[n] = DSet.epsilon[n].value_infty * exp(-Set.epsilon[n].value_infty/kBT)/(1.0 + exp(-Set.epsilon[n].value_infty/kBT));
-  }
 
-  //cout << "Here 0" << endl;
+    Root_Density_Set DSet_dmu =
+      Solve_2CBG_deps_dchempot (0, TBA_Set, c_int, mu, Omega, kBT, Max_Secs/3, LOG_outfile, Save_data);
+    if (Save_data) DSet_dmu.Save(Dmu_Cstr);
 
-  // Now do the necessary convolutions for epsilon == epsilon[0].
-  // For each value of lambda, do the convolutions:
-  // Careful:  the lambda's used for lambda (index i) are those of epsilon[0], the lambda' (index j) are for epsilon[n] !!
-  Vect<Vect_DP> a_n_depsover_conv(Set.ntypes);
-  for (int n = 0; n < Set.ntypes; ++n) {
-    a_n_depsover_conv[n] = Vect_DP (0.0, Set.epsilon[0].Npts);
-    Vect_DP f(0.0, Set.epsilon[n].Npts);
+    Root_Density_Set DSet_dOmega =
+      Solve_2CBG_deps_dchempot (1, TBA_Set, c_int, mu, Omega, kBT, Max_Secs/3, LOG_outfile, Save_data);
+    if (Save_data) DSet_dOmega.Save(DOmega_Cstr);
 
-    for (int i = 0; i < Set.epsilon[0].Npts; ++i) {
-      a_n_depsover_conv[n][i] = 0.0;
+    DP f = Calculate_Gibbs_Free_Energy (TBA_Set, c_int, mu, Omega, kBT);
+    DP dfdmu = Calculate_dGibbs_dchempot (DSet_dmu, TBA_Set, c_int, mu, Omega, kBT);
+    DP dfdOm = Calculate_dGibbs_dchempot (DSet_dOmega, TBA_Set, c_int, mu, Omega, kBT);
 
-      for (int j = 0; j < Set.epsilon[n].Npts; ++j) {
-	f[j] = depsover1plusepluseps[n][j] * a_n_dlambda[i][n][j];
-	a_n_depsover_conv[n][i] += f[j];
-      }
+    if (Save_data)
+      GFE_outfile << f << "\t" << TBA_Set.diff
+		  << "\t" << dfdmu << "\t" << dfdOm << "\t" << 0.5 * (-dfdmu - dfdOm) << "\t" << 0.5 * (-dfdmu + dfdOm)
+		  << endl;
+
+    else cout << setprecision(16) << f << "\t" << 0.5 * (-dfdmu - dfdOm) << "\t" << 0.5 * (-dfdmu + dfdOm);
+
+    if (Save_data) {
+      LOG_outfile.close();
+      GFE_outfile.close();
     }
-  } // for (int n...    We now have all the a_n * deps... at our disposal.
 
-  // For n > nmax sum in RHS of BE for epsilon, assuming epsilon_n = epsilon_n^\infty in those cases:
-  // Remember: nmax = Set.ntypes - 1
-  DP Smaxsum = option == 0 ? 0.0 : 2.0 * ((Set.ntypes + 1.0) * exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT)/(1.0 - exp(-2.0 * (Set.ntypes + 1.0) * Omega/kBT))
-					  - Set.ntypes * exp(-2.0 * Set.ntypes * Omega/kBT)/(1.0 - exp(-2.0 * Set.ntypes * Omega/kBT)));
+    TBA_Data_2CBG answer;
+    answer.c_int = c_int;
+    answer.mu = mu;
+    answer.Omega = Omega;
+    answer.kBT = kBT;
+    answer.f = f;
+    answer.n1 = 0.5 * (-dfdmu - dfdOm);
+    answer.n2 = 0.5 * (-dfdmu + dfdOm);
 
-  // Reconstruct the epsilon[0] function:
-  for (int i = 0; i < DSet.epsilon[0].Npts; ++i) {
-    DSet.epsilon[0].value[i] = -1.0;
-
-    // Add the convolutions:
-    for (int n = 0; n < Set.ntypes; ++n)
-      DSet.epsilon[0].value[i] += a_n_depsover_conv[n][i];
-    // Add the asymptotic parts of convolutions:  n == 0 part is zero because of 1 + exp[epsilon[0] ] in denominator
-    for (int n = 1; n < Set.ntypes; ++n)
-      DSet.epsilon[0].value[i] += depsover1pluseplusepsinfty[n]
-	* (1.0 - (atan((DSet.epsilon[n].lambdamax - DSet.epsilon[0].lambda[i])/(0.5 * n * c_int))
-		  + atan((DSet.epsilon[n].lambdamax + DSet.epsilon[0].lambda[i])/(0.5 * n * c_int)))/PI);
-
-    // Add the leftover summation for species n > nmax, assuming epsilon_n = epsilon_n^\infty in those cases:
-    DSet.epsilon[0].value[i] -= Smaxsum;
-    /*
-    if (DSet.epsilon[0].value[i] >= 0.0) {
-      cout << "Warning:  eps[0][" << i << "] >= 0.0, " << DSet.epsilon[0].prev_value[i] << "\t" << DSet.epsilon[0].value[i] << endl;
-      DSet.epsilon[0].value[i] = DSet.epsilon[0].prev_value[i];
-    }
-    else
-    */
-    // Include some damping:
-    DSet.epsilon[0].value[i] = 0.1 * DSet.epsilon[0].prev_value[i] + 0.9 * DSet.epsilon[0].value[i];
-    // Force boundary values to asymptotes:  force boundary 10 points on each side
-    if (i < 10)
-      DSet.epsilon[0].value[i] = (1.0 - 0.1 * i) * DSet.epsilon[0].value_infty + 0.1 * i * DSet.epsilon[0].value[i];
-    if (i > DSet.epsilon[0].Npts - 11)
-      DSet.epsilon[0].value[i] = (1.0 - 0.1 * (DSet.epsilon[0].Npts-1 - i)) * DSet.epsilon[0].value_infty + 0.1 * (DSet.epsilon[0].Npts-1 - i) * DSet.epsilon[0].value[i];
-
-  }
-  // epsilon[0] is now fully iterated.
-
-  // Now do the remaining epsilons:
-
-  for (int n = 1; n < Set.ntypes; ++n) {
-
-    Vect_DP f_depsover_conv_min (0.0, Set.epsilon[n].Npts);  // 'down' convolution
-    Vect_DP f_depsover_conv_plus (0.0, Set.epsilon[n].Npts); // 'up' convolution
-
-    // For n = ntypes, need:
-    //DP em2OoT = exp(-2.0 * Omega/kBT);
-    //int ntypes = DSet.ntypes;
-    //DP depsover1pluseepsntypesinfty = option == 0 ? 0.0 :     2.0 * (1.0 - pow(em2OoT, ntypes+1.0))
-    //* (ntypes * (1.0 - pow(em2OoT, ntypes+2.0)) - (ntypes + 2.0) * em2OoT * (1.0 - pow(em2OoT, DP(ntypes))))
-    ///((1.0 - em2OoT) * (1.0 - pow(em2OoT, DP(ntypes))) * (1.0 - pow(em2OoT, ntypes + 2.0))) MISSING 1/1+exp...part
-
-    Vect_DP fmin(0.0, Set.epsilon[n-1].Npts);
-    Vect_DP fplus(0.0, Set.epsilon[ABACUS::min(n+1, Set.ntypes - 1)].Npts);
-
-    for (int i = 0; i < Set.epsilon[n].Npts; ++i) {
-      f_depsover_conv_min[i] = 0.0;
-      f_depsover_conv_plus[i] = 0.0;
-
-      // 'down' convolutions
-
-      if (n == 1) {
-
-	for (int j = 0; j < Set.epsilon[0].Npts; ++j) {
-	  fmin[j] = depsover1plusepluseps[0][j]
-	    * fmin_dlambda[n][i][j];
-	  f_depsover_conv_min[i] -= fmin[j]; // Careful ! - sign here
-	}
-	//if (i == 0 || i == 1)
-	//cout << "i = " << i << ": " << endl << endl << fmin << endl << endl
-	//		   << depsover1plusepluseps[0] << endl << endl << fmin_dlambda[n][i]
-	//if (i < 10) cout << "Sum of fmin_dlambda[n][" << i << "]: " << fmin_dlambda[n][i].sum() << endl;
-      } // if (n == 1)
-
-      else { // if (n != 1)
-
-	for (int j = 0; j < Set.epsilon[n - 1].Npts; ++j) {
-	    fmin[j] = (depsover1plusemineps[n-1][j] - depsover1pluseminepsinfty[n-1])
-	      * fmin_dlambda[n][i][j];
-	    f_depsover_conv_min[i] += fmin[j];
-	}
-      } // if (n != 1)
-
-
-      // 'up' convolutions
-      if (n < Set.ntypes - 1) {
-
-	for (int j = 0; j < Set.epsilon[n+1].Npts; ++j) {
-	  fplus[j] = (depsover1plusemineps[n+1][j] - depsover1pluseminepsinfty[n+1])
-	    * fplus_dlambda[n][i][j];
-	  f_depsover_conv_plus[i] += fplus[j];
-	}
-
-      } // if (n < Set.ntypes - 1...
-
-      // otherwise, we put the function to 1/2 times depsover... of epsilon[n+1] at infinity, minus the same:
-      else f_depsover_conv_plus[i] = 0.0;
-
-      //Set.epsilon[n].value[i] = Set.epsilon[n].value_infty + f_Tln_conv_min[i] + f_Tln_conv_plus[i];
-      // Do some damping:
-      DSet.epsilon[n].value[i] = 0.1 * DSet.epsilon[n].prev_value[i]
-	+ 0.9 * (DSet.epsilon[n].value_infty + f_depsover_conv_min[i] + f_depsover_conv_plus[i]);
-      // Force boundary values to asymptotes:  force boundary 10 points on each side
-      if (i < 10)
-	DSet.epsilon[n].value[i] = (1.0 - 0.1 * i) * DSet.epsilon[n].value_infty + 0.1 * i * DSet.epsilon[n].value[i];
-      if (i > DSet.epsilon[n].Npts - 11)
-	DSet.epsilon[n].value[i] = (1.0 - 0.1 * (DSet.epsilon[n].Npts-1 - i)) * DSet.epsilon[n].value_infty + 0.1 * (DSet.epsilon[n].Npts-1 - i) * DSet.epsilon[n].value[i];
-
-      //      if ((n == Set.ntypes - 1 || n == Set.ntypes - 2) && i == 0) {
-      /*
-      if (i == 0 && n < 2) {
-	cout << "epsilon[" << n << "][0]: " << DSet.epsilon[n].value[i] << "\t" << DSet.epsilon[n].prev_value[i] << "\t"
-	     << DSet.epsilon[n].value_infty << "\t" << f_depsover_conv_min[i] << "\t" << f_depsover_conv_plus[i]
-	  //<< "\tepsilon[" << n << "][1]: " << DSet.epsilon[n].value[1] << "\t" << DSet.epsilon[n].prev_value[1] << "\t"
-	  // << DSet.epsilon[n].value_infty << "\t" << f_depsover_conv_min[1] << "\t" << f_depsover_conv_plus[1]
-	     << "\tepsilon[" << n << "][10]: " << DSet.epsilon[n].value[10] << "\t" << DSet.epsilon[n].prev_value[10] << "\t"
-	     << DSet.epsilon[n].value_infty << "\t" << f_depsover_conv_min[10] << "\t" << f_depsover_conv_plus[10]
-	     << endl;
-      }
-      */
-      //if (i == 0) cout << "Check:  level " << n << " value_infty = " << DSet.epsilon[n].value_infty << endl;
-
-    } // for (int i = 0...
-
-  } // for (int n = 1...
-
-  // All functions have now been iterated.
-
-  // Now calculate diff:
-
-  DSet.diff = 0.0;
-
-  for (int n = 0; n < DSet.ntypes; ++n) {
-    DSet.epsilon[n].diff = 0.0;
-    //sum_N += Set.epsilon[n].Npts;
-    for (int i = 0; i < DSet.epsilon[n].Npts; ++i) {
-      //Set.epsilon[n].diff += pow((Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i])
-      //			   /ABACUS::max(1.0, fabs(Set.epsilon[n].value[i] + Set.epsilon[n].prev_value[i])), 2.0);
-      //Set.epsilon[n].diff += fabs((Set.epsilon[n].value[i] - Set.epsilon[n].prev_value[i])
-      //			   /ABACUS::max(1.0, fabs(Set.epsilon[n].value[i] + Set.epsilon[n].prev_value[i])));
-      //DSet.epsilon[n].diff += fabs(((DSet.epsilon[n].value[i] - DSet.epsilon[n].prev_value[i])/(DSet.epsilon[n].value[i] + DSet.epsilon[n].prev_value[i]))
-      //			   * depsover1plusepluseps[n][i]);
-      DSet.epsilon[n].diff += DSet.epsilon[n].dlambda[i] *
-	fabs((DSet.epsilon[n].value[i] - DSet.epsilon[n].prev_value[i]) * depsover1plusepluseps[n][i]);
-    }
-    //DSet.epsilon[n].diff /= DSet.epsilon[n].Npts;
-    DSet.diff += DSet.epsilon[n].diff;
-    //cout << n << " " << Set.epsilon[n].diff << "\t";
-  }
-  //DSet.diff /= Set.ntypes;
-  //cout << endl;
-
-  return;
-}
-
-/* IMPROVED VERSION BELOW
-Root_Density_Set Solve_2CBG_deps_dchempot (int option, const Root_Density_Set& TBA_Set, DP c_int, DP mu, DP Omega, DP kBT,
-					   int Max_Secs, ofstream& LOG_outfile, bool Save_data)
-{
-  // This solves the 2CBG deps/dmu (option == 0) or deps/dOmega (option == 1).
-
-  clock_t StartTime = clock();
-
-  int Max_CPU_ticks = 98 * (Max_Secs - 10) * CLOCKS_PER_SEC/100;  // give 30 seconds to wrap up, assume we time to 2% accuracy.
-
-  // Set basic precision needed:
-  DP running_prec = TBA_Set.diff;
-
-  Root_Density_Set DSet = TBA_Set;  // use same number of functions and points
-
-  // Set the asymptotics of the TBA_fns:
-
-  Set_2CBG_deps_dchempot_Asymptotics (option, TBA_Set, DSet, mu, Omega, kBT);
-
-  // Initiate the functions:
-
-  Initiate_2CBG_deps_dchempot_Functions (DSet);
-
-  Vect<Vect<Vect_DP> > a_n_dlambda = Build_a_n_dlambda (TBA_Set, c_int, mu, Omega, kBT);
-  Vect<Vect<Vect_DP> > fmin_dlambda = Build_fmin_dlambda (TBA_Set, c_int, mu, Omega, kBT);
-  Vect<Vect<Vect_DP> > fplus_dlambda = Build_fplus_dlambda (TBA_Set, c_int, mu, Omega, kBT);
-
-  clock_t StopTime = clock();
-
-  int CPU_ticks = StopTime - StartTime;
-
-  int niter = 0;
-  int niter_min = 20;
-  int niter_max = 2000;
-
-  do {
-    StartTime = clock();
-    Iterate_2CBG_deps_dchempot (option, DSet, TBA_Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
-    niter++;
-    StopTime = clock();
-    CPU_ticks += StopTime - StartTime;
-    //} while (niter < niter_min || niter < niter_max && DSet.diff > running_prec && CPU_ticks < Max_CPU_ticks);
-  } while (CPU_ticks < Max_CPU_ticks);  // Just do as many iterations as allowed by time.
-
-  if (Save_data) {
-    LOG_outfile << "deps_dchempot, option == " << option << endl;
-    LOG_outfile << "c_int " << c_int << "\tmu " << mu << "\tOmega " << Omega << "\tkBT " << kBT
-		<< "\trunning_prec " << running_prec << " niter_tot " << niter
-		<< "\tntypes " << DSet.ntypes << "\tdiff " << DSet.diff << endl;
-  }
-  return(DSet);
-}
-*/
-Root_Density_Set Solve_2CBG_deps_dchempot (int option, Root_Density_Set& TBA_Set, DP c_int, DP mu, DP Omega, DP kBT,
-					   int Max_Secs, ofstream& LOG_outfile, bool Save_data)
-{
-  // This solves the 2CBG deps/dmu (option == 0) or deps/dOmega (option == 1).
-
-  clock_t StartTime, StopTime;
-
-  int Max_CPU_ticks = 98 * (Max_Secs - 10) * CLOCKS_PER_SEC/100;  // give 30 seconds to wrap up, assume we time to 2% accuracy.
-
-  // Set basic precision needed:
-  DP running_prec = TBA_Set.diff;
-
-  // We start by converging simplified sets, with fewer points:
-
-  //TBA_Set.Save("TBA_0.dat");
-
-  Root_Density_Set TBA_Set_comp = TBA_Set.Return_Compressed_and_Matched_Set(1.0);
-
-  //TBA_Set_comp.Save("TBA_comp.dat");
-  //TBA_Set.Save("TBA_1.dat");
-
-  Root_Density_Set DSet = TBA_Set;  // this will be the final function return
-  Root_Density_Set DSet_comp = TBA_Set_comp;
-
-  // Set the asymptotics of the TBA_fns:
-
-  //DSet_comp.Save("comp_0.dat");
-
-  Set_2CBG_deps_dchempot_Asymptotics (option, TBA_Set_comp, DSet_comp, mu, Omega, kBT);
-  Set_2CBG_deps_dchempot_Asymptotics (option, TBA_Set, DSet, mu, Omega, kBT);
-
-  // Now, start by 'converging' the comp set:
-
-  // Initiate the functions:
-
-  //DSet_comp.Save("comp_1.dat");
-
-  Initiate_2CBG_deps_dchempot_Functions (DSet_comp);
-
-  //DSet_comp.Save("comp_2.dat");
-
-  Vect<Vect<Vect_DP> > a_n_dlambda_comp = Build_a_n_dlambda (TBA_Set_comp, c_int, mu, Omega, kBT);
-  Vect<Vect<Vect_DP> > fmin_dlambda_comp = Build_fmin_dlambda (TBA_Set_comp, c_int, mu, Omega, kBT);
-  Vect<Vect<Vect_DP> > fplus_dlambda_comp = Build_fplus_dlambda (TBA_Set_comp, c_int, mu, Omega, kBT);
-
-  //cout << "epsilon[0].Npts = " << TBA_Set.epsilon[0].Npts << endl;
-
-  int CPU_ticks = 0;
-
-  //DSet_comp.Save("comp_init.dat");
-
-  int niter_comp = 0;
-
-  do {
-
-    //DSet_comp.Save("comp_a.dat");
-
-    StartTime = clock();
-    Iterate_2CBG_deps_dchempot (option, DSet_comp, TBA_Set_comp,
-				a_n_dlambda_comp, fmin_dlambda_comp, fplus_dlambda_comp, c_int, mu, Omega, kBT);
-    niter_comp++;
-    StopTime = clock();
-    CPU_ticks += StopTime - StartTime;
-
-    //DSet_comp.Save("comp_b.dat");
-    //char a;
-    //cout << "Waiting for next DSet iteration..." << endl;
-    //cin >> a;
-
-    //} while (niter < niter_min || niter < niter_max && DSet.diff > running_prec && CPU_ticks < Max_CPU_ticks);
-    //cout << niter << "\t" << DSet_eighth.diff << "\t";
-    //} while (CPU_ticks < Max_CPU_ticks/2 && (DSet_comp.diff > running_prec || niter < 100));
-  } while (CPU_ticks < Max_CPU_ticks/2 && (niter_comp < 100 || DSet_comp.diff > running_prec));  // use at most half the time, keep rest for later.
-  //cout << endl;
-  //cout << "c_int = " << c_int << "\tmu = " << mu << "\teighth:  niter = " << niter << "\tdiff = " << DSet_eighth.diff << endl << endl;
-
-  //DSet_comp.Save("comp_final.dat");
-
-  DSet.Match_Densities(DSet_comp);
-
-  //DSet.Save("full_init.dat");
-
-  Vect<Vect<Vect_DP> > a_n_dlambda = Build_a_n_dlambda (TBA_Set, c_int, mu, Omega, kBT);
-  Vect<Vect<Vect_DP> > fmin_dlambda = Build_fmin_dlambda (TBA_Set, c_int, mu, Omega, kBT);
-  Vect<Vect<Vect_DP> > fplus_dlambda = Build_fplus_dlambda (TBA_Set, c_int, mu, Omega, kBT);
-
-  //CPU_ticks = 0;
-
-  int niter = 0;
-
-  do {
-    StartTime = clock();
-    Iterate_2CBG_deps_dchempot (option, DSet, TBA_Set, a_n_dlambda, fmin_dlambda, fplus_dlambda, c_int, mu, Omega, kBT);
-    niter++;
-    StopTime = clock();
-    CPU_ticks += StopTime - StartTime;
-    //} while (niter < niter_min || niter < niter_max && DSet.diff > running_prec && CPU_ticks < Max_CPU_ticks);
-    //cout << niter << "\t" << DSet.diff << "\t";
-    //} while (CPU_ticks < Max_CPU_ticks/2 && DSet.diff > running_prec);
-  } while (CPU_ticks < Max_CPU_ticks && DSet_comp.diff > 1.0e-4 * running_prec);
-  //cout << endl;
-  //cout << "c_int = " << c_int << "\tmu = " << mu << "\tfull:  niter = " << niter << "\tdiff = " << DSet.diff << endl << endl;
-
-  //DSet.Save("full_final.dat");
-
-  // We're done !!
-
-  if (Save_data) {
-    LOG_outfile << "deps_dchempot, option == " << option << endl;
-    LOG_outfile << "c_int " << c_int << "\tmu " << mu << "\tOmega " << Omega << "\tkBT " << kBT
-		<< "\trunning_prec " << running_prec << " niter_comp " << niter_comp << " niter_full " << niter
-		<< "\tntypes " << DSet.ntypes << "\tdiff " << DSet.diff << endl;
+    return(answer);
   }
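A minimal usage sketch of this driver for a single parameter point (values are made up for illustration; the include is assumed, adjust to the actual ABACUS header):

#include <iostream>
#include <iomanip>
#include "ABACUS.h"   // assumed umbrella header
using namespace ABACUS;

int main ()
{
  // c_int, mu, Omega, kBT, Max_Secs, Save_data
  TBA_Data_2CBG result = Solve_2CBG_TBAE_via_refinements (1.0, 0.5, 0.1, 0.25, 600, false);
  std::cout << std::setprecision(16) << result.f << "\t" << result.n1 << "\t" << result.n2 << std::endl;
  return 0;
}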
 
-  //cout << "Done with first Dset... " << endl;
-  //char a;
-  //cin >> a;
+  void GFE_muscan_2CBG (DP c_int, DP mu_min, DP mu_max, DP Omega, DP kBT, int Npts_mu, int Max_Secs, bool Save_data)
+  {
+    DP dmu = (mu_max - mu_min)/(Npts_mu - 1);
 
-  return(DSet);
-}
+    stringstream LOG_stringstream;
+    string LOG_string;
+    stringstream GFE_stringstream;
+    string GFE_string;
 
+    LOG_stringstream << "GFE_2CBG_c_" << c_int << "_mu_min_" << mu_min << "_mu_max_" << mu_max
+		     << "_Npts_mu_" << Npts_mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".log";
+    GFE_stringstream << "GFE_2CBG_c_" << c_int << "_mu_min_" << mu_min << "_mu_max_" << mu_max
+		     << "_Npts_mu_" << Npts_mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
 
-TBA_Data_2CBG Solve_2CBG_TBAE_via_refinements (DP c_int, DP mu, DP Omega, DP kBT, int Max_Secs, bool Save_data)
-{
-  // This solves the 2CBG TBAE as best as possible given the time constraint.
+    LOG_string = LOG_stringstream.str();
+    const char* LOG_Cstr = LOG_string.c_str();
 
-  //clock_t StartTime = clock();
+    GFE_string = GFE_stringstream.str();
+    const char* GFE_Cstr = GFE_string.c_str();
 
-  //int Max_CPU_ticks = 98 * (Max_Secs - 10) * CLOCKS_PER_SEC/100;  // give 10 seconds to wrap up, assume we time to 2% accuracy.
+    ofstream LOG_outfile;
+    ofstream GFE_outfile;
 
-  stringstream TBA_stringstream;
-  string TBA_string;
-  stringstream Dmu_stringstream;
-  string Dmu_string;
-  stringstream DOmega_stringstream;
-  string DOmega_string;
-  stringstream LOG_stringstream;
-  string LOG_string;
-  stringstream GFE_stringstream;
-  string GFE_string;
-
-  TBA_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
-  Dmu_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dmu";
-  DOmega_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dOm";
-  LOG_stringstream << "EPS_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".log";
-  GFE_stringstream << "GFE_2CBG_c_" << c_int << "_mu_" << mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
-
-  TBA_string = TBA_stringstream.str();
-  const char* TBA_Cstr = TBA_string.c_str();
-
-  Dmu_string = Dmu_stringstream.str();
-  const char* Dmu_Cstr = Dmu_string.c_str();
-
-  DOmega_string = DOmega_stringstream.str();
-  const char* DOmega_Cstr = DOmega_string.c_str();
-
-  LOG_string = LOG_stringstream.str();
-  const char* LOG_Cstr = LOG_string.c_str();
-
-  GFE_string = GFE_stringstream.str();
-  const char* GFE_Cstr = GFE_string.c_str();
-
-  ofstream LOG_outfile;
-  ofstream GFE_outfile;
-
-  if (Save_data) {
     LOG_outfile.open(LOG_Cstr);
     LOG_outfile.precision(6);
 
     GFE_outfile.open(GFE_Cstr);
     GFE_outfile.precision(16);
-  }
 
-  Root_Density_Set TBA_Set = Solve_2CBG_TBAE_via_refinements (c_int, mu, Omega, kBT, Max_Secs/3, LOG_outfile, Save_data);
 
-  if (Save_data) {
-  // Output the functions:
-  TBA_Set.Save(TBA_Cstr);
-  }
+    Root_Density_Set Scan_Set (10, 10, 10.0);
+    Root_Density_Set Scan_dSet_dmu (10, 10, 10.0);
+    Root_Density_Set Scan_dSet_dOmega (10, 10, 10.0);
+    DP mu;
+    int Max_Secs_per_mu_pt = Max_Secs/Npts_mu;
 
-  Root_Density_Set DSet_dmu = Solve_2CBG_deps_dchempot (0, TBA_Set, c_int, mu, Omega, kBT, Max_Secs/3, LOG_outfile, Save_data);
-  if (Save_data) DSet_dmu.Save(Dmu_Cstr);
+    for (int imu = 0; imu < Npts_mu; ++imu) {
+      mu = mu_min + imu * dmu;
+      Scan_Set = Solve_2CBG_TBAE_via_refinements (c_int, mu, Omega, kBT, Max_Secs_per_mu_pt/3, LOG_outfile, Save_data);
 
-  Root_Density_Set DSet_dOmega = Solve_2CBG_deps_dchempot (1, TBA_Set, c_int, mu, Omega, kBT, Max_Secs/3, LOG_outfile, Save_data);
-  if (Save_data) DSet_dOmega.Save(DOmega_Cstr);
+      Scan_dSet_dmu = Solve_2CBG_deps_dchempot (0, Scan_Set, c_int, mu, Omega, kBT, Max_Secs_per_mu_pt/3,
+						LOG_outfile, Save_data);
 
-  DP f = Calculate_Gibbs_Free_Energy (TBA_Set, c_int, mu, Omega, kBT);
-  DP dfdmu = Calculate_dGibbs_dchempot (DSet_dmu, TBA_Set, c_int, mu, Omega, kBT);
-  DP dfdOm = Calculate_dGibbs_dchempot (DSet_dOmega, TBA_Set, c_int, mu, Omega, kBT);
+      Scan_dSet_dOmega = Solve_2CBG_deps_dchempot (1, Scan_Set, c_int, mu, Omega, kBT, Max_Secs_per_mu_pt/3,
+						   LOG_outfile, Save_data);
 
-  if (Save_data)
-    GFE_outfile << f << "\t" << TBA_Set.diff
-		<< "\t" << dfdmu << "\t" << dfdOm << "\t" << 0.5 * (-dfdmu - dfdOm) << "\t" << 0.5 * (-dfdmu + dfdOm)
-		<< endl;
+      DP dfdmu = Calculate_dGibbs_dchempot (Scan_dSet_dmu, Scan_Set, c_int, mu, Omega, kBT);
+      DP dfdOm = Calculate_dGibbs_dchempot (Scan_dSet_dOmega, Scan_Set, c_int, mu, Omega, kBT);
 
-  else cout << setprecision(16) << f << "\t" << 0.5 * (-dfdmu - dfdOm) << "\t" << 0.5 * (-dfdmu + dfdOm);
+      GFE_outfile << mu << "\t" << Calculate_Gibbs_Free_Energy (Scan_Set, c_int, mu, Omega, kBT) << "\t" << Scan_Set.diff
+		  << "\t" << dfdmu << "\t" << dfdOm << "\t" << 0.5 * (-dfdmu - dfdOm) << "\t" << 0.5 * (-dfdmu + dfdOm)
+		  << endl;
+    } // for imu
 
-  if (Save_data) {
     LOG_outfile.close();
     GFE_outfile.close();
+
+    return;
   }
 
-  TBA_Data_2CBG answer;
-  answer.c_int = c_int;
-  answer.mu = mu;
-  answer.Omega = Omega;
-  answer.kBT = kBT;
-  answer.f = f;
-  answer.n1 = 0.5 * (-dfdmu - dfdOm);
-  answer.n2 = 0.5 * (-dfdmu + dfdOm);
-
-  return(answer);
-}
-
-void GFE_muscan_2CBG (DP c_int, DP mu_min, DP mu_max, DP Omega, DP kBT, int Npts_mu, int Max_Secs, bool Save_data)
-{
-  DP dmu = (mu_max - mu_min)/(Npts_mu - 1);
-
-  stringstream LOG_stringstream;
-  string LOG_string;
-  stringstream GFE_stringstream;
-  string GFE_string;
-
-  LOG_stringstream << "GFE_2CBG_c_" << c_int << "_mu_min_" << mu_min << "_mu_max_" << mu_max
-		   << "_Npts_mu_" << Npts_mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".log";
-  GFE_stringstream << "GFE_2CBG_c_" << c_int << "_mu_min_" << mu_min << "_mu_max_" << mu_max
-		   << "_Npts_mu_" << Npts_mu << "_Omega_" << Omega << "_kBT_" << kBT << "_Secs_" << Max_Secs << ".dat";
-
-  LOG_string = LOG_stringstream.str();
-  const char* LOG_Cstr = LOG_string.c_str();
-
-  GFE_string = GFE_stringstream.str();
-  const char* GFE_Cstr = GFE_string.c_str();
-
-  ofstream LOG_outfile;
-  ofstream GFE_outfile;
-
-  LOG_outfile.open(LOG_Cstr);
-  LOG_outfile.precision(6);
-
-  GFE_outfile.open(GFE_Cstr);
-  GFE_outfile.precision(16);
-
-
-  Root_Density_Set Scan_Set (10, 10, 10.0);;
-  Root_Density_Set Scan_dSet_dmu (10, 10, 10.0);;
-  Root_Density_Set Scan_dSet_dOmega (10, 10, 10.0);;
-  DP mu;
-  int Max_Secs_per_mu_pt = Max_Secs/Npts_mu;
-
-  for (int imu = 0; imu < Npts_mu; ++imu) {
-    mu = mu_min + imu * dmu;
-    Scan_Set = Solve_2CBG_TBAE_via_refinements (c_int, mu, Omega, kBT, Max_Secs_per_mu_pt/3, LOG_outfile, Save_data);
-    //Solve_2CBG_TBAE_via_refinements (Scan_Set, c_int, mu, Omega, kBT, Max_Secs_per_mu_pt, LOG_outfile);
-
-    Scan_dSet_dmu = Solve_2CBG_deps_dchempot (0, Scan_Set, c_int, mu, Omega, kBT, Max_Secs_per_mu_pt/3, LOG_outfile, Save_data);
-
-    Scan_dSet_dOmega = Solve_2CBG_deps_dchempot (1, Scan_Set, c_int, mu, Omega, kBT, Max_Secs_per_mu_pt/3, LOG_outfile, Save_data);
-
-    DP dfdmu = Calculate_dGibbs_dchempot (Scan_dSet_dmu, Scan_Set, c_int, mu, Omega, kBT);
-    DP dfdOm = Calculate_dGibbs_dchempot (Scan_dSet_dOmega, Scan_Set, c_int, mu, Omega, kBT);
-
-    GFE_outfile << mu << "\t" << Calculate_Gibbs_Free_Energy (Scan_Set, c_int, mu, Omega, kBT) << "\t" << Scan_Set.diff
-		<< "\t" << dfdmu << "\t" << dfdOm << "\t" << 0.5 * (-dfdmu - dfdOm) << "\t" << 0.5 * (-dfdmu + dfdOm)
-		<< endl;
-  } // for imu
-
-  LOG_outfile.close();
-  GFE_outfile.close();
-
-  return;
-}
-
 } // namespace ABACUS
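// A minimal sketch (hypothetical helper, not defined elsewhere in this codebase) of a
// finite-difference cross-check for the analytic derivative dG/dmu obtained above via
// Solve_2CBG_deps_dchempot and Calculate_dGibbs_dchempot. Only the called signatures appear
// in the patch above; the helper name and the step size h are assumptions.
DP Check_dGibbs_dmu_2CBG (DP c_int, DP mu, DP Omega, DP kBT, int Max_Secs,
                          ofstream& LOG_outfile, bool Save_data)
{
  DP h = 1.0e-3 * fabs(mu) + 1.0e-6;  // assumed step size
  Root_Density_Set Set_plus = Solve_2CBG_TBAE_via_refinements (c_int, mu + h, Omega, kBT,
                                                               Max_Secs/2, LOG_outfile, Save_data);
  Root_Density_Set Set_minus = Solve_2CBG_TBAE_via_refinements (c_int, mu - h, Omega, kBT,
                                                                Max_Secs/2, LOG_outfile, Save_data);
  // Central difference of the Gibbs free energy; compare against the result of
  // Solve_2CBG_deps_dchempot (option 0) followed by Calculate_dGibbs_dchempot.
  return (Calculate_Gibbs_Free_Energy (Set_plus, c_int, mu + h, Omega, kBT)
          - Calculate_Gibbs_Free_Energy (Set_minus, c_int, mu - h, Omega, kBT))/(2.0 * h);
}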
diff --git a/src/TBA/TBA_LiebLin.cc b/src/TBA/TBA_LiebLin.cc
index 5f322db..0a3c4b2 100644
--- a/src/TBA/TBA_LiebLin.cc
+++ b/src/TBA/TBA_LiebLin.cc
@@ -90,8 +90,6 @@ namespace ABACUS {
       rho_GS = LiebLin_rho_GS (c_int, k_F, lambdamax, Npts, req_prec);
       n_found = Density_GS (rho_GS);
 
-      //cout << "k_F " << k_F << "\tn_found " << n_found << endl;
-
     } while (fabs(dk_F) > req_prec && dk_F > 1.0/Npts
 	     && fabs(n - n_found) > req_prec && fabs(n - n_found) > 1.0/Npts);
 
@@ -189,23 +187,6 @@ namespace ABACUS {
 
 
   // Finite T functions:
-  /*
-  // from ABACUS_TBA.h
-
-  struct LiebLin_TBA_Solution {
-
-    DP c_int;
-    DP mu;
-    DP nbar;
-    DP kBT;
-    Root_Density epsilon;
-    Root_Density depsilon_dmu;
-    Root_Density rho;
-    Root_Density rhoh;
-
-    LiebLin_TBA_Solution (DP c_int_ref, DP mu_ref, DP kBT_ref, int Npts_ref, DP req_diff, int Max_Secs);
-  };
-  */
 
   LiebLin_TBA_Solution::LiebLin_TBA_Solution (DP c_int_ref, DP mu_ref, DP kBT_ref, DP req_diff, int Max_Secs)
     : c_int(c_int_ref), mu(mu_ref), kBT(kBT_ref)
@@ -218,20 +199,10 @@ namespace ABACUS {
     ebar = LiebLin_ebar_TBA (rho);
     sbar = LiebLin_sbar_TBA (rho, rhoh);
   }
-  /*
-  LiebLin_TBA_Solution::LiebLin_TBA_Solution (DP c_int_ref, DP mu_ref, DP kBT_ref, int Npts_ref, DP req_diff, int Max_Secs, const LiebLin_TBA_Solution& prev_sol)
-    : c_int(c_int_ref), mu(mu_ref), kBT(kBT_ref)
-  {
-    epsilon = LiebLin_epsilon_TBA (c_int, mu, kBT, Npts_ref, req_diff, Max_Secs, prev_sol.epsilon);
-    depsilon_dmu = LiebLin_depsilon_dmu_TBA (c_int, mu, kBT, Npts_ref, req_diff, Max_Secs, epsilon, prev_sol.depsilon_dmu);
-    rho = LiebLin_rho_TBA (kBT, epsilon, depsilon_dmu);
-    rhoh = LiebLin_rhoh_TBA (kBT, epsilon, depsilon_dmu);
-    nbar = LiebLin_nbar_TBA (rho);
-  }
-  */
 
 
-  LiebLin_TBA_Solution LiebLin_TBA_Solution_fixed_nbar_ebar (DP c_int, DP nbar_required, DP ebar_required, DP req_diff, int Max_Secs)
+  LiebLin_TBA_Solution LiebLin_TBA_Solution_fixed_nbar_ebar (DP c_int, DP nbar_required, DP ebar_required,
+							     DP req_diff, int Max_Secs)
   {
     // This function finds the TBA solution for a required nbar and ebar (mean energy).
     // We here try to triangulate the temperature; the chemical potential is triangulated
@@ -263,26 +234,7 @@ namespace ABACUS {
       lnkBT += dlnkBT;
       tbasol = LiebLin_TBA_Solution_fixed_nbar(c_int, nbar_required, exp(lnkBT), req_diff, Max_Secs);
       niter++;
-      //cout << setprecision(16) << "kBT: niter " << niter << "\tebar = " << tbasol.ebar << "\trequired = " << ebar_required << "\tlnkBT = " << lnkBT << "\tdlnkBT = " << dlnkBT << endl;
     }
-    // FIRST VERSION
-
-    /* SECOND VERSION
-    DP lnkBT = 1.0;
-    DP dlnkBT = 1.0;
-
-    DP mu = 2.0;
-    DP dmu = 1.0;
-
-    LiebLin_TBA_Solution tbasol = LiebLin_TBA_Solution(c_int, mu, kBT, req_diff, Max_Secs);
-    DP mu_prev = mu;
-    DP nbar_prev = tbasol.nbar;
-    DP lnkBT_prev = lnkBT;
-    DP ebar_prev = LiebLin_ebar_TBA (tbasol.rho);
-
-    DP dmu_prev, dnbardmu;
-    DP dlnkBT_prev, debardlnkBT;
-    */ // SECOND VERSION
 
     return(tbasol);
   }
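// A minimal sketch of one way such a triangulation of kBT can be organized: plain bisection on
// ln kBT between two brackets. The update rule actually used for dlnkBT is not shown in this
// hunk, so this is an assumption rather than a restatement of it; names are hypothetical.
template <typename EbarOfLnkBT>
double bisect_lnkBT (double lnkBT_lo, double lnkBT_hi, double ebar_required,
                     EbarOfLnkBT ebar_of_lnkBT, double tol)
{
  // Assumes ebar(kBT) increases monotonically with kBT over the bracket.
  while (lnkBT_hi - lnkBT_lo > tol) {
    double mid = 0.5 * (lnkBT_lo + lnkBT_hi);
    if (ebar_of_lnkBT (mid) < ebar_required) lnkBT_lo = mid;
    else lnkBT_hi = mid;
  }
  return 0.5 * (lnkBT_lo + lnkBT_hi);
}
// Here ebar_of_lnkBT would wrap LiebLin_TBA_Solution_fixed_nbar (c_int, nbar_required,
// exp(lnkBT), req_diff, Max_Secs) and return the resulting tbasol.ebar.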
@@ -292,48 +244,6 @@ namespace ABACUS {
   LiebLin_TBA_Solution LiebLin_TBA_Solution_fixed_nbar (DP c_int, DP nbar_required, DP kBT, DP req_diff, int Max_Secs)
   {
     // Find the required mu. Provide some initial guess.
-    /*
-    DP gamma_required = c_int/nbar_required;
-
-
-    // We define a matrix of mu's (calculated once and for all and filled in here)
-    // from which we can define an accurate first guess for mu, given gamma_required and kBT.
-
-    // The gamma's are by definition (1/16) * 2^{igamma},
-    // and the kBT are (1/16) * 2^{iT}.
-    int ndata = 16;
-    Vect<DP> gamma(ndata);
-    Vect<DP> Tred(ndata); // reduced temperature, kBT/
-    for (int i = 0; i < ndata; ++i) {
-      gamma[i] = 0.0625 * pow(2.0, i);
-      T[i] = 0.0625 * pow(2.0, i);
-    }
-    SQMat<DP> mudata(ndata);
-    // This data was computed separately, for unit filling, using 512 points, to accuracy 10^-4:
-
-    // Figure out which index igamma we should use:
-    // (1/16) * 2^{igamma_low} <~ gamma_required, so
-    int igamma_below = int(log(gamma_required * 16.0)/log(2.0));
-    // Similarly,
-    int iT_below = int(log(kBT * 16.0)/log(2.0));
-
-    igamma_below = ABACUS::max(igamma_below, 0);
-    igamma_below = ABACUS::min(igamma_below, ndata - 1);
-    iT_below = ABACUS::max(iT_below, 0);
-    iT_below = ABACUS::min(iT_below, ndata - 1);
-    // We use indices igamma_below, igamma_below + 1, iT_below, iT_below + 1 to guess mu:
-    // do a four-point extrapolation,
-    DP mu = ((gamma[igamma_below + 1] - gamma_required) * (T[iT_below + 1] - kBT) * mudata[igamma_below][iT_below]
-	     + (gamma_required - gamma[igamma_below]) * (T[iT_below + 1] - kBT) * mudata[igamma_below + 1][iT_below]
-	     + (gamma[igamma_below + 1] - gamma_required) * (kBT - T[iT_below]) * mudata[igamma_below][iT_below + 1]
-	     + (gamma_required - gamma[igamma_below]) * (kBT - T[iT_below]) * mudata[igamma_below + 1][iT_below + 1])
-      /((gamma[igamma_below + 1] - gamma[igamma_below]) * (T[iT_below + 1] - T[iT_below]));
-
-    // Translate to the required filling:
-
-
-    DP dnbardmu = ;
-    */
 
     DP mu = 2.0;
     DP dmu = 1.0;
@@ -360,7 +270,6 @@ namespace ABACUS {
       mu += dmu;
       tbasol = LiebLin_TBA_Solution(c_int, mu, kBT, req_diff, Max_Secs);
       niter++;
-      //cout << setprecision(16) << "\tmu: niter " << niter << "\tnbar = " << tbasol.nbar << "\trequired = " << nbar_required<< "\tmu = " << mu << "\tdmu = " << dmu << endl;
     }
 
     return(tbasol);
@@ -398,7 +307,7 @@ namespace ABACUS {
 
       tni[i] = measure_factor * epsilon.value[i];
       tni_ex[i] = measure_factor * (epsilon.value[i-1] *  (epsilon.lambda[i+1] - epsilon.lambda[i])
-				       + epsilon.value[i+1] * (epsilon.lambda[i] - epsilon.lambda[i-1]))
+				    + epsilon.value[i+1] * (epsilon.lambda[i] - epsilon.lambda[i-1]))
 	/(epsilon.lambda[i+1] - epsilon.lambda[i-1]);
 
       max_delta_tni_dlambda = ABACUS::max(max_delta_tni_dlambda, fabs(tni[i] - tni_ex[i]) * epsilon.dlambda[i]);
@@ -454,7 +363,9 @@ namespace ABACUS {
       int nr_pts_to_add_left = epsilon.Npts/10;
       int nr_pts_to_add_right = epsilon.Npts/10;
       Root_Density epsilon_before_update = epsilon;
-      epsilon = Root_Density(epsilon_before_update.Npts + nr_pts_to_add_left + nr_pts_to_add_right, epsilon_before_update.lambdamax + nr_pts_to_add_left * epsilon_before_update.dlambda[0] + nr_pts_to_add_right * epsilon_before_update.dlambda[epsilon_before_update.Npts - 1]);
+      epsilon = Root_Density(epsilon_before_update.Npts + nr_pts_to_add_left + nr_pts_to_add_right,
+			     epsilon_before_update.lambdamax + nr_pts_to_add_left * epsilon_before_update.dlambda[0]
+			     + nr_pts_to_add_right * epsilon_before_update.dlambda[epsilon_before_update.Npts - 1]);
 
       // Initialize points added on left
       for (int i = 0; i < nr_pts_to_add_left; ++i) {
@@ -470,9 +381,13 @@ namespace ABACUS {
       }
       // Initialize points added on right
       for (int i = 0; i < nr_pts_to_add_right; ++i) {
-	epsilon.lambda[nr_pts_to_add_left + epsilon_before_update.Npts + i] = epsilon_before_update.lambda[epsilon_before_update.Npts - 1] + (i+1) * epsilon_before_update.dlambda[epsilon_before_update.Npts - 1];
-	epsilon.dlambda[nr_pts_to_add_left + epsilon_before_update.Npts + i] = epsilon_before_update.dlambda[epsilon_before_update.Npts - 1];
-	epsilon.value[nr_pts_to_add_left + epsilon_before_update.Npts + i] = epsilon_before_update.value[epsilon_before_update.Npts - 1];
+	epsilon.lambda[nr_pts_to_add_left + epsilon_before_update.Npts + i] =
+	  epsilon_before_update.lambda[epsilon_before_update.Npts - 1]
+	  + (i+1) * epsilon_before_update.dlambda[epsilon_before_update.Npts - 1];
+	epsilon.dlambda[nr_pts_to_add_left + epsilon_before_update.Npts + i] =
+	  epsilon_before_update.dlambda[epsilon_before_update.Npts - 1];
+	epsilon.value[nr_pts_to_add_left + epsilon_before_update.Npts + i] =
+	  epsilon_before_update.value[epsilon_before_update.Npts - 1];
       }
     }
 
@@ -484,14 +399,16 @@ namespace ABACUS {
   {
 
     clock_t StartTime = clock();
-    int Max_CPU_ticks = 98 * (Max_Secs - 0) * CLOCKS_PER_SEC/100;  // give 30 seconds to wrap up, assume we time to 2% accuracy.
+    int Max_CPU_ticks = 98 * (Max_Secs - 0) * CLOCKS_PER_SEC/100;
+    // no wrap-up margin is subtracted here (Max_Secs - 0); assume we time to 2% accuracy.
 
     // Set basic precision needed:
     DP running_prec = 1.0;
 
     DP refine_fraction = 0.5; // value fraction of points to be refined
 
-    DP lambdamax_init = 10.0 * sqrt(ABACUS::max(1.0, kBT + mu));  // such that exp(-(lambdamax^2 - mu - Omega)/T) <~ machine_eps
+    DP lambdamax_init = 10.0 * sqrt(ABACUS::max(1.0, kBT + mu));
+    // such that exp(-(lambdamax^2 - mu - Omega)/T) <~ machine_eps
 
     int Npts = 50;
 
@@ -522,7 +439,6 @@ namespace ABACUS {
       // Refine... returns sum_delta_tni_dlambda, so running prec is estimated as...
       previous_running_prec = running_prec;
       running_prec = Refine_LiebLin_epsilon_TBA (epsilon, c_int, mu, kBT, refine_fraction);
-      //cout << "ncycles = " << ncycles << "\trunning_prec = " << running_prec << endl;
       running_prec = ABACUS::min(running_prec, previous_running_prec);
 
       // Now iterate to convergence for given discretization
@@ -537,11 +453,13 @@ namespace ABACUS {
 	Vect<DP> Tln1plusemineps(epsilon.Npts);
 	for (int i = 0; i < epsilon.Npts; ++i) {
 	  Tln1plusemineps[i] = epsilon.value[i] > 0.0 ?
-	    kBT * (epsilon.value[i] < 24.0 * kBT ? log(1.0 + exp(-epsilon.value[i]/kBT)) : exp(-epsilon.value[i]/kBT) * (1.0 - 0.5 * exp(-epsilon.value[i]/kBT)))
-	    //kBT * log(1.0 + exp(-epsilon.value[i]/kBT))
+	    kBT * (epsilon.value[i] < 24.0 * kBT
+		   ? log(1.0 + exp(-epsilon.value[i]/kBT))
+		   : exp(-epsilon.value[i]/kBT) * (1.0 - 0.5 * exp(-epsilon.value[i]/kBT)))
 	    :
-	    -epsilon.value[i] + kBT * (-epsilon.value[i] < 24.0 * kBT ? log (1.0 + exp(epsilon.value[i]/kBT)) : exp(epsilon.value[i]/kBT) * (1.0 - 0.5 * exp(epsilon.value[i]/kBT)));
-	  //-epsilon.value[i] + kBT * log (1.0 + exp(epsilon.value[i]/kBT));
+	    -epsilon.value[i] + kBT * (-epsilon.value[i] < 24.0 * kBT
+				       ? log (1.0 + exp(epsilon.value[i]/kBT))
+				       : exp(epsilon.value[i]/kBT) * (1.0 - 0.5 * exp(epsilon.value[i]/kBT)));
 	  // Keep previous rapidities:
 	  epsilon.prev_value[i] = epsilon.value[i];
 	}
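// A minimal sketch restating the piecewise evaluation above of T ln(1 + e^{-eps/T}):
// for eps < 0 the log is rewritten as -eps + T ln(1 + e^{eps/T}) so the exponential never
// overflows, and for |eps| >= 24 T the log is replaced by its two-term expansion, which keeps
// full relative accuracy when e^{-|eps|/T} becomes very small. The function name is hypothetical.
#include <cmath>

double T_ln_one_plus_exp_minus (double eps, double kBT)
{
  double x = std::fabs(eps)/kBT;
  double tail = x < 24.0 ? kBT * std::log(1.0 + std::exp(-x))
                         : kBT * std::exp(-x) * (1.0 - 0.5 * std::exp(-x));
  return eps > 0.0 ? tail : -eps + tail;
}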
@@ -551,7 +469,11 @@ namespace ABACUS {
 	for (int i = 0; i < epsilon.Npts; ++i) {
 	  a_2_Tln_conv[i] = 0.0;
 
-	  for (int j = 0; j < epsilon.Npts; ++j) a_2_Tln_conv[i] += oneoverpi * (atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] + 0.5 * epsilon.dlambda[j])) - atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] - 0.5 * epsilon.dlambda[j]))) * Tln1plusemineps[j];
+	  for (int j = 0; j < epsilon.Npts; ++j)
+	    a_2_Tln_conv[i] +=
+	      oneoverpi * (atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] + 0.5 * epsilon.dlambda[j]))
+			   - atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j]
+					      - 0.5 * epsilon.dlambda[j]))) * Tln1plusemineps[j];
 
 	} // a_2_Tln_conv is now calculated
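// A minimal sketch of the quadrature rule used in the convolution above: writing the kernel as
// a(x) = c/(pi*(c*c + x*x)) with c = 1/oneoverc, its integral over each rapidity cell
// [lambda_j - dlambda_j/2, lambda_j + dlambda_j/2] is taken exactly (the atan difference),
// while Tln1plusemineps is treated as constant on the cell. Names below are hypothetical.
#include <cmath>
#include <vector>

double lorentzian_cell_weight (double x, double half_cell, double c)
{
  const double pi = 3.141592653589793;
  // Exact integral of a(x') = c/(pi*(c*c + x'*x')) for x' in [x - half_cell, x + half_cell].
  return (std::atan((x + half_cell)/c) - std::atan((x - half_cell)/c))/pi;
}

double convolve_at (double lambda_i, const std::vector<double>& lambda,
                    const std::vector<double>& dlambda, const std::vector<double>& g, double c)
{
  double sum = 0.0;
  for (size_t j = 0; j < lambda.size(); ++j)
    sum += lorentzian_cell_weight (lambda_i - lambda[j], 0.5 * dlambda[j], c) * g[j];
  return sum;
}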
 
@@ -563,9 +485,7 @@ namespace ABACUS {
 	  epsilon.value[i] -= a_2_Tln_conv[i];
 
 	  // Include some damping:
-	  //epsilon.value[i] = 0.1 * epsilon.prev_value[i] + 0.9 * epsilon.value[i];
 	  epsilon.value[i] = 0.1 * epsilon.prev_value[i] + 0.9 * epsilon.value[i];
-	  //* (1.0 + (epsilon.value[i] - epsilon.prev_value[i])/fabs(epsilon.value[i] + epsilon.prev_value[i]));
 	}
 
 	niter++;
@@ -582,15 +502,11 @@ namespace ABACUS {
 	StopTime = clock();
 	CPU_ticks += StopTime - StartTime;
 
-	//cout << "epsilon: niter = " << niter << "\tdiff = " << epsilon.diff << endl;
-	//cout << epsilon.lambda[0] << "\t" << epsilon.dlambda[0] << endl;
-	//cout << "a_2_Tln_conv[0] = " << a_2_Tln_conv[0] << "\tTln1plusemineps[0] = " << Tln1plusemineps[0] << endl;
       } while (niter < 5 || niter < niter_max && CPU_ticks < Max_CPU_ticks && epsilon.diff > 0.1*running_prec);
 
       ncycles++;
       niter_tot += niter;
 
-      //cout << "End of a cycle: niter = " << niter << "\tniter_tot = " << niter_tot << "\tepsilon.diff = " << epsilon.diff << "\tNpts = " << epsilon.Npts << "\tlambdamax = " << epsilon.lambda[0] << "\trunning_prec = " << running_prec << "\treq_diff = " << req_diff << endl;
     } // do cycles
     while (ncycles < 5 || running_prec > req_diff && CPU_ticks < Max_CPU_ticks);
 
@@ -603,7 +519,8 @@ namespace ABACUS {
   {
 
     clock_t StartTime = clock();
-    int Max_CPU_ticks = 98 * (Max_Secs - 0) * CLOCKS_PER_SEC/100;  // give 30 seconds to wrap up, assume we time to 2% accuracy.
+    int Max_CPU_ticks = 98 * (Max_Secs - 0) * CLOCKS_PER_SEC/100;
+    // no wrap-up margin is subtracted here (Max_Secs - 0); assume we time to 2% accuracy.
 
     Root_Density depsilon_dmu = epsilon;
 
@@ -644,7 +561,10 @@ namespace ABACUS {
 	a_2_depsover1plusepluseps_conv[i] = 0.0;
 
 	for (int j = 0; j < depsilon_dmu.Npts; ++j)
-	  a_2_depsover1plusepluseps_conv[i] += oneoverpi * (atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] + 0.5 * epsilon.dlambda[j])) - atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] - 0.5 * epsilon.dlambda[j]))) * depsover1plusepluseps[j];
+	  a_2_depsover1plusepluseps_conv[i] +=
+	    oneoverpi * (atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] + 0.5 * epsilon.dlambda[j]))
+			 - atan(oneoverc * (epsilon.lambda[i] - epsilon.lambda[j] - 0.5 * epsilon.dlambda[j])))
+	    * depsover1plusepluseps[j];
       }
 
       // Reconstruct the depsilon_dmu function:
@@ -668,7 +588,6 @@ namespace ABACUS {
       StopTime = clock();
       CPU_ticks += StopTime - StartTime;
 
-      //cout << "depsilon_dmu: niter = " << niter << "\tdiff = " << depsilon_dmu.diff << endl;
     } while (niter < 5 || niter < niter_max && CPU_ticks < Max_CPU_ticks && depsilon_dmu.diff > req_diff);
 
     return(depsilon_dmu);
@@ -717,7 +636,9 @@ namespace ABACUS {
   DP LiebLin_sbar_TBA (const Root_Density& rho, const Root_Density& rhoh)
   {
     DP sbar = 0.0;
-    for (int i = 0; i < rho.Npts; ++i) sbar += ((rho.value[i] + rhoh.value[i]) * log(rho.value[i] + rhoh.value[i]) - rho.value[i] * log(rho.value[i]+1.0e-30) - rhoh.value[i] * log(rhoh.value[i]+1.0e-30)) * rho.dlambda[i];
+    for (int i = 0; i < rho.Npts; ++i)
+      sbar += ((rho.value[i] + rhoh.value[i]) * log(rho.value[i] + rhoh.value[i])
+	       - rho.value[i] * log(rho.value[i]+1.0e-30) - rhoh.value[i] * log(rhoh.value[i]+1.0e-30)) * rho.dlambda[i];
 
     return(sbar);
   }
@@ -743,20 +664,12 @@ namespace ABACUS {
 	  lambda_found[Nfound++] = 0.25 * (rho.lambda[i-1] + 3.0 * rho.lambda[i]);
 	}
 	else {
-	  //lambda_found[Nfound] = rho.lambda[i];
 	  // Better: center the lambda_found between these points:
-	  //lambda_found[Nfound] = rho.lambda[i-1] + (rho.lambda[i] - rho.lambda[i-1]) * ((Nfound + 1.0) - integral_prev)/(integral - integral_prev);
 	  lambda_found[Nfound] = 0.5 * (rho.lambda[i-1] + rho.lambda[i]);
 	  Nfound++;
 	}
       }
-      //cout << "\ti = " << i << "\tintegral = " << integral << "\tNfound = " << Nfound << endl;
     }
-    //cout << "rho: " << rho.Npts << " points" << endl << rho.value << endl;
-    //cout << "sym: " << rho.value[0] << " " << rho.value[rho.value.size() - 1]
-    // << "\t" << rho.value[rho.value.size()/2] << " " << rho.value[rho.value.size()/2 + 1] << endl;
-    //cout << "Found " << Nfound << " particles." << endl;
-    //cout << "lambda_found = " << lambda_found << endl;
 
     Vect<DP> lambda(N);
     // Fill up the found rapidities:
@@ -770,11 +683,9 @@ namespace ABACUS {
     for (int i = 0; i < N; ++i) {
       DP sum = 0.0;
       for (int j = 0; j < N; ++j) sum += 2.0 * atan((lambda[i] - lambda[j])/c_int);
-      //Ix2[i] = 2.0* int((L * lambda[i] + sum)/twoPI) + (N % 2) - 1;
       // For N is even/odd, we want to round off to the nearest odd/even integer.
       Ix2[i] = 2.0 * floor((L* lambda[i] + sum)/twoPI + 0.5 * (N%2 ? 1 : 2)) + (N%2) - 1;
     }
-    //cout << "Found quantum numbers " << endl << Ix2 << endl;
 
     // Check that the quantum numbers are all distinct:
     bool allOK = false;
@@ -784,12 +695,10 @@ namespace ABACUS {
       allOK = true;
       for (int i = 0; i < N-1; ++i) if (Ix2[i] == Ix2[i+1]) allOK = false;
     }
-    //cout << "Found modified quantum numbers " << endl << Ix2 << endl;
 
     LiebLin_Bethe_State rhostate(c_int, L, N);
     rhostate.Ix2 = Ix2;
     rhostate.Compute_All(true);
-    //cout << "rapidities of state found: " << rhostate.lambdaoc << endl;
 
     return(rhostate);
   }
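// A minimal sketch unpacking the rounding rule above: with y = (L*lambda[i] + sum)/twoPI, the
// quantum number is the integer closest to 2*y with the required parity (even when N is odd,
// odd when N is even). The helper below is hypothetical but reproduces the floor-based
// expression exactly.
#include <cmath>

int nearest_int_of_parity (double twoy, bool want_odd)
{
  return want_odd ? 2 * int(std::floor(0.5 * (twoy - 1.0) + 0.5)) + 1   // nearest odd integer
                  : 2 * int(std::floor(0.5 * twoy + 0.5));              // nearest even integer
}
// Usage matching the line above: Ix2[i] = nearest_int_of_parity (2.0*(L*lambda[i] + sum)/twoPI, N % 2 == 0);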
diff --git a/src/TBA/TBA_XXZ.cc b/src/TBA/TBA_XXZ.cc
index a0a50ac..53fd5ab 100755
--- a/src/TBA/TBA_XXZ.cc
+++ b/src/TBA/TBA_XXZ.cc
@@ -80,7 +80,8 @@ namespace ABACUS {
       conv = 0.0;
       for (int j = 0; j < rhotot_GS.Npts; ++j)
 	conv += fabs(rhotot_GS.lambda[j]) > B ? 0.0 :
-	  XXZ_a2_kernel (sin2zeta, cos2zeta, rhotot_GS.lambda[i] - rhotot_GS.lambda[j]) * rhotot_GS.prev_value[j] * rhotot_GS.dlambda[j];
+	  XXZ_a2_kernel (sin2zeta, cos2zeta, rhotot_GS.lambda[i] - rhotot_GS.lambda[j])
+	  * rhotot_GS.prev_value[j] * rhotot_GS.dlambda[j];
       rhotot_GS.value[i] = XXZ_a1_kernel(sinzeta, coszeta, rhotot_GS.lambda[i]) - conv;
     }
 
@@ -109,11 +110,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_rhotot_GS (zeta, B, rhotot_GS);
-      //cout << rhotot_GS.diff << endl;
       niter ++;
     } while (rhotot_GS.diff > req_prec);
 
-    //cout << "rhotot: niter = " << niter << endl;
     return(rhotot_GS);
   }
 
@@ -147,7 +146,6 @@ namespace ABACUS {
 	  XXZ_a2_kernel (sin2zeta, cos2zeta, eps_GS.lambda[i] - eps_GS.lambda[j])
 	  * eps_GS.prev_value[j] * eps_GS.dlambda[j];
       eps_GS.value[i] = Hz - PI * sinzeta * XXZ_a1_kernel(sinzeta, coszeta, eps_GS.lambda[i]) - conv;
-      //cout << i << "\t" << eps_GS.lambda[i] << "\t" << eps_GS.value[i] << "\t" << conv << endl;
     }
 
     // Calculate the sum of differences:
@@ -175,12 +173,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_eps_GS (zeta, Hz, eps_GS);
-      //cout << eps_GS.diff << endl;
       niter++;
-      //cout << niter << "\t" << eps_GS.diff << endl;
     } while (eps_GS.diff > req_prec);
 
-  //cout << "eps: niter = " << niter << endl;
     return(eps_GS);
   }
 
@@ -232,11 +227,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_depsdlambda_GS (zeta, B, depsdlambda_GS);
-      //cout << depsdlambda_GS.diff << endl;
       niter++;
     } while (depsdlambda_GS.diff > req_prec);
 
-    //cout << "depsdlambda: niter = " << niter << endl;
     return(depsdlambda_GS);
   }
 
@@ -289,12 +282,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_b2BB_lambda_B (zeta, B, b2BB_lambda_B);
-      //cout << eps_GS.diff << endl;
       niter++;
     } while (b2BB_lambda_B.diff > req_prec);
 
-    //if (lambda_p + lambda_h + 2.0*B > 0.0) cout << "Nonzero backflow possible." << endl;
-    //cout << "Kbackflow: niter = " << niter << endl;
     return(b2BB_lambda_B);
   }
 
@@ -347,12 +337,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_b2BB_lambda_lambdap (zeta, B, lambdap, b2BB_lambda_lambdap);
-      //cout << eps_GS.diff << endl;
       niter++;
     } while (b2BB_lambda_lambdap.diff > req_prec);
 
-    //if (lambda_p + lambda_h + 2.0*B > 0.0) cout << "Nonzero backflow possible." << endl;
-    //cout << "Kbackflow: niter = " << niter << endl;
     return(b2BB_lambda_lambdap);
   }
 
@@ -368,14 +355,12 @@ namespace ABACUS {
     DP sin2zeta = sin(2.0*zeta);
     DP cos2zeta = cos(2.0*zeta);
     for (int i = 0; i < Kbackflow_GS.Npts; ++i) {
-      //if (Kbackflow_GS.lambda[i] < -B || Kbackflow_GS.lambda[i] > lambda_p + lambda_h + B) Kbackflow_GS.value[i] = 0.0;
       if (false && fabs(Kbackflow_GS.lambda[i]) > B) Kbackflow_GS.value[i] = 0.0;
       else {
 	// First, calculate the convolution
 	conv = 0.0;
 	for (int j = 0; j < Kbackflow_GS.Npts; ++j)
 	  conv += fabs(Kbackflow_GS.lambda[j]) > B ? 0.0 :
-	    //(Kbackflow_GS.lambda[i] < -B || Kbackflow_GS.lambda[i] > lambda_p + lambda_h + B) ? 0.0 :
 	    XXZ_a2_kernel (sin2zeta, cos2zeta, Kbackflow_GS.lambda[i] - Kbackflow_GS.lambda[j])
 	    * Kbackflow_GS.prev_value[j] * Kbackflow_GS.dlambda[j];
 	Kbackflow_GS.value[i] = -XXZ_a2_kernel(sin2zeta, cos2zeta, Kbackflow_GS.lambda[i] - lambda_p)
@@ -407,12 +392,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_Kbackflow_GS (zeta, B, lambda_p, lambda_h, Kbackflow_GS);
-      //cout << eps_GS.diff << endl;
       niter++;
     } while (Kbackflow_GS.diff > req_prec);
 
-    //if (lambda_p + lambda_h + 2.0*B > 0.0) cout << "Nonzero backflow possible." << endl;
-    //cout << "Kbackflow: niter = " << niter << endl;
     return(Kbackflow_GS);
   }
 
@@ -465,12 +447,9 @@ namespace ABACUS {
     int niter = 0;
     do {
       Iterate_XXZ_Fbackflow_GS (zeta, B, lambda_p, lambda_h, Fbackflow_GS);
-      //cout << eps_GS.diff << endl;
       niter++;
     } while (Fbackflow_GS.diff > req_prec);
 
-    //if (lambda_p + lambda_h + 2.0*B > 0.0) cout << "Nonzero backflow possible." << endl;
-    //cout << "Fbackflow: niter = " << niter << endl;
     return(Fbackflow_GS);
   }
 
@@ -519,199 +498,10 @@ namespace ABACUS {
 
     do {
       Iterate_XXZ_Z_GS (zeta, B, Z_GS);
-      //cout << depsdlambda_GS.diff << endl;
     } while (Z_GS.diff > req_prec);
 
     return(Z_GS);
   }
 
-  /*
-  void XXZ_Compare_Lattice_and_Continuum_Backflows_base_1010 (DP Delta, int N, int M, long long int id)
-  {
-    // Define the chain:  J, Delta, h, Nsites
-    Heis_Chain chain(1.0, Delta, 0.0, N);
-
-    // Define the base:  chain, Mdown
-    Heis_Base gbase(chain, M);
-
-    // Define the ground state
-    XXZ_Bethe_State gstate(chain, gbase);
-
-    // Compute everything about the ground state
-    gstate.Compute_All(true);
-
-    // Define the number of rapidities for an excited state
-    Vect_INT Nrapidities(0, chain.Nstrings);
-    Nrapidities[0] = M;
-
-    Vect_INT Nexcitations(0, 2* chain.Nstrings + 2);
-    Nexcitations[0] = 0;
-    Nexcitations[1] = 1;
-    Nexcitations[2] = 0;
-    Nexcitations[3] = 1;
-
-    // Define a base configuration for this set of rapidity numbers
-    Heis_Base ebase (chain, Nrapidities);
-
-    // Define the excited state
-    XXZ_Bethe_State estate(chain, ebase);
-
-    // Define an offset from a base and a number of holes
-    Ix2_Offsets offsets(ebase, Nexcitations);
-
-    // Set the offset to the desired id
-    offsets.Set_to_id (id);
-
-    // Set the offset data into the quantum numbers
-    estate.Set_Ix2_Offsets(offsets);
-
-    // Compute everything about this eigenstate
-    estate.Compute_All(true);
-
-    DP k_ext_N = estate.K - gstate.K;
-    DP omega_N = estate.E - gstate.E;
-    DP lambdap = estate.lambda[0][0];
-    DP lambdah = 0.0;
-    for (int alpha = 1; alpha < estate.base[0] - 1; ++alpha)
-      if (estate.Ix2[0][alpha + 1] > estate.Ix2[0][alpha] + 2) {
-	lambdah = 0.5 * (estate.lambda[0][alpha] + estate.lambda[0][alpha + 1]);
-      }
-
-
-    // Now solve the thermodynamic equations:
-    DP req_prec = 1.0e-12;
-    DP Hz_N = H_vs_M (Delta, N, M);
-    cout << "Hz_N = " << Hz_N << endl;
-    DP lambdamax = 4.0 * fabs(gstate.lambda[0][0]);
-    int Npts = 2000;
-    Root_Density eps_GS = XXZ_eps_GS (Delta, Hz_N, lambdamax, Npts, req_prec);
-
-    // We can read off the value of B from the obtained eps_GS function:
-    DP B_eps = 0.0;
-    int iB_eps = Npts/2;
-    for (int i = Npts/2; i < Npts - 1; ++i)
-      if (eps_GS.value[i] * eps_GS.value[i+1] < 0.0) {
-	B_eps = 0.5 * (eps_GS.lambda[i] + eps_GS.lambda[i+1]);
-	iB_eps = i;
-      }
-    if (B_eps == 0.0) {
-      cout << "Delta = " << Delta << "\tN = " << N << "\tM = " << M << "\tHz_N = " << Hz_N << "\tid = " << id << endl;
-      ABACUSerror("B not found.");
-    }
-    if (B_eps > 0.5 * lambdamax) {
-      cout << "Delta = " << Delta << "\tN = " << N << "\tM = " << M << "\tHz_N = " << Hz_N << "\tid = " << id << endl;
-      ABACUSerror("Use a higher value of lambdamax.");
-    }
-    //DP lambdamax = lambdamax_eps;
-    //DP lambdamax = 2.0 * fabs(gstate.lambda[0][0]); // window of definition of Kbackflow is covered
-    // Start by finding the appropriate B:
-    DP B = DP(M)/N; // initial guess;
-    DP B_old = B;
-    DP dB = 0.5 * B; // variation we allow;
-    DP m_TBA = 0.0;
-    //Vect<Root_Density> rhotot_GS_iter(10);// = XXZ_rhotot_GS (Delta, B, lambdamax, Npts, req_prec);
-    Root_Density rhotot_GS = XXZ_rhotot_GS (Delta, B, lambdamax, Npts, req_prec);
-    int iter = 0;
-    do {
-      B_old = B;
-      rhotot_GS = XXZ_rhotot_GS (Delta, B, lambdamax, Npts, req_prec);
-      m_TBA = 0.0;
-      for (int i1 = 0; i1 < rhotot_GS.Npts; ++i1) if (fabs(rhotot_GS.lambda[i1]) < B) m_TBA += rhotot_GS.value[i1] * rhotot_GS.dlambda[i1];
-      if (m_TBA < DP(M)/N) B += dB;
-      else B -= dB;
-      cout << "B_old = " << B_old << "\tB = " << B << "\tdB = " << dB << "\tm = " << m_TBA << "\tM/N = " << DP(M)/N << endl;
-      dB *= 1.0/1.9;
-    } while (dB > 0.001 && iter < 9);
-
-    cout << "Check of consistency:  B_eps = " << B_eps << "\tB = " << B << "\tm_TBA = " << m_TBA << "\tM/N = " << DP(M)/N << endl;
-
-    //Root_Density rhotot_GS = XXZ_rhotot_GS (Delta, B, lambdamax, Npts, req_prec);
-    Root_Density b2BB_lambda_B = XXZ_b2BB_lambda_B (Delta, B, lambdamax, Npts, req_prec);
-    Root_Density Kbackflow_GS = XXZ_Kbackflow_GS (Delta, B, lambdamax, lambdap, lambdah, Npts, req_prec);
-    Root_Density Fbackflow_GS = XXZ_Fbackflow_GS (Delta, B, lambdamax, lambdap, lambdah, Npts, req_prec);
-
-    // Calculate the momentum and energy of the TBA state:
-    // Momentum:
-    DP conv = 0.0;
-    DP zeta = acos(Delta);
-    for (int l = 0; l < rhotot_GS.Npts; ++l) {
-      conv += fabs(rhotot_GS.lambda[l]) > B ? 0.0 :
-	(XXZ_phi2_kernel(zeta, lambdap - rhotot_GS.lambda[l])
-	 - XXZ_phi2_kernel(zeta, lambdah - rhotot_GS.lambda[l]))
-	* rhotot_GS.value[l] * rhotot_GS.dlambda[l];
-    }
-    DP k_ext_TBA = -XXZ_phi1_kernel (zeta, lambdap) + XXZ_phi1_kernel (zeta, lambdah) + conv;
-    // Energy:
-    conv = 0.0;
-    DP sinzeta = sin(zeta);
-    DP coszeta = cos(zeta);
-    for (int l = 0; l < Kbackflow_GS.Npts; ++l)
-      conv += fabs(Kbackflow_GS.lambda[l]) > B ? 0.0 :
-	(Hz_N - PI * sinzeta * XXZ_a1_kernel(sinzeta, coszeta, Kbackflow_GS.lambda[l]))
-	* Kbackflow_GS.value[l] * Kbackflow_GS.dlambda[l];
-    DP omega_TBA = -PI * sin(zeta) * (XXZ_a1_kernel(sinzeta, coszeta, lambdap)
-				      - XXZ_a1_kernel(sinzeta, coszeta, lambdah)) + conv;
-
-    cout << "k_ext_N = " << k_ext_N << "\tk_ext_TBA = " << k_ext_TBA << "\tomega_N = " << omega_N << "\tomega_TBA = " << omega_TBA << endl;
-
-    // Get back to the finite lattice case:
-    // Define densities as 1/N \sum_j delta (lambda - lambda_j);  use Gaussian smoothing of functions.
-    DP gwidth = 2.0 * B * log(DP(N))/M; // mean rapidity spacing times some factor, for smoothing of Gaussians
-    DP gwidthsq = gwidth * gwidth;
-    Vect_DP rhoGS_N (0.0, Npts);
-    Vect_DP rhoexc_N (0.0, Npts);
-    Vect_DP Kflow_N (0.0, Npts);
-    for (int iout = 0; iout < Npts; ++iout) {
-      DP lambdaout = rhotot_GS.lambda[iout];
-      // We compute the densities at each point...
-      for (int alpha = 0; alpha < M; ++alpha)
-	rhoGS_N[iout] += exp(-(gstate.lambda[0][alpha] - lambdaout) * (gstate.lambda[0][alpha] - lambdaout)/gwidthsq);
-      rhoGS_N[iout] /= N * sqrt(PI) * gwidth;
-      for (int alpha = 1; alpha < estate.base[0]; ++alpha)
-	rhoexc_N[iout] += exp(-(estate.lambda[0][alpha] - lambdaout) * (estate.lambda[0][alpha] - lambdaout)/gwidthsq);
-      rhoexc_N[iout] /= N * sqrt(PI) * gwidth;
-
-      Kflow_N[iout] = N * (rhoexc_N[iout] - rhoGS_N[iout] + (1.0/N) * exp(-(lambdah - lambdaout) * (lambdah - lambdaout)/gwidthsq)/(sqrt(PI) * gwidth));
-    } // for iout
-
-    cout << "Here 1" << endl;
-    // Now produce a file with the density flow:
-    stringstream flo_stringstream;
-    flo_stringstream << "Flow_D_" << Delta << "_N_" << N << "_M_" << M << "_base_id_" << estate.base_id << "_type_id_" << estate.type_id
-		     << "_id_" << estate.id << ".flo";
-    string flo_string = flo_stringstream.str();
-    const char* flo_Cstr = flo_string.c_str();
-
-    ofstream outfile_flo;
-    outfile_flo.open(flo_Cstr);
-    outfile_flo.precision(16);
-
-    for (int iout = 0; iout < Npts; ++iout)
-      outfile_flo << rhotot_GS.lambda[iout] << "\t" << rhotot_GS.value[iout] << "\t" << rhoGS_N[iout] << "\t"
-		  << rhoexc_N[iout] << "\t" << Kbackflow_GS.value[iout] << "\t"
-		  << (fabs(Kbackflow_GS.lambda[iout]) > B ? 0.0 : Kbackflow_GS.value[iout]) - b2BB_lambda_B.value[iout] * Fbackflow_GS.value[iB_eps]
-	+ b2BB_lambda_B.value[b2BB_lambda_B.Npts - 1 - iout] * Fbackflow_GS.value[Fbackflow_GS.Npts - 1 - iB_eps] << "\t" << Kflow_N[iout] << endl;
-
-    outfile_flo.close();
-
-    // ... and a file with the remainder of the data:
-    stringstream dat_stringstream;
-    dat_stringstream << "Flow_D_" << Delta << "_N_" << N << "_M_" << M << "_base_id_" << estate.base_id << "_type_id_" << estate.type_id
-		     << "_id_" << estate.id << ".dat";
-    string dat_string = dat_stringstream.str();
-    const char* dat_Cstr = dat_string.c_str();
-
-    ofstream outfile_dat;
-    outfile_dat.open(dat_Cstr);
-    outfile_dat.precision(16);
-
-    outfile_dat << "lambdap\tlambdah\tk_ext_N\tk_ext_TBA\tomega_N\tomega_TBA"
-		<< lambdap << "\t" << lambdah << "\t" << k_ext_N << "\t" << k_ext_TBA << "\t" << omega_N << "\t" << omega_TBA << endl;
-
-    outfile_dat.close();
-
-    return;
-  }
-  */
 
 }  // namespace ABACUS
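// A minimal sketch of the iteration pattern shared by the XXZ ground-state solvers above
// (rhotot_GS, eps_GS, depsdlambda_GS, the backflows and Z_GS): apply the update step until the
// summed change over all points, stored in Root_Density::diff by the Iterate_* routine, drops
// below req_prec. The wrapper and its template parameter are hypothetical.
template <typename IterateOnce>
int iterate_to_convergence (Root_Density& f, double req_prec, IterateOnce iterate_once)
{
  int niter = 0;
  do {
    iterate_once (f);   // e.g. a call like Iterate_XXZ_eps_GS (zeta, Hz, f)
    niter++;
  } while (f.diff > req_prec);
  return niter;
}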
diff --git a/src/UTILS/Data_File_Name.cc b/src/UTILS/Data_File_Name.cc
index bcf2260..03b4514 100644
--- a/src/UTILS/Data_File_Name.cc
+++ b/src/UTILS/Data_File_Name.cc
@@ -28,7 +28,8 @@ namespace ABACUS {
 
   // Lieb-Liniger:
 
-  void Data_File_Name (stringstream& name, char whichDSF, DP c_int, DP L, int N, int iKmin, int iKmax, DP kBT, DP L2, string defaultScanStatename)
+  void Data_File_Name (stringstream& name, char whichDSF, DP c_int, DP L, int N, int iKmin, int iKmax,
+		       DP kBT, DP L2, string defaultScanStatename)
   {
     name << "LiebLin_";
     if (whichDSF == 'Z') name << "Z";
@@ -42,7 +43,6 @@ namespace ABACUS {
     else ABACUSerror("Option not implemented in Data_File_Name");
 
     name << "_c_" << c_int << "_L_" << L << "_N_" << N;
-    //if (fixed_iK) name << "_iK_" << iKneeded;
     if (defaultScanStatename == "") name << "_" << N << "_0_"; // simulates label of ground state
     else name << "_" << defaultScanStatename;
     if (iKmin == iKmax) name << "_iK_" << iKmin;
@@ -53,8 +53,8 @@ namespace ABACUS {
     return;
   }
 
-  //void Data_File_Name (stringstream& name, char whichDSF, bool fixed_iK, int iKneeded, LiebLin_Bethe_State& State, LiebLin_Bethe_State& RefScanState)
-  void Data_File_Name (stringstream& name, char whichDSF, int iKmin, int iKmax, DP kBT, LiebLin_Bethe_State& State, LiebLin_Bethe_State& RefScanState, string defaultScanStatename)
+  void Data_File_Name (stringstream& name, char whichDSF, int iKmin, int iKmax, DP kBT,
+		       LiebLin_Bethe_State& State, LiebLin_Bethe_State& RefScanState, string defaultScanStatename)
   {
     name << "LiebLin_";
     if (whichDSF == 'Z') name << "Z";
@@ -68,7 +68,6 @@ namespace ABACUS {
     else ABACUSerror("Option not implemented in Data_File_Name");
 
     name << "_c_" << State.c_int << "_L_" << State.L << "_N_" << State.N;
-    //if (fixed_iK) name << "_iK_" << iKneeded;
     if (defaultScanStatename == "") name << "_" << State.label;
     else name << "_" << defaultScanStatename;
     if (iKmin == iKmax) name << "_iK_" << iKmin;  else name << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
@@ -81,8 +80,8 @@ namespace ABACUS {
 
   // Heisenberg:
 
-  //void Data_File_Name (stringstream& name, char whichDSF, DP Delta, int N, int M, bool fixed_iK, int iKneeded, int N2)
-  void Data_File_Name (stringstream& name, char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax, DP kBT, int N2, string defaultScanStatename)
+  void Data_File_Name (stringstream& name, char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax,
+		       DP kBT, int N2, string defaultScanStatename)
   {
     name << "HEIS_";
     if (whichDSF == 'Z') name << "Z";
@@ -104,21 +103,14 @@ namespace ABACUS {
     if (defaultScanStatename == "") name << "_" << M << "_0_"; // simulates label of ground state
     else name << "_" << defaultScanStatename;
 
-    //if (fixed_iK) {
-    //  name << "_iK_";
-    //  for (int i = 0; i < int(log10(DP(N/2))) - int(log10(DP(iKneeded))); ++i) name << "0";
-    //  name << iKneeded;
-    //}
-    // From ABACUS++G_8 onwards: for Heisenberg, always scan over all momentum integers
-    //if (iKmin == iKmax) name << "_iK_" << iKmin;  else name << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     if (kBT > 0.0) name << "_kBT_" << kBT;
     if (whichDSF == 'q') name << "_N2_" << N2;
 
     return;
   }
 
-  //void Data_File_Name (stringstream& name, char whichDSF, bool fixed_iK, int iKneeded, Heis_Bethe_State& State, Heis_Bethe_State& RefScanState)
-  void Data_File_Name (stringstream& name, char whichDSF, int iKmin, int iKmax, DP kBT, Heis_Bethe_State& State, Heis_Bethe_State& RefScanState, string defaultScanStatename)
+  void Data_File_Name (stringstream& name, char whichDSF, int iKmin, int iKmax, DP kBT,
+		       Heis_Bethe_State& State, Heis_Bethe_State& RefScanState, string defaultScanStatename)
   {
     name << "HEIS_";
     if (whichDSF == 'Z') name << "Z";
@@ -139,15 +131,6 @@ namespace ABACUS {
     name << State.base.Mdown;
     if (defaultScanStatename == "") name << "_" << State.label;
     else name << "_" << defaultScanStatename;
-    /*
-    if (fixed_iK) {
-      name << "_iK_";
-      for (int i = 0; i < int(log10(DP(State.chain.Nsites/2))) - int(log10(DP(iKneeded))); ++i) name << "0";
-      name << iKneeded;
-    }
-    */
-    // From ABACUS++G_8 onwards: for Heisenberg, always scan over all momentum integers
-    //if (iKmin == iKmax) name << "_iK_" << iKmin;  else name << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     if (kBT > 0.0) name << "_kBT_" << kBT;
     if (whichDSF == 'q') name << "_N2_" << RefScanState.chain.Nsites;
 
@@ -155,10 +138,11 @@ namespace ABACUS {
   }
 
 
-  // One-D spinless fermions:
+  // One-D spinless fermions: IN DEVELOPMENT
   /*
-  void ODSLF_Data_File_Name (stringstream& name, char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax, DP kBT, int N2, string defaultScanStatename)
-  {
+    void ODSLF_Data_File_Name (stringstream& name, char whichDSF, DP Delta, int N, int M, int iKmin, int iKmax,
+    DP kBT, int N2, string defaultScanStatename)
+    {
     name << "ODSLF_";
     if (whichDSF == 'Z') name << "Z";
     else if (whichDSF == 'm') name << "cdag_c";
@@ -176,11 +160,11 @@ namespace ABACUS {
     if (whichDSF == 'q') name << "_N2_" << N2;
 
     return;
-  }
+    }
 
-  //void Data_File_Name (stringstream& name, char whichDSF, bool fixed_iK, int iKneeded, Heis_Bethe_State& State, Heis_Bethe_State& RefScanState)
-  void Data_File_Name (stringstream& name, char whichDSF, int iKmin, int iKmax, DP kBT, ODSLF_Bethe_State& State, ODSLF_Bethe_State& RefScanState, string defaultScanStatename)
-  {
+    void Data_File_Name (stringstream& name, char whichDSF, int iKmin, int iKmax, DP kBT,
+    ODSLF_Bethe_State& State, ODSLF_Bethe_State& RefScanState, string defaultScanStatename)
+    {
     name << "ODSLF_";
     if (whichDSF == 'Z') name << "Z";
     else if (whichDSF == 'm') name << "cdag_c";
@@ -195,16 +179,16 @@ namespace ABACUS {
     for (int i = 0; i < int(log10(DP(State.chain.Nsites/2))) - int(log10(DP(State.base.Mdown))); ++i) name << "0";
     name << State.base.Mdown;
     if (fixed_iK) {
-      name << "_iK_";
-      for (int i = 0; i < int(log10(DP(State.chain.Nsites/2))) - int(log10(DP(iKneeded))); ++i) name << "0";
-      name << iKneeded;
+    name << "_iK_";
+    for (int i = 0; i < int(log10(DP(State.chain.Nsites/2))) - int(log10(DP(iKneeded))); ++i) name << "0";
+    name << iKneeded;
     }
     if (iKmin == iKmax) name << "_iK_" << iKmin;  else name << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     if (kBT > 0.0) name << "_kBT_" << kBT;
     if (whichDSF == 'q') name << "_N2_" << RefScanState.chain.Nsites;
 
     return;
-  }
-*/
+    }
+  */
 
 } // namespace ABACUS
diff --git a/src/UTILS/Smoothen_RAW_into_SF.cc b/src/UTILS/Smoothen_RAW_into_SF.cc
index 638ddbf..72259c1 100644
--- a/src/UTILS/Smoothen_RAW_into_SF.cc
+++ b/src/UTILS/Smoothen_RAW_into_SF.cc
@@ -49,7 +49,6 @@ namespace ABACUS {
     DP omega;
     int iK;
     DP FF;
-    //int conv;
     DP dev;
     string label;
 
@@ -69,16 +68,13 @@ namespace ABACUS {
     DP SFfactor = 1.0;
 
     while (RAW_infile.peek() != EOF) {
-      //RAW_infile >> omega >> iK >> FF >> conv >> label;
       RAW_infile >> omega >> iK >> FF >> dev >> label;
       if (iK >= iKmin && iK <= iKmax  && fabs(omega) > 1.0e-8) { // remove connected part of DSF
-	//SSF[iK - iKmin] += FF * FF;
 	for (int deltaiK = -DiK; deltaiK <= DiK; ++deltaiK)
 	  if (iK + deltaiK >= iKmin && iK + deltaiK <= iKmax)
 	    SSF[iK + deltaiK - iKmin] += Kweight[abs(deltaiK)] * FF * FF;
 	for (int iomega = 0; iomega < Nom; ++iomega)
 	  if (big_gwidth_used > (d_omega = fabs(omegaout[iomega] - omega))) {
-	    //DSF[iomega][iK - iKmin] += FF * FF * exp(-d_omega*d_omega * oneovertwowidthsq);
 	    SFfactor = FF * FF * exp(-d_omega*d_omega * oneovertwowidthsq);
 	    ASF[iomega] += SFfactor;
 	    if (fabs(omega) > 1.0e-12) // exclude the delta function contribution coming from diagonal term, if present
@@ -107,7 +103,6 @@ namespace ABACUS {
 
     stringstream DSF_stringstream;    string DSF_string;
     DSF_stringstream << prefix;
-    //if (iKmax != iKmin) DSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     if (DiK > 0) DSF_stringstream << "_DiK_" << DiK;
     DSF_stringstream << "_ommin_"<< ommin << "_ommax_" << ommax << "_Nom_" << Nom << "_w_" << gwidth << ".dsf";
     DSF_string = DSF_stringstream.str();    const char* DSF_Cstr = DSF_string.c_str();
@@ -125,7 +120,6 @@ namespace ABACUS {
 
     stringstream SSF_stringstream;    string SSF_string;
     SSF_stringstream << prefix;
-    //if (iKmin != iKmax) SSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     if (DiK > 0) SSF_stringstream << "_DiK_" << DiK;
     SSF_stringstream << ".ssf";
     SSF_string = SSF_stringstream.str();    const char* SSF_Cstr = SSF_string.c_str();
@@ -143,7 +137,6 @@ namespace ABACUS {
 
     stringstream ASF_stringstream;    string ASF_string;
     ASF_stringstream << prefix;
-    //if (iKmax != iKmin) DSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     ASF_stringstream << "_ommin_"<< ommin << "_ommax_" << ommax << "_Nom_" << Nom << "_w_" << gwidth << ".asf";
     ASF_string = ASF_stringstream.str();    const char* ASF_Cstr = ASF_string.c_str();
 
@@ -192,7 +185,6 @@ namespace ABACUS {
     DP omega;
     int iK;
     DP FF;
-    //int conv;
     DP dev;
     string label;
 
@@ -214,10 +206,6 @@ namespace ABACUS {
     for (int ns = 0; ns < weight.size(); ++ns) {
 
       // Open the original raw file:
-      //stringstream RAW_stringstream;    string RAW_string;
-      //RAW_stringstream << prefix << ".raw";
-      //RAW_string = RAW_stringstream.str();
-      //const char* RAW_Cstr = RAW_string.c_str();
       const char* RAW_Cstr = rawfilename[ns].c_str();
 
       ifstream RAW_infile;
@@ -228,13 +216,11 @@ namespace ABACUS {
       }
 
       while (RAW_infile.peek() != EOF) {
-	//RAW_infile >> omega >> iK >> FF >> conv >> label;
 	RAW_infile >> omega >> iK >> FF >> dev >> label;
 	if (iK >= iKmin && iK <= iKmax && fabs(omega) > 1.0e-8) { // remove connected part of DSF)
 	  SSF[iK - iKmin] += weight[ns] * FF * FF;
 	  for (int iomega = 0; iomega < Nom; ++iomega)
 	    if (big_gwidth_used > (d_omega = fabs(omegaout[iomega] - omega))) {
-	      //DSF[iomega][iK - iKmin] += FF * FF * exp(-d_omega*d_omega * oneovertwowidthsq);
 	      SFfactor = weight[ns] * FF * FF * exp(-d_omega*d_omega * oneovertwowidthsq);
 	      ASF[iomega] += SFfactor;
 	      if (fabs(omega) > 1.0e-12) // exclude the delta function contribution coming from diagonal term, if present
@@ -264,7 +250,6 @@ namespace ABACUS {
 
     stringstream DSF_stringstream;    string DSF_string;
     DSF_stringstream << prefix;
-    //if (iKmax != iKmin) DSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     DSF_stringstream << "_ns_" << weight.size();
     if (DiK > 0) DSF_stringstream << "_DiK_" << DiK;
     DSF_stringstream << "_ommin_"<< ommin << "_ommax_" << ommax << "_Nom_" << Nom << "_w_" << gwidth << ".dsf";
@@ -283,7 +268,6 @@ namespace ABACUS {
 
     stringstream SSF_stringstream;    string SSF_string;
     SSF_stringstream << prefix;
-    //if (iKmin != iKmax) SSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     SSF_stringstream << "_ns_" << weight.size();
     SSF_stringstream << ".ssf";
     SSF_string = SSF_stringstream.str();    const char* SSF_Cstr = SSF_string.c_str();
@@ -301,7 +285,6 @@ namespace ABACUS {
 
     stringstream ASF_stringstream;    string ASF_string;
     ASF_stringstream << prefix;
-    //if (iKmax != iKmin) DSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     ASF_stringstream << "_ns_" << weight.size();
     ASF_stringstream << "_ommin_"<< ommin << "_ommax_" << ommax << "_Nom_" << Nom << "_w_" << gwidth << ".asf";
     ASF_string = ASF_stringstream.str();    const char* ASF_Cstr = ASF_string.c_str();
@@ -317,7 +300,6 @@ namespace ABACUS {
     ASF_outfile.close();
 
 
-
     // Check sums:
     DP sumdsf = 0.0;
     DP sumssf = 0.0;
@@ -361,7 +343,6 @@ namespace ABACUS {
     DP omega;
     int iK;
     DP FF;
-    //int conv;
     DP dev;
     string label;
 
@@ -373,7 +354,6 @@ namespace ABACUS {
     DP oneovertwowidthsq = 1.0/(2.0 * gwidth * gwidth);
 
     while (RAW_infile.peek() != EOF) {
-      //RAW_infile >> omega >> iK >> FF >> conv >> label;
       RAW_infile >> omega >> iK >> FF >> dev >> label;
       if (iK >= iKmin && iK <= iKmax && fabs(omega) > 1.0e-8) { // remove connected part of DSF)
 	for (int iomega = 0; iomega < Nom; ++iomega)
@@ -390,7 +370,6 @@ namespace ABACUS {
       ASF[iomega] *= normalization_used;
 
     // Output to .asf file
-
     stringstream ASF_stringstream;    string ASF_string;
     ASF_stringstream << prefix;
     //if (iKmax != iKmin) DSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
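// A minimal sketch of the smoothening step used in the routines above: each raw entry
// contributes |FF|^2 times a Gaussian of width w centred on its frequency omega0, accumulated
// over the omega grid. The 1/(sqrt(2*pi)*w) normalization, the momentum window (DiK, Kweight)
// and the cutoff on |omega - omega0| handled above are left out here; names are hypothetical.
#include <cmath>
#include <vector>

void add_gaussian_contribution (std::vector<double>& SF, const std::vector<double>& omegaout,
                                double omega0, double FFsq, double w)
{
  for (size_t i = 0; i < SF.size(); ++i) {
    double d_omega = omegaout[i] - omega0;
    SF[i] += FFsq * std::exp(-d_omega*d_omega/(2.0*w*w));
  }
}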
diff --git a/src/UTILS/Smoothen_RAW_into_SF_LiebLin_Scaled.cc b/src/UTILS/Smoothen_RAW_into_SF_LiebLin_Scaled.cc
index 2853a32..4365dcd 100644
--- a/src/UTILS/Smoothen_RAW_into_SF_LiebLin_Scaled.cc
+++ b/src/UTILS/Smoothen_RAW_into_SF_LiebLin_Scaled.cc
@@ -21,7 +21,7 @@ using namespace ABACUS;
 namespace ABACUS {
 
   DP Smoothen_RAW_into_SF_LiebLin_Scaled (string prefix, DP L, int N, int iKmin, int iKmax, int DiK,
-				       DP ommin, DP ommax, int Nom, DP width, DP normalization)
+					  DP ommin, DP ommax, int Nom, DP width, DP normalization)
   {
     // DiK is the (half-)window in iK which is averaged over. A single iK means DiK == 0.
 
@@ -51,7 +51,6 @@ namespace ABACUS {
     DP omega;
     int iK;
     DP FF;
-    //int conv;
     DP dev;
     string label;
 
@@ -76,9 +75,10 @@ namespace ABACUS {
     // For iK > N, there are N states.
 
     for (iK = iKmin; iK <= iKmax; ++iK)
-      //gwidth[iK - iKmin] = width * 2.0 * ABACUS::min( pow(twoPI * ABACUS::max(abs(iK),1)/L, 2.0), pow(twoPI * N/L, 2.0))/ABACUS::min(N, ABACUS::max(abs(iK), 1));
-    // Make sure the width does not become lower than the omegaout raster:
-      gwidth[iK - iKmin] = ABACUS::max(2.0 * (ommax - ommin)/Nom, width * 2.0 * ABACUS::min( pow(twoPI * ABACUS::max(abs(iK),1)/L, 2.0), pow(twoPI * N/L, 2.0))/ABACUS::min(N, ABACUS::max(abs(iK), 1)));
+      // Make sure the width does not become lower than the omegaout raster:
+      gwidth[iK - iKmin] = ABACUS::max(2.0 * (ommax - ommin)/Nom, width * 2.0
+				       * ABACUS::min( pow(twoPI * ABACUS::max(abs(iK),1)/L, 2.0),
+						      pow(twoPI * N/L, 2.0))/ABACUS::min(N, ABACUS::max(abs(iK), 1)));
 
 
     Vect_DP big_gwidth_used (iKmax - iKmin + 1);
@@ -95,14 +95,12 @@ namespace ABACUS {
 
     DP FFsq = 0.0;
     while (RAW_infile.peek() != EOF) {
-      //RAW_infile >> omega >> iK >> FF >> conv >> label;
       RAW_infile >> omega >> iK >> FF >> dev >> label;
       if (iK >= iKmin && iK <= iKmax && fabs(omega) > 1.0e-8) { // remove connected part of DSF
 	FFsq = FF * FF;
 	SSF[iK - iKmin] += FFsq;
 	for (int iomega = 0; iomega < Nom; ++iomega)
 	  if (big_gwidth_used[iK - iKmin] > (d_omega = fabs(omegaout[iomega] - omega)))
-	    //DSF[iomega][iK - iKmin] += FF * FF * normalization_used[iK - iKmin] * exp(-d_omega*d_omega * oneovertwowidthsq[iK - iKmin]);
 	    for (int deltaiK = -DiK; deltaiK <= DiK; ++deltaiK)
 	      if (iK + deltaiK >= iKmin && iK + deltaiK <= iKmax)
 		DSFS[iomega][iK + deltaiK - iKmin] += Kweight[abs(deltaiK)] * FFsq * normalization_used[iK + deltaiK - iKmin]
@@ -112,19 +110,13 @@ namespace ABACUS {
     RAW_infile.close();
 
     // Reset proper normalization:
-    //DP normalization_used = normalization * 1.0/(sqrt(twoPI) * gwidth); // Gaussian factor
     for (int iK = 0; iK < iKmax - iKmin + 1; ++iK) {
       SSF[iK] *= normalization/twoPI;
-    //FSR[iK] *= normalization_used;
-    //for (int iomega = 0; iomega < Nom; ++iomega)
-    //DSF[iomega][iK] *= normalization_used;
     }
 
     // Output to .dsfs and .ssf files
-
     stringstream DSFS_stringstream;    string DSFS_string;
     DSFS_stringstream << prefix;
-    //if (iKmax != iKmin) DSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     if (DiK > 0) DSFS_stringstream << "_DiK_" << DiK;
     DSFS_stringstream << "_ommin_"<< ommin << "_ommax_" << ommax << "_Nom_" << Nom << "_w_" << width << ".dsfs";
     DSFS_string = DSFS_stringstream.str();    const char* DSFS_Cstr = DSFS_string.c_str();
@@ -143,7 +135,6 @@ namespace ABACUS {
 
     stringstream SSF_stringstream;    string SSF_string;
     SSF_stringstream << prefix;
-    //if (iKmin != iKmax) SSF_stringstream << "_iKmin_" << iKmin << "_iKmax_" << iKmax;
     SSF_stringstream << ".ssf";
     SSF_string = SSF_stringstream.str();    const char* SSF_Cstr = SSF_string.c_str();
 
@@ -159,13 +150,10 @@ namespace ABACUS {
 
     // Check sums:
     DP sumdsf = 0.0;
-    //DP sumssf = 0.0;
     for (int iK = 0; iK < iKmax - iKmin + 1; ++iK) {
-      //sumssf += FSR[iK];
       for (int iomega = 0; iomega < Nom; ++iomega)
 	sumdsf += DSFS[iomega][iK];
     }
-    //sumssf /= (iKmax - iKmin + 1);
     sumdsf /= (iKmax - iKmin + 1) * Nom;
 
     return(sumdsf);
diff --git a/src/UTILS/Sort_RAW_File.cc b/src/UTILS/Sort_RAW_File.cc
index 3d4ffe1..67f162a 100644
--- a/src/UTILS/Sort_RAW_File.cc
+++ b/src/UTILS/Sort_RAW_File.cc
@@ -26,106 +26,99 @@ namespace ABACUS {
 
 
   void Sort_RAW_File (const char ff_file[], char optionchar, char whichDSF)
-    {
-      // Sorts FF in decreasing order for 'f' option, or energies in increasing order for option 'e', and writes .dat_srt file
+  {
+    // Sorts form factors in decreasing order for option 'f', or energies in increasing order for option 'e',
+    // and writes the result to a <ff_file>_srt_<optionchar> file.
 
-      if (!(optionchar == 'e' || optionchar == 'f')) ABACUSerror("Wrong option in Sort_FF");
+    if (!(optionchar == 'e' || optionchar == 'f')) ABACUSerror("Wrong option in Sort_RAW_File");
 
-      // Check size of threads file:
-      struct stat statbuf;
+    // Check size of threads file:
+    struct stat statbuf;
 
-      stat (ff_file, &statbuf);
-      int filesize = statbuf.st_size;
+    stat (ff_file, &statbuf);
+    int filesize = statbuf.st_size;
 
-      // Determine the number of entries approximately
-      int entry_size = 2* sizeof(float) + 2*sizeof(int);
+    // Determine the number of entries approximately
+    int entry_size = 2* sizeof(float) + 2*sizeof(int);
 
-      //const int MAXDATA = 50000000;
-      const int MAXDATA = filesize/entry_size + 10;
+    //const int MAXDATA = 50000000;
+    const int MAXDATA = filesize/entry_size + 10;
 
-      DP* omega = new DP[MAXDATA];
-      int* iK = new int[MAXDATA];
-      DP* ff = new DP[MAXDATA];
-      DP* ff_im = new DP[MAXDATA];
-      //int* conv = new int[MAXDATA];
-      DP* dev = new DP[MAXDATA];
-      string* label = new string[MAXDATA];
+    DP* omega = new DP[MAXDATA];
+    int* iK = new int[MAXDATA];
+    DP* ff = new DP[MAXDATA];
+    DP* ff_im = new DP[MAXDATA];
+    DP* dev = new DP[MAXDATA];
+    string* label = new string[MAXDATA];
 
-      ifstream infile;
-      infile.open(ff_file);
+    ifstream infile;
+    infile.open(ff_file);
 
-      if (infile.fail()) ABACUSerror("The input file was not opened successfully in Sort_RAW_File. ");
+    if (infile.fail()) ABACUSerror("The input file was not opened successfully in Sort_RAW_File. ");
 
-      stringstream outfilename;
-      string outfilename_string;
-      outfilename << ff_file << "_srt_" << optionchar;
-      outfilename_string = outfilename.str();
-      const char* outfilename_c_str = outfilename_string.c_str();
+    stringstream outfilename;
+    string outfilename_string;
+    outfilename << ff_file << "_srt_" << optionchar;
+    outfilename_string = outfilename.str();
+    const char* outfilename_c_str = outfilename_string.c_str();
 
-      ofstream outfile;
-      outfile.open(outfilename_c_str);
-      outfile.precision(16);
+    ofstream outfile;
+    outfile.open(outfilename_c_str);
+    outfile.precision(16);
 
-      int Ndata = 0;
-      while (((infile.peek()) != EOF) && (Ndata < MAXDATA)) {
+    int Ndata = 0;
+    while (((infile.peek()) != EOF) && (Ndata < MAXDATA)) {
 
-      	infile >> omega[Ndata];
-	infile >> iK[Ndata];
-	if (whichDSF != 'Z') infile >> ff[Ndata];
-	if (whichDSF == 'q') infile >> ff_im[Ndata];  // imaginary part of overlap, quench case
-	//infile >> conv[Ndata];
-	infile >> dev[Ndata];
-	infile >> label[Ndata];
+      infile >> omega[Ndata];
+      infile >> iK[Ndata];
+      if (whichDSF != 'Z') infile >> ff[Ndata];
+      if (whichDSF == 'q') infile >> ff_im[Ndata];  // imaginary part of overlap, quench case
+      infile >> dev[Ndata];
+      infile >> label[Ndata];
 
-	Ndata++;
+      Ndata++;
+    }
+    infile.close();
+
+    int* index = new int[Ndata];
+    DP* ffsq = new DP[Ndata];
+
+    for (int i = 0; i < Ndata; ++i) index[i] = i;
+    for (int i = 0; i < Ndata; ++i) ffsq[i] = ff[i] * ff[i];
+
+    if (optionchar == 'f') QuickSort(ffsq, index, 0, Ndata - 1);
+    else if (optionchar == 'e') QuickSort(omega, index, 0, Ndata - 1);
+
+    for (int i = 0; i < Ndata; i++) {
+
+      if (i > 0) outfile << endl;
+      if (optionchar == 'f') {
+
+	outfile << omega[index[Ndata - 1 - i] ] << "\t" << iK[index[Ndata - 1 - i] ];
+	if (whichDSF != 'Z') outfile << "\t" << ff[index[Ndata - 1 - i] ];
+	if (whichDSF == 'q') outfile << "\t" << ff_im[index[Ndata - 1 - i] ];
+	outfile << "\t" << dev[index[Ndata - 1 - i] ] << "\t" << label[index[Ndata - 1 - i] ];
       }
-      infile.close();
-
-      int* index = new int[Ndata];
-      DP* ffsq = new DP[Ndata];
-
-      for (int i = 0; i < Ndata; ++i) index[i] = i;
-      for (int i = 0; i < Ndata; ++i) ffsq[i] = ff[i] * ff[i];
-
-      if (optionchar == 'f') QuickSort(ffsq, index, 0, Ndata - 1);
-      else if (optionchar == 'e') QuickSort(omega, index, 0, Ndata - 1);
-
-      for (int i = 0; i < Ndata; i++) {
-
-	if (i > 0) outfile << endl;
-	if (optionchar == 'f') {
-
-	  outfile << omega[index[Ndata - 1 - i] ] << "\t" << iK[index[Ndata - 1 - i] ];
-	  if (whichDSF != 'Z') outfile << "\t" << ff[index[Ndata - 1 - i] ];
-	  if (whichDSF == 'q') outfile << "\t" << ff_im[index[Ndata - 1 - i] ];
-	  outfile << "\t" //<< conv[index[Ndata - 1 - i] ] << "\t"
-		  << dev[index[Ndata - 1 - i] ] << "\t"
-		  << label[index[Ndata - 1 - i] ];
-	}
-	else if (optionchar == 'e') {
-	  outfile << omega[i] << "\t" << iK[index[i] ];
-	  if (whichDSF != 'Z') outfile << "\t" << ff[index[i] ];
-	  if (whichDSF == 'q') outfile << "\t" << ff_im[index[i] ];
-	  outfile << "\t" //<< conv[index[i] ] << "\t"
-		  << dev[index[i] ] << "\t"
-		  << label[index[i] ];
-	}
+      else if (optionchar == 'e') {
+	outfile << omega[i] << "\t" << iK[index[i] ];
+	if (whichDSF != 'Z') outfile << "\t" << ff[index[i] ];
+	if (whichDSF == 'q') outfile << "\t" << ff_im[index[i] ];
+	outfile << "\t" << dev[index[i] ] << "\t" << label[index[i] ];
       }
-
-      outfile.close();
-
-      delete[] omega;
-      delete[] iK;
-      delete[] ff;
-      delete[] ff_im;
-      //delete[] conv;
-      delete[] dev;
-      delete[] label;
-
-      delete[] index;
-      delete[] ffsq;
-
-      return;
     }
 
+    outfile.close();
+
+    delete[] omega;
+    delete[] iK;
+    delete[] ff;
+    delete[] ff_im;
+    delete[] dev;
+    delete[] label;
+
+    delete[] index;
+    delete[] ffsq;
+
+    return;
+  }
+
 } // namespace ABACUS
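
An aside on the surviving logic of Sort_RAW_File: it sorts an index array by a key array and then streams the records out through that permutation. A minimal standalone sketch of the same pattern, using std::sort in place of ABACUS's QuickSort and made-up data:

// Minimal sketch of the index-sort pattern in Sort_RAW_File, with std::sort
// standing in for ABACUS's QuickSort: sort an index array by a key array,
// then write the records out through the resulting permutation.
#include <algorithm>
#include <iostream>
#include <vector>

int main ()
{
  std::vector<double> ffsq  = {0.1, 2.5, 0.7, 1.3};  // hypothetical |FF|^2 keys
  std::vector<int>    index = {0, 1, 2, 3};

  // Ascending sort of the indices by key; walking the result backwards
  // reproduces the decreasing-|FF| ordering written out for option 'f'.
  std::sort (index.begin(), index.end(),
             [&ffsq](int a, int b) { return ffsq[a] < ffsq[b]; });

  for (int i = int(index.size()) - 1; i >= 0; --i)
    std::cout << index[i] << "\t" << ffsq[index[i]] << std::endl;

  return 0;
}
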
diff --git a/src/UTILS/State_Label.cc b/src/UTILS/State_Label.cc
index d2d0874..f6eaaf7 100644
--- a/src/UTILS/State_Label.cc
+++ b/src/UTILS/State_Label.cc
@@ -50,9 +50,7 @@ namespace ABACUS {
   {
     string::size_type i1 = label.find(LABELSEP);
 
-    //cout << "Extracting Base_Label from label " << label << endl;
     string baselabel = label.substr(0, i1);
-    //cout << "Extracted Base_Label " << baselabel << endl;
 
     return(baselabel);
   }
@@ -76,7 +74,6 @@ namespace ABACUS {
     int remainder = int_to_convert;
     stringstream result_strstrm;
     do {
-      //cout << "remainder = " << remainder << "\tnext index = " << remainder - ABACUScodingsize * (remainder/ABACUScodingsize) << endl;
       result_strstrm << ABACUScoding[remainder - ABACUScodingsize * (remainder/ABACUScodingsize)];
       remainder /= ABACUScodingsize;
     } while (remainder > 0);
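
For reference, the loop kept above in Convert_POSINT_to_STR is a base-N encoding with a custom digit alphabet, emitted least-significant digit first. A minimal sketch under that reading, with a made-up 16-symbol alphabet standing in for ABACUScoding:

// Convert a positive integer to a string in base digits.size(): repeatedly
// take the remainder modulo the alphabet size and emit the corresponding
// digit, least-significant digit first. The alphabet here is illustrative;
// the real ABACUScoding table is defined in the ABACUS headers.
#include <iostream>
#include <sstream>
#include <string>

std::string to_base_n (int value, const std::string& digits)
{
  const int base = int(digits.size());
  std::ostringstream out;
  do {
    out << digits[value % base];  // same as value - base * (value / base)
    value /= base;
  } while (value > 0);
  return out.str();               // digits appear least-significant first
}

int main ()
{
  const std::string digits = "0123456789abcdef";       // hypothetical alphabet
  std::cout << to_base_n (300, digits) << std::endl;   // prints "c21", i.e. 0x12c reversed
  return 0;
}
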
@@ -120,22 +117,13 @@ namespace ABACUS {
     string nexclabel = label.substr(i1+1, i2-i1-1);
     string Ix2exclabel = label.substr(i2+1);
 
-    //cout << "label: " << label << endl;
-    //cout << "baselabel: " << baselabel << endl;
-    //cout << "nexclabel: " << nexclabel << endl;
-    //cout << "Ix2exclabel: " << Ix2exclabel << endl;
-
     // Read off the base label: count the number of TYPESEP in baselabel
     int nbar = 0;
     for (unsigned int i = 0; i < baselabel.length(); ++i) if (baselabel[i] == TYPESEP) nbar++;
 
-    //cout << "nbar = " << nbar << endl;
-
     // There are now nbar + 1 base label data:
     int ntypes = nbar + 1;
 
-    //cout << "ntypes = " << ntypes << endl;
-
     Vect<int> type(ntypes); // integer type labels of the types present
     type[0] = 0; // always the case by convention
     Vect<int> M(ntypes); // how many particles of each type
@@ -183,8 +171,6 @@ namespace ABACUS {
 
     } // else if ntypes > 1
 
-    //cout << "ntypes = " << ntypes << "\tM = " << M << endl;
-
     // baselabel is now completely read
 
     // Define some dud nex, Ix2old, Ix2exc:
@@ -213,22 +199,14 @@ namespace ABACUS {
     string nexclabel = label.substr(i1+1, i2-i1-1);
     string Ix2exclabel = label.substr(i2+1);
 
-    //cout << "label: " << label << endl;
-    //cout << "baselabel: " << baselabel << endl;
-    //cout << "nexclabel: " << nexclabel << endl;
-    //cout << "Ix2exclabel: " << Ix2exclabel << endl;
 
     // Read off the base label: count the number of TYPESEP in baselabel
     int nbar = 0;
     for (unsigned int i = 0; i < baselabel.length(); ++i) if (baselabel[i] == TYPESEP) nbar++;
 
-    //cout << "nbar = " << nbar << endl;
-
     // There are now nbar + 1 base label data:
     int ntypes = nbar + 1;
 
-    //cout << "ntypes = " << ntypes << endl;
-
     Vect<int> type(ntypes); // integer type labels of the types present
     type[0] = 0; // always the case by convention
     Vect<int> M(ntypes); // how many particles of each type
@@ -317,10 +295,6 @@ namespace ABACUS {
 
     // nexc is now completely read
 
-    //cout << "typefound: " << type << endl;
-    //cout << "Mfound: " << M << endl;
-    //cout << "nexcfound: " << nexc << endl;
-
 
     // Now read off the (compressed) jexc and Ix2exc vectors of vectors:
     Vect<Vect<int> > Ix2old(ntypes); // which Ix2 will be excited
@@ -340,22 +314,17 @@ namespace ABACUS {
       }
       for (int iexc = 0; iexc < nexc[itype]; ++iexc) {
 	//string::size_type i1 = Ix2exclabelremaining.find(INEXCSEP);
-	string::size_type i2 = (iexc < nexc[itype] - 1 ? Ix2exclabelremaining.find(EXCSEP) : Ix2exclabelremaining.find(TYPESEP)); // careful here!
-	//cout << "\t" << itype << "\t" << iexc << "\t" << Ix2exclabelremaining << "\t" << i1 << "\t" << i2 << endl;
-	//string Ix2oldread = Ix2exclabelremaining.substr(0,i1);
-	//string Ix2excread = Ix2exclabelremaining.substr(i1+1,i2-i1-1);
+	string::size_type i2 = (iexc < nexc[itype] - 1 ? Ix2exclabelremaining.find(EXCSEP)
+				: Ix2exclabelremaining.find(TYPESEP)); // careful here!
 	string Ix2excIDread = Ix2exclabelremaining.substr(0,i2);
 	int Ix2excID = Convert_STR_to_POSINT(Ix2excIDread);
-	Ix2old[itype][iexc] = OriginIx2[type[itype] ][Ix2excID - M[itype] * (Ix2excID/M[itype])]; // index is remainder w/r to nr of strings of this type
+	Ix2old[itype][iexc] = OriginIx2[type[itype] ][Ix2excID - M[itype] * (Ix2excID/M[itype])];
+	// index is the remainder with respect to the number of strings of this type
 	// Convention: if remainder is even, moving left. If odd, moving right.
 	// 0 means move one unit left, 1 means move one unit right, etc.
-	//Ix2exc[itype][iexc] = Ix2old[itype][iexc] + (Ix2excID/M[itype] % 2 ? 2 : -2) * (Ix2excID/(2 * M[itype]) + 0);
-	Ix2exc[itype][iexc] = Ix2old[itype][iexc] + (Ix2excID/M[itype] % 2 ? 2 : -2) * (Ix2excID/(2 * M[itype]) + 1); // ABACUS++T_8 onwards
+	Ix2exc[itype][iexc] = Ix2old[itype][iexc] + (Ix2excID/M[itype] % 2 ? 2 : -2)
+	  * (Ix2excID/(2 * M[itype]) + 1); // ABACUS++T_8 onwards
 
-	//istringstream Ix2oldreadbuffer (Ix2oldread);
-	//Ix2oldreadbuffer >> Ix2old[itype][iexc];
-	//istringstream Ix2excreadbuffer (Ix2excread);
-	//Ix2excreadbuffer >> Ix2exc[itype][iexc];
 	// Remove everything up to index i2 in Ix2exclabelremaining
 	Ix2exclabelremaining = Ix2exclabelremaining.substr(i2+1);
       }
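
The arithmetic retained above packs, for each excitation of a type with M strings, the hole position and the displacement into one integer ID. A minimal sketch of that decoding, following the convention stated in the comments (illustrative identifiers, not the ABACUS API):

// Decode a compressed excitation ID for a type with M strings:
// ID % M picks the origin quantum number (the hole), the parity of ID / M
// picks the direction (even: left, odd: right), and ID / (2*M) + 1 is the
// number of two-unit steps (ABACUS++T_8 convention).
#include <iostream>
#include <vector>

int main ()
{
  std::vector<int> OriginIx2 = {-3, -1, 1, 3};  // hypothetical Ix2 of one string type
  const int M = int(OriginIx2.size());

  for (int ID = 0; ID < 3 * M; ++ID) {
    int Ix2old = OriginIx2[ID % M];
    int Ix2exc = Ix2old + (ID / M % 2 ? 2 : -2) * (ID / (2 * M) + 1);
    std::cout << ID << ":\t" << Ix2old << " -> " << Ix2exc << std::endl;
  }
  return 0;
}
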
@@ -363,47 +332,27 @@ namespace ABACUS {
 
     // Now read off the Ix2old, Ix2exc of the last type: this is always done
     for (int iexc = 0; iexc < nexc[ntypes - 1] - 1; ++iexc) {
-      //string::size_type i1 = Ix2exclabelremaining.find(INEXCSEP);
       string::size_type i2 = Ix2exclabelremaining.find(EXCSEP);
-      //string Ix2oldread = Ix2exclabelremaining.substr(0,i1);
-      //string Ix2excread = Ix2exclabelremaining.substr(i1+1,i2-i1-1);
       string Ix2excIDread = Ix2exclabelremaining.substr(0,i2);
       int Ix2excID = Convert_STR_to_POSINT(Ix2excIDread);
-      Ix2old[ntypes - 1][iexc] = OriginIx2[type[ntypes - 1] ][Ix2excID - M[ntypes - 1] * (Ix2excID/M[ntypes - 1])]; // index is remainder w/r to nr of strings of this type
+      Ix2old[ntypes - 1][iexc] = OriginIx2[type[ntypes - 1] ][Ix2excID - M[ntypes - 1] * (Ix2excID/M[ntypes - 1])];
+      // index is the remainder with respect to the number of strings of this type
       // Convention: if remainder is even, moving left. If odd, moving right.
-      //Ix2exc[ntypes - 1][iexc] = Ix2old[ntypes - 1][iexc] + (Ix2excID/M[ntypes - 1] % 2 ? 2 : -2) * (Ix2excID/(2 * M[ntypes - 1]) + 0);
-      Ix2exc[ntypes - 1][iexc] = Ix2old[ntypes - 1][iexc] + (Ix2excID/M[ntypes - 1] % 2 ? 2 : -2) * (Ix2excID/(2 * M[ntypes - 1]) + 1); // ABACUS++T_8 onwards
+      Ix2exc[ntypes - 1][iexc] = Ix2old[ntypes - 1][iexc] + (Ix2excID/M[ntypes - 1] % 2 ? 2 : -2)
+	* (Ix2excID/(2 * M[ntypes - 1]) + 1); // ABACUS++T_8 onwards
 
-      //istringstream Ix2oldreadbuffer (Ix2oldread);
-      //Ix2oldreadbuffer >> Ix2old[ntypes - 1][iexc];
-      //istringstream Ix2excreadbuffer (Ix2excread);
-      //Ix2excreadbuffer >> Ix2exc[ntypes - 1][iexc];
       // Remove everything up to index i2 in Ix2exclabelremaining
       Ix2exclabelremaining = Ix2exclabelremaining.substr(i2+1);
     }
 
     // Now read off the last pair:
     int Ix2excID = Convert_STR_to_POSINT(Ix2exclabelremaining);
-    Ix2old[ntypes - 1][ABACUS::max(nexc[ntypes - 1] - 1,0)] = OriginIx2[type[ntypes - 1] ][Ix2excID - M[ntypes - 1] * (Ix2excID/M[ntypes - 1])]; // index is remainder w/r to nr of strings of this type
+    Ix2old[ntypes - 1][ABACUS::max(nexc[ntypes - 1] - 1,0)] =
+      OriginIx2[type[ntypes - 1] ][Ix2excID - M[ntypes - 1] * (Ix2excID/M[ntypes - 1])];
+    // index is the remainder with respect to the number of strings of this type
     // Convention: if remainder is even, moving left. If odd, moving right.
-    //Ix2exc[ntypes - 1][ABACUS::max(nexc[ntypes - 1] - 1,0)] = Ix2old[ntypes - 1][nexc[ntypes - 1] - 1] + (Ix2excID/M[ntypes - 1] % 2 ? 2 : -2) * (Ix2excID/(2 * M[ntypes - 1]) + 0);
-    Ix2exc[ntypes - 1][ABACUS::max(nexc[ntypes - 1] - 1,0)] = Ix2old[ntypes - 1][nexc[ntypes - 1] - 1] + (Ix2excID/M[ntypes - 1] % 2 ? 2 : -2) * (Ix2excID/(2 * M[ntypes - 1]) + 1); // ABACUS++T_8 onwards
-
-    //string::size_type ilast = Ix2exclabelremaining.find(INEXCSEP);
-    //string Ix2oldread = Ix2exclabelremaining.substr(0,ilast);
-    //string Ix2excread = Ix2exclabelremaining.substr(ilast+1);
-    //istringstream Ix2oldreadbuffer (Ix2oldread);
-    //Ix2oldreadbuffer >> Ix2old[ntypes - 1][nexc[ntypes - 1] - 1];
-    //istringstream Ix2excreadbuffer (Ix2excread);
-    //Ix2excreadbuffer >> Ix2exc[ntypes - 1][nexc[ntypes - 1] - 1];
-
-    //cout << "nexc = " << endl;
-    //cout << nexc << endl;
-    //cout << "Ix2old and Ix2exc: " << endl;
-    //for (int itype = 0; itype < ntypes; ++itype) {
-    //cout << "Ix2old["<< itype << "]: " << Ix2old[itype] << endl;
-    //cout << "Ix2exc["<< itype << "]: " << Ix2exc[itype] << endl;
-    //}
+    Ix2exc[ntypes - 1][ABACUS::max(nexc[ntypes - 1] - 1,0)] = Ix2old[ntypes - 1][nexc[ntypes - 1] - 1]
+      + (Ix2excID/M[ntypes - 1] % 2 ? 2 : -2) * (Ix2excID/(2 * M[ntypes - 1]) + 1); // ABACUS++T_8 onwards
 
     State_Label_Data labeldata (type, M, nexc, Ix2old, Ix2exc);
 
@@ -421,43 +370,28 @@ namespace ABACUS {
   {
     // This function produces a compressed label.
 
-    //cout << "Label A" << endl;
-
-    //cout << "\t" << data.M.size() << endl;
-    //cout << "\t" << data.M[0] << endl;
-
     string label;
 
     // Write the base:
     // First, particles of type 0:
-    //cout << "Label A2" << endl;
     stringstream M0out;
-    //cout << "Label A3" << endl;
-    //cout << "\t" << data.M[0] << endl;
     M0out << data.M[0];
-    //cout << "Label A4" << endl;
     label += M0out.str();
-    //cout << "Label A5" << endl;
 
     for (int itype = 1; itype < data.M.size(); ++itype) {
-      //cout << "\ta" << itype << "\t" << data.M.size() << endl;
       if (data.M[itype] > 0) {
 	label += TYPESEP;
 	stringstream typeout;
 	typeout << data.type[itype];
 	label += typeout.str();
 	label += EXCSEP;
-	//cout << "\tc" << endl;
 	stringstream Mout;
 	Mout << data.M[itype];
 	label += Mout.str();
-	//cout << "\td" << endl;
       }
     }
     label += LABELSEP;
 
-    //cout << "Label B" << endl;
-
     // Now the nexc:
     stringstream nexc0out;
     nexc0out << data.nexc[0];
@@ -470,8 +404,6 @@ namespace ABACUS {
     }
     label += LABELSEP;
 
-    //cout << "Label C" << endl;
-
     // Now the displacements:
     // The conventions are as follows.
     // For each excitation, an integer number ID is given according to the following rules:
@@ -489,15 +421,15 @@ namespace ABACUS {
 	int holeindex = -1;
 	do {
 	  holeindex++;
-	} while (OriginIx2[data.type[itype] ][holeindex] != data.Ix2old[itype][iexc] && holeindex < OriginIx2[data.type[itype] ].size() - 1);
-	if (holeindex == OriginIx2[data.type[itype] ].size()) ABACUSerror("Going out of bounds in Compress_Label.");
+	} while (OriginIx2[data.type[itype] ][holeindex] != data.Ix2old[itype][iexc]
+		 && holeindex < OriginIx2[data.type[itype] ].size() - 1);
+	if (holeindex == OriginIx2[data.type[itype] ].size())
+	  ABACUSerror("Going out of bounds in Compress_Label.");
 	excID = excID * data.M[itype] + holeindex;
 	label += Convert_POSINT_to_STR(excID);
       } // for iexc
     } // for itype
 
-    //cout << "Label D" << endl;
-
     return(label);
   }
 
@@ -512,13 +444,17 @@ namespace ABACUS {
   {
     // This function does not assume any ordering of the Ix2.
 
-    if (ScanIx2.size() != OriginIx2.size()) ABACUSerror("ScanIx2.size() != OriginIx2.size() in Find_Label.");
-    for (int i = 0; i < ScanIx2.size(); ++i) if (ScanIx2[i].size() != OriginIx2[i].size()) ABACUSerror("ScanIx2[i].size() != OriginIx2[i].size() in Find_Label.");
+    if (ScanIx2.size() != OriginIx2.size())
+      ABACUSerror("ScanIx2.size() != OriginIx2.size() in Find_Label.");
+    for (int i = 0; i < ScanIx2.size(); ++i)
+      if (ScanIx2[i].size() != OriginIx2[i].size())
+	ABACUSerror("ScanIx2[i].size() != OriginIx2[i].size() in Find_Label.");
 
     // Set the state ulabel:
     // Count the number of types present:
     int ntypespresent = 0;
-    for (int is = 0; is < ScanIx2.size(); ++is) if (is == 0 || ScanIx2[is].size() > 0) ntypespresent++; // type 0 is by default always present
+    for (int is = 0; is < ScanIx2.size(); ++is)
+      if (is == 0 || ScanIx2[is].size() > 0) ntypespresent++; // type 0 is by default always present
 
     Vect<int> type_ref(ntypespresent);
     Vect<int> M_ref(ntypespresent);
@@ -541,10 +477,15 @@ namespace ABACUS {
     for (int it = 0; it < ntypespresent; ++it) Ix2exc_ref[it] = Vect<int>(ABACUS::max(nexc_ref[it],1));
     for (int it = 0; it < ntypespresent; ++it) {
       int nexccheck = 0;
-      for (int i = 0; i < M_ref[it]; ++i) if (!OriginIx2[type_ref[it] ].includes(ScanIx2[type_ref[it] ][i])) Ix2exc_ref[it][nexccheck++] = ScanIx2[type_ref[it] ][i];
-      if (nexccheck != nexc_ref[it]) ABACUSerror("Counting excitations wrong (1) in Return_State_Label");
+      for (int i = 0; i < M_ref[it]; ++i)
+	if (!OriginIx2[type_ref[it] ].includes(ScanIx2[type_ref[it] ][i]))
+	  Ix2exc_ref[it][nexccheck++] = ScanIx2[type_ref[it] ][i];
+      if (nexccheck != nexc_ref[it])
+	ABACUSerror("Counting excitations wrong (1) in Return_State_Label");
       nexccheck = 0;
-      for (int i = 0; i < M_ref[it]; ++i) if (!ScanIx2[type_ref[it] ].includes(OriginIx2[type_ref[it] ][i])) Ix2old_ref[it][nexccheck++] = OriginIx2[type_ref[it] ][i];
+      for (int i = 0; i < M_ref[it]; ++i)
+	if (!ScanIx2[type_ref[it] ].includes(OriginIx2[type_ref[it] ][i]))
+	  Ix2old_ref[it][nexccheck++] = OriginIx2[type_ref[it] ][i];
       if (nexccheck != nexc_ref[it]) {
 	cout << OriginIx2 << endl;
 	cout << ScanIx2 << endl;
@@ -572,19 +513,16 @@ namespace ABACUS {
   }
 
 
-  //bool Set_to_Label (string label_ref, Vect<Vect<int> >& Ix2, const Vect<Vect<int> >& OriginIx2)
+
   Vect<Vect<int> > Return_Ix2_from_Label (string label_ref, const Vect<Vect<int> >& OriginIx2)
   {
     // ASSUMPTIONS:
     // OriginIx2 is ordered.
 
-    //Ix2 = OriginIx2; // this will fail if the sizes are incompatible
     Vect<Vect<int> > Ix2 = OriginIx2; // this will fail if the sizes are incompatible
 
     State_Label_Data labeldata = Read_State_Label (label_ref, OriginIx2);
 
-    //cout << "Read label OK" << endl;
-
     // Now set the excitations:
     for (int it = 0; it < labeldata.type.size(); ++it)
       for (int iexc = 0; iexc < labeldata.nexc[it]; ++iexc)
@@ -596,12 +534,9 @@ namespace ABACUS {
     // Now reorder the Ix2 to follow convention:
     for (int il = 0; il < Ix2.size(); ++il) Ix2[il].QuickSort();
 
-    //cout << "Ix2 found = " << Ix2 << endl;
-
     return(Ix2);
-    //return(true);
   }
-  // Specialization to Lieb-Liniger:
+
   Vect<int> Return_Ix2_from_Label (string label_ref, const Vect<int>& OriginIx2)
   {
     Vect<Vect<int> > OriginIx2here(1);
@@ -611,47 +546,4 @@ namespace ABACUS {
   }
 
 
-  /* DEPRECATED ++G_5
-  // Conversion from one to the other:
-  string Compress_Ulabel (string ulabel_ref, const Vect<Vect<int> >& OriginIx2)
-  {
-    // From a normal label, return a compressed one.
-    State_Label_Data data = Read_State_Ulabel (ulabel_ref);
-
-    return(Return_State_Label(data, OriginIx2));
-  }
-
-  string Compress_Ulabel (string ulabel_ref, const Vect<int>& OriginIx2)
-  // if there is only one type
-  {
-    // From a normal label, return a compressed one.
-    Vect<Vect<int> > OriginIx2here(1);
-    OriginIx2here[0] = OriginIx2;
-
-    State_Label_Data data = Read_State_Ulabel (ulabel_ref);
-
-    return(Return_State_Label(data, OriginIx2here));
-  }
-
-  string Uncompress_Label (string label_ref, const Vect<Vect<int> >& OriginIx2)
-  {
-    // From a compressed label, return a normal one.
-    State_Label_Data data = Read_State_Label (label_ref, OriginIx2);
-
-    return(Return_State_Ulabel(data));
-  }
-
-  string Uncompress_Label (string label_ref, const Vect<int>& OriginIx2)
-  // if there is only one type
-  {
-    // From a compressed label, return a normal one.
-    Vect<Vect<int> > OriginIx2here(1);
-    OriginIx2here[0] = OriginIx2;
-
-    State_Label_Data data = Read_State_Label (label_ref, OriginIx2here);
-
-    return(Return_State_Ulabel(data));
-  }
-  */
-
 } // namespace ABACUS
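
Taken together, these routines treat a compressed label as nothing more than the differences between a scan state's quantum numbers and the origin configuration: which entries are vacated and where they moved. A minimal sketch of that round trip with plain std::vector in place of ABACUS's Vect (the data are made up):

// Label a state by its differences from the origin set of quantum numbers,
// and reconstruct it by substituting those differences back and re-sorting.
#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

int main ()
{
  std::vector<int> origin = {-3, -1, 1, 3};
  std::vector<int> scan   = {-3, -1, 3, 7};   // one quantum number moved: 1 -> 7

  // "Label" data: vacated origin entries and the entries replacing them.
  std::vector<int> Ix2old, Ix2exc;
  std::set_difference (origin.begin(), origin.end(), scan.begin(), scan.end(),
                       std::back_inserter (Ix2old));
  std::set_difference (scan.begin(), scan.end(), origin.begin(), origin.end(),
                       std::back_inserter (Ix2exc));

  // Reconstruction: start from the origin, substitute, and re-sort.
  std::vector<int> rebuilt = origin;
  for (std::size_t i = 0; i < Ix2old.size(); ++i)
    *std::find (rebuilt.begin(), rebuilt.end(), Ix2old[i]) = Ix2exc[i];
  std::sort (rebuilt.begin(), rebuilt.end());

  for (std::size_t i = 0; i < rebuilt.size(); ++i) std::cout << rebuilt[i] << " ";
  std::cout << std::endl;  // prints the scan state: -3 -1 3 7
  return 0;
}
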
diff --git a/src/XXX_VOA/SF_4p_client.cc b/src/XXX_VOA/SF_4p_client.cc
index 7c83a6f..657be29 100755
--- a/src/XXX_VOA/SF_4p_client.cc
+++ b/src/XXX_VOA/SF_4p_client.cc
@@ -41,9 +41,6 @@ namespace ABACUS {
     MPI_Comm_rank (comm, &rank);
 
     MPI_Recv (client_request, client_request_size, MPI_DOUBLE, server, MPI_ANY_TAG, comm, &status);
-    //cout << "Client of rank " << rank << ":  received request ";
-    //for (int i = 0; i < client_request_size; ++i) cout << client_request[i] << " ";
-    //cout << "with tag " << status.MPI_TAG << endl;
 
 
     while (status.MPI_TAG) {
@@ -59,112 +56,12 @@ namespace ABACUS {
       client_result[7] = SF_4p_opt (client_request[2], client_request[3], client_request[4],
 				    int(client_request[5]), int(client_request[6]), Itable);
       MPI_Send (client_result, client_result_size, MPI_DOUBLE, server, status.MPI_TAG, comm);
-      //cout << "Client of rank " << rank << " sending complete" << endl;
 
       // Wait for subsequent request from server
       MPI_Recv (client_request, client_request_size, MPI_DOUBLE, server, MPI_ANY_TAG, comm, &status);
-      //cout << "Client of rank " << rank << ":  received request ";
-      //for (int i = 0; i < client_request_size; ++i) cout << client_request[i] << " ";
-      //cout << "with tag " << status.MPI_TAG << endl;
-
-    }
-
-    return;
-  }
-  /*
-  void SF_4p_kwKW_alpha_client (MPI_Comm comm, DP req_prec, int max_rec, I_table Itable)
-  {
-    int server = 0;
-    int rank;
-
-    MPI_Status status;
-
-    int client_request_size = 2;
-    DP client_request[client_request_size]; // this is k, alpha
-    int client_result_size = 3;
-    DP client_result[client_result_size]; // this is k, alpha, SF_4p_kwKW_alpha
-
-    MPI_Comm_rank (comm, &rank);
-
-    MPI_Recv (client_request, client_request_size, MPI_DOUBLE, server, MPI_ANY_TAG, comm, &status);
-    //cout << "Client of rank " << rank << ":  received request ";
-    //for (int i = 0; i < client_request_size; ++i) cout << client_request[i] << " ";
-    //cout << "with tag " << status.MPI_TAG << endl;
-
-    Vect_DP args_to_SF_4p_kwKW_alpha(4);
-    args_to_SF_4p_kwKW_alpha[2] = req_prec;
-    args_to_SF_4p_kwKW_alpha[3] = DP(max_rec);
-
-    while (status.MPI_TAG) {
-
-      args_to_SF_4p_kwKW_alpha[0] = client_request[0];
-      args_to_SF_4p_kwKW_alpha[1] = client_request[1];
-
-      // Result:  SF_4p (k, omega)
-      client_result[0] = client_request[0];
-      client_result[1] = client_request[1];
-      client_result[2] = SF_4p_kwKW_alpha (args_to_SF_4p_kwKW_alpha, Itable);
-
-      MPI_Send (client_result, client_result_size, MPI_DOUBLE, server, status.MPI_TAG, comm);
-      //cout << "Client of rank " << rank << " sending complete" << endl;
-
-      // Wait for subsequent request from server
-      MPI_Recv (client_request, client_request_size, MPI_DOUBLE, server, MPI_ANY_TAG, comm, &status);
-      //cout << "Client of rank " << rank << ":  received request ";
-      //for (int i = 0; i < client_request_size; ++i) cout << client_request[i] << " ";
-      //cout << "with tag " << status.MPI_TAG << endl;
-
     }
 
     return;
   }
 
-  void SF_4p_kwKW_alpha_opt_client (MPI_Comm comm, DP req_prec, int Npts_K, int Npts_W, I_table Itable)
-  {
-    int server = 0;
-    int rank;
-
-    MPI_Status status;
-
-    int client_request_size = 2;
-    DP client_request[client_request_size]; // this is k, alpha
-    int client_result_size = 3;
-    DP client_result[client_result_size]; // this is k, alpha, SF_4p_kwKW_alpha_opt
-
-    MPI_Comm_rank (comm, &rank);
-
-    MPI_Recv (client_request, client_request_size, MPI_DOUBLE, server, MPI_ANY_TAG, comm, &status);
-    //cout << "Client of rank " << rank << ":  received request ";
-    //for (int i = 0; i < client_request_size; ++i) cout << client_request[i] << " ";
-    //cout << "with tag " << status.MPI_TAG << endl;
-
-    Vect_DP args_to_SF_4p_kwKW_alpha_opt(5);
-    args_to_SF_4p_kwKW_alpha_opt[2] = req_prec;
-    args_to_SF_4p_kwKW_alpha_opt[3] = DP(Npts_K);
-    args_to_SF_4p_kwKW_alpha_opt[4] = DP(Npts_W);
-
-    while (status.MPI_TAG) {
-
-      args_to_SF_4p_kwKW_alpha_opt[0] = client_request[0];
-      args_to_SF_4p_kwKW_alpha_opt[1] = client_request[1];
-
-      // Result:  SF_4p (k, omega)
-      client_result[0] = client_request[0];
-      client_result[1] = client_request[1];
-      client_result[2] = SF_4p_kwKW_alpha_opt (args_to_SF_4p_kwKW_alpha_opt, Itable);
-
-      MPI_Send (client_result, client_result_size, MPI_DOUBLE, server, status.MPI_TAG, comm);
-      //cout << "Client of rank " << rank << " sending complete" << endl;
-
-      // Wait for subsequent request from server
-      MPI_Recv (client_request, client_request_size, MPI_DOUBLE, server, MPI_ANY_TAG, comm, &status);
-      //cout << "Client of rank " << rank << ":  received request ";
-      //for (int i = 0; i < client_request_size; ++i) cout << client_request[i] << " ";
-      //cout << "with tag " << status.MPI_TAG << endl;
-
-    }
-
-    return;
-  }
-  */
 } // namespace ABACUS
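
The client loop kept above is the standard MPI worker pattern: block on a request from the server, keep computing while the message tag is nonzero, and treat tag 0 as the shutdown signal. A minimal standalone sketch of that pattern (not the ABACUS interface); the rank-0 branch is only a small driver, and the computation is a placeholder:

// Worker loop: receive, work while the tag is nonzero, stop on tag 0.
#include <mpi.h>

int main (int argc, char** argv)
{
  MPI_Init (&argc, &argv);

  int rank, size;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  const int server = 0;
  double request[2], result[3];
  MPI_Status status;

  if (rank == server) {
    for (int i = 1; i < size; ++i) {           // one request per client, tag 1
      request[0] = i; request[1] = 0.5 * i;
      MPI_Send (request, 2, MPI_DOUBLE, i, 1, MPI_COMM_WORLD);
    }
    for (int i = 1; i < size; ++i)             // collect one answer per client
      MPI_Recv (result, 3, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    for (int i = 1; i < size; ++i)             // tag 0: termination signal
      MPI_Send (request, 2, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
  }
  else {
    MPI_Recv (request, 2, MPI_DOUBLE, server, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    while (status.MPI_TAG) {                   // zero tag means "stop"
      result[0] = request[0];
      result[1] = request[1];
      result[2] = request[0] + request[1];     // placeholder for the real evaluation
      MPI_Send (result, 3, MPI_DOUBLE, server, status.MPI_TAG, MPI_COMM_WORLD);
      MPI_Recv (request, 2, MPI_DOUBLE, server, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    }
  }

  MPI_Finalize ();
  return 0;
}
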
diff --git a/src/XXX_VOA/SF_4p_server.cc b/src/XXX_VOA/SF_4p_server.cc
index ce21480..7d61bb1 100755
--- a/src/XXX_VOA/SF_4p_server.cc
+++ b/src/XXX_VOA/SF_4p_server.cc
@@ -57,7 +57,6 @@ namespace ABACUS {
 	for (int ik = 0; ik < dim_k; ++ik) {
 	  SF_outfile >> SF_4p[dim_k * iomega + ik];
 	  if (SF_4p[dim_k * iomega + ik] == 0.0) total_nr_req++;
-	  //cout << ik << "\t" << iomega << "\t" << SF_4p[dim_k * iomega + ik] << "\t" << (SF_4p[dim_k * iomega + ik] == 0.0) << "\t" << total_nr_req << endl;
 
 	  // We only load the LHS of the BZ, so we load N/2 empty values...
 	}
@@ -87,11 +86,6 @@ namespace ABACUS {
       cout << total_nr_req << "\t" << index << endl;
       ABACUSerror("Not counting total_nr_req correctly in SF_4p_opt_server");
     }
-    //ofstream SFsrc_outfile;
-    //SFsrc_outfile.open(SFsrc_Cstr);
-
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
 
     int nr_machines;
     MPI_Comm_size (comm, &nr_machines);
@@ -129,22 +123,16 @@ namespace ABACUS {
       client_request[6] = DP(Npts_W);
 
       MPI_Send(client_request, client_request_size, MPI_DOUBLE, i, scanning, comm);
-      //cout << "Server:  sent request ";
-      //for (int ii = 0; ii < client_request_size; ++ii) cout << client_request[ii] << " ";
-      //cout << "with tag " << scanning << " to client " << i << endl;
       nr_sent_out++;
-      //cout << "nr_sent_out = " << nr_sent_out << endl;
     }
 
     DP Actual_Time_MPI = MPI_Wtime();
 
-    //while (nr_returned < total_nr_req) {
     while (nr_returned < nr_sent_out) {
 
-      //cout << "Server:  waiting for answers... " << endl;
       MPI_Recv (client_result, client_result_size, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &status);
       nr_returned++;
-      //cout << "Server:  received answer from client " << status.MPI_SOURCE << endl;
+
       // unbuffer result
       SF_4p[dim_k * int(client_result[1]) + int(client_result[0])] = client_result[7];
 
@@ -158,15 +146,10 @@ namespace ABACUS {
 	client_request[3] = (needed_iomega[nr_sent_out] + 0.5) * omegamax/Nomega;
 	MPI_Send (client_request, client_request_size, MPI_DOUBLE, status.MPI_SOURCE, scanning, comm);
 	nr_sent_out++;
-	//cout << "nr_sent_out = " << nr_sent_out << endl;
       }
 
     } // while (nr_returned < total_nr_req)
 
-    cout << endl << "Computed " << nr_returned << " points ouf of " << total_nr_req << " required. " << endl;
-
-    cout << endl << "Server saving file..." << endl;
-
     // Output all data:  double up to full [0, 2pi] interval in k with symmetry
     SF_outfile.seekp(0);
     for (int iomega = 0; iomega < Nomega; ++iomega) {
@@ -187,384 +170,5 @@ namespace ABACUS {
     return;
   }
 
-  /****************************************************
-
-  // TO COMPLETE FOR 5.5 (commented out in 5.7)
-
-  *********************************************************/
-  /*
-  void SF_4p_opt_server (MPI_Comm comm, DP k_needed, DP omegamax, int Nomega, DP req_prec, int Npts_K, int Npts_W, I_table Itable,
-			 int Max_Secs, bool refine)
-  {
-    double Start_Time_MPI = MPI_Wtime();
-
-    Heis_Write_w_File (Nomega, omegamax);
-
-    stringstream SF_stringstream;
-    string SF_string;
-    SF_stringstream << "SF_4p_k_" << k_needed << "_No_" << Nomega << "_omax_" << omegamax
-		    << "_prec_" << req_prec << "_Npts_K_" << Npts_K << "_Npts_W_" << Npts_W << ".dat";
-    SF_string = SF_stringstream.str();
-    const char* SF_Cstr = SF_string.c_str();
-
-    fstream SF_outfile;
-    if (!refine) SF_outfile.open(SF_Cstr, fstream::out | fstream::trunc);
-    else SF_outfile.open(SF_Cstr, fstream::in | fstream::out);
-    if (SF_outfile.fail()) ABACUSerror("Could not open SF_outfile... ");
-
-    SF_outfile.precision(12);
-
-    int dim_k = 1;
-
-    DP* SF_4p = new DP[dim_k * Nomega];
-
-    // Initialize to zero to be sure:
-    for (int i = 0; i < Nomega * dim_k; ++i) SF_4p[i] = 0.0;
-
-    // If refining, load SF from existing file
-
-    int total_nr_req = 0;
-    DP buff;
-    if (refine) {
-      for (int iomega = 0; iomega < Nomega; ++iomega) {
-	for (int ik = 0; ik < dim_k; ++ik) {
-	  SF_outfile >> SF_4p[dim_k * iomega + ik];
-	  if (SF_4p[dim_k * iomega + ik] == 0.0) total_nr_req++;
-	  //cout << ik << "\t" << iomega << "\t" << SF_4p[dim_k * iomega + ik] << "\t" << (SF_4p[dim_k * iomega + ik] == 0.0) << "\t" << total_nr_req << endl;
-
-	  // We only load the LHS of the BZ, so we load N/2 empty values...
-	}
-	for (int ibuff = 0; ibuff < N/2; ++ibuff) SF_outfile >> buff;
-      }
-    }
-    else if (!refine) total_nr_req = dim_k * Nomega;
-
-    // List iomega and ik which need to be computed:
-
-    int* needed_ik = new int[total_nr_req];
-    int* needed_iomega = new int[total_nr_req];
-
-    int index = 0;
-    for (int iomega = 0; iomega < Nomega; ++iomega)
-      for (int ik = 0; ik < dim_k; ++ik) {
-	if (SF_4p[dim_k * iomega + ik] == 0) {
-	  needed_ik[index] = ik;
-	  needed_iomega[index] = iomega;
-	  index++;
-	}
-      }
-
-    cout << total_nr_req << " points required." << endl;
-
-    if (index != total_nr_req) {
-      cout << total_nr_req << "\t" << index << endl;
-      ABACUSerror("Not counting total_nr_req correctly in SF_4p_opt_server");
-    }
-    //ofstream SFsrc_outfile;
-    //SFsrc_outfile.open(SFsrc_Cstr);
-
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
-
-    int nr_machines;
-    MPI_Comm_size (comm, &nr_machines);
-    int nr_clients = nr_machines - 1;  // one for the server
-
-    int client_request_size = 7;
-    DP client_request[client_request_size];
-    // this is:
-    // ik
-    // iomega
-    // k
-    // omega
-    // req_prec
-    // Npts_K
-    // Npts_W
-    int client_result_size = 8;
-    DP client_result[client_result_size]; // same, plus SF_4p
-
-    MPI_Status status;
-
-    int scanning = 1;
-
-    int nr_sent_out = 0;
-    int nr_returned = 0;
-
-    for (int i = 1; i <= nr_clients && i <= total_nr_req; ++i) {
-
-      // Send request to client i, in the form of the req_id_array vector
-      client_request[0] = DP(needed_ik[nr_sent_out]);
-      client_request[1] = DP(needed_iomega[nr_sent_out]);
-      client_request[2] = k_needed;//(twoPI * needed_ik[nr_sent_out])/N;
-      client_request[3] = (needed_iomega[nr_sent_out] + 0.5) * omegamax/Nomega;
-      client_request[4] = req_prec;
-      client_request[5] = DP(Npts_K);
-      client_request[6] = DP(Npts_W);
-
-      MPI_Send(client_request, client_request_size, MPI_DOUBLE, i, scanning, comm);
-      //cout << "Server:  sent request ";
-      //for (int ii = 0; ii < client_request_size; ++ii) cout << client_request[ii] << " ";
-      //cout << "with tag " << scanning << " to client " << i << endl;
-      nr_sent_out++;
-      //cout << "nr_sent_out = " << nr_sent_out << endl;
-    }
-
-    DP Actual_Time_MPI = MPI_Wtime();
-
-    //while (nr_returned < total_nr_req) {
-    while (nr_returned < nr_sent_out) {
-
-      //cout << "Server:  waiting for answers... " << endl;
-      MPI_Recv (client_result, client_result_size, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &status);
-      nr_returned++;
-      //cout << "Server:  received answer from client " << status.MPI_SOURCE << endl;
-      // unbuffer result
-      SF_4p[int(client_result[1])] = client_result[7];
-
-      Actual_Time_MPI = MPI_Wtime();
-
-      // Send out new request if needed and time available
-      if (nr_sent_out < total_nr_req && Actual_Time_MPI - Start_Time_MPI < Max_Secs) {
-	client_request[0] = needed_ik[nr_sent_out];
-	client_request[1] = needed_iomega[nr_sent_out];
-	client_request[2] = k_needed;//(twoPI * needed_ik[nr_sent_out])/N;
-	client_request[3] = (needed_iomega[nr_sent_out] + 0.5) * omegamax/Nomega;
-	MPI_Send (client_request, client_request_size, MPI_DOUBLE, status.MPI_SOURCE, scanning, comm);
-	nr_sent_out++;
-	//cout << "nr_sent_out = " << nr_sent_out << endl;
-      }
-
-    } // while (nr_returned < total_nr_req)
-
-    cout << endl << "Computed " << nr_returned << " points ouf of " << total_nr_req << " required. " << endl;
-
-    cout << endl << "Server saving file..." << endl;
-
-    // Output all data:
-    SF_outfile.seekp(0);
-    for (int iomega = 0; iomega < Nomega; ++iomega) {
-      SF_outfile << omega[iomega] << "\t" << SF_4p[iomega] << endl;
-    }
-
-    SF_outfile.close();
-
-    cout << endl << "Done !" << endl;
-
-    // Send term signal to clients
-    int scanning_completed = 0;
-    for (int i = 1; i <= nr_clients; ++i)
-      MPI_Send (client_request, client_request_size, MPI_DOUBLE, i, scanning_completed, comm);
-
-    return;
-  }
-  */
-  /*
-  // Function producing a fixed k scan, with data file
-  void SF_4p_rec_par (MPI_Comm comm, DP k, DP req_prec, int max_rec_w, int max_rec, I_table Itable)
-  {
-    int Npts_w = int(pow(3.0, max_rec_w + 2));
-
-    stringstream SFraw_stringstream;
-    string SFraw_string;
-    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w << "_max_rec_" << max_rec << ".raw";
-    SFraw_string = SFraw_stringstream.str();
-    const char* SFraw_Cstr = SFraw_string.c_str();
-
-    stringstream SF_stringstream;
-    string SF_string;
-    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w << "_max_rec_" << max_rec << ".dat";
-    SF_string = SF_stringstream.str();
-    const char* SF_Cstr = SF_string.c_str();
-
-    stringstream SFsrc_stringstream;
-    string SFsrc_string;
-    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w << "_max_rec_" << max_rec << ".src";
-    SFsrc_string = SFsrc_stringstream.str();
-    const char* SFsrc_Cstr = SFsrc_string.c_str();
-
-    ofstream SFraw_outfile;
-    SFraw_outfile.open(SFraw_Cstr);
-    //ofstream SFsrc_outfile;
-    //SFsrc_outfile.open(SFsrc_Cstr);
-
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
-
-    int nr_machines;
-    MPI_Comm_size (comm, &nr_machines);
-    int nr_clients = nr_machines - 1;  // one for the server
-
-    int client_request_size = 2;
-    DP client_request[client_request_size]; // this is k, alpha
-    int client_result_size = 3;
-    DP client_result[client_result_size]; // this is k, alpha, SF_4p_kwKW_alpha
-
-    MPI_Status status;
-
-    int scanning = 1;
-
-    int total_nr_req = Npts_w;
-    int nr_sent_out = 0;
-    int nr_returned = 0;
-
-    Vect_DP alpha_req(Npts_w);
-    for (int iw = 0; iw < Npts_w; ++iw) alpha_req[iw] = 0.5 * PI * (iw + 0.5)/Npts_w;
-
-    //cout << "alpha_req = " << alpha_req << endl;
-
-    for (int i = 1; i <= nr_clients && i <= total_nr_req; ++i) {
-
-      // Send request to client i, in the form of the req_id_array vector
-      client_request[0] = k;
-      client_request[1] = alpha_req[nr_sent_out];
-
-      MPI_Send(client_request, client_request_size, MPI_DOUBLE, i, scanning, comm);
-      //cout << "Server:  sent request ";
-      //for (int ii = 0; ii < client_request_size; ++ii) cout << client_request[ii] << " ";
-      //cout << "with tag " << scanning << " to client " << i << endl;
-      nr_sent_out++;
-      //cout << "nr_sent_out = " << nr_sent_out << endl;
-    }
-
-    while (nr_returned < total_nr_req) {
-
-      //cout << "Server:  waiting for answers... " << endl;
-      MPI_Recv (client_result, client_result_size, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &status);
-      nr_returned++;
-      //cout << "Server:  received answer from client " << status.MPI_SOURCE << endl;
-
-      // unbuffer result
-      SFraw_outfile << client_result[1] << "\t" << client_result[2] << endl;
-
-      // Send out new request if needed
-      if (nr_sent_out < total_nr_req) {
-	client_request[1] = alpha_req[nr_sent_out];
-	MPI_Send (client_request, client_request_size, MPI_DOUBLE, status.MPI_SOURCE, scanning, comm);
-	nr_sent_out++;
-	//cout << "nr_sent_out = " << nr_sent_out << endl;
-      }
-
-    } // while (nr_returned < total_nr_req)
-
-    // Send term signal to clients
-    int scanning_completed = 0;
-    for (int i = 1; i <= nr_clients; ++i)
-      MPI_Send (client_request, client_request_size, MPI_DOUBLE, i, scanning_completed, comm);
-
-    SFraw_outfile.close();
-
-    //SFsrc_outfile << answer << endl;
-    //SFsrc_outfile.close();
-
-    // Translate raw data into SF_4p (k,omega) data
-
-    //Translate_raw_4p_data (k, max_rec_w, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
-    Translate_raw_4p_data (k, int(pow(3.0, max_rec_w + 2)), SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
-
-    return;
-  }
-
-  void SF_4p_opt_par (MPI_Comm comm, DP k, DP req_prec, int Npts_w, int Npts_K, int Npts_W, I_table Itable)
-  {
-    stringstream SFraw_stringstream;
-    string SFraw_string;
-    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".raw";
-    SFraw_string = SFraw_stringstream.str();
-    const char* SFraw_Cstr = SFraw_string.c_str();
-
-    stringstream SF_stringstream;
-    string SF_string;
-    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".dat";
-    SF_string = SF_stringstream.str();
-    const char* SF_Cstr = SF_string.c_str();
-
-    stringstream SFsrc_stringstream;
-    string SFsrc_string;
-    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".src";
-    SFsrc_string = SFsrc_stringstream.str();
-    const char* SFsrc_Cstr = SFsrc_string.c_str();
-
-    ofstream SFraw_outfile;
-    SFraw_outfile.open(SFraw_Cstr);
-    //ofstream SFsrc_outfile;
-    //SFsrc_outfile.open(SFsrc_Cstr);
-
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
-
-    int nr_machines;
-    MPI_Comm_size (comm, &nr_machines);
-    int nr_clients = nr_machines - 1;  // one for the server
-
-    int client_request_size = 2;
-    DP client_request[client_request_size]; // this is k, alpha
-    int client_result_size = 3;
-    DP client_result[client_result_size]; // this is k, alpha, SF_4p_kwKW_alpha_opt
-
-    MPI_Status status;
-
-    int scanning = 1;
-
-    int total_nr_req = Npts_w;
-    int nr_sent_out = 0;
-    int nr_returned = 0;
-
-    Vect_DP alpha_req(Npts_w);
-    for (int iw = 0; iw < Npts_w; ++iw) alpha_req[iw] = 0.5 * PI * (iw + 0.5)/Npts_w;
-
-    //cout << "alpha_req = " << alpha_req << endl;
-
-    for (int i = 1; i <= nr_clients && i <= total_nr_req; ++i) {
-
-      // Send request to client i, in the form of the req_id_array vector
-      client_request[0] = k;
-      client_request[1] = alpha_req[nr_sent_out];
-
-      MPI_Send(client_request, client_request_size, MPI_DOUBLE, i, scanning, comm);
-      //cout << "Server:  sent request ";
-      //for (int ii = 0; ii < client_request_size; ++ii) cout << client_request[ii] << " ";
-      //cout << "with tag " << scanning << " to client " << i << endl;
-      nr_sent_out++;
-      //cout << "nr_sent_out = " << nr_sent_out << endl;
-    }
-
-    while (nr_returned < total_nr_req) {
-
-      //cout << "Server:  waiting for answers... " << endl;
-      MPI_Recv (client_result, client_result_size, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, comm, &status);
-      nr_returned++;
-      //cout << "Server:  received answer from client " << status.MPI_SOURCE << endl;
-
-      // unbuffer result
-      SFraw_outfile << client_result[1] << "\t" << client_result[2] << endl;
-
-      // Send out new request if needed
-      if (nr_sent_out < total_nr_req) {
-	client_request[1] = alpha_req[nr_sent_out];
-	MPI_Send (client_request, client_request_size, MPI_DOUBLE, status.MPI_SOURCE, scanning, comm);
-	nr_sent_out++;
-	//cout << "nr_sent_out = " << nr_sent_out << endl;
-      }
-
-    } // while (nr_returned < total_nr_req)
-
-    // Send term signal to clients
-    int scanning_completed = 0;
-    for (int i = 1; i <= nr_clients; ++i)
-      MPI_Send (client_request, client_request_size, MPI_DOUBLE, i, scanning_completed, comm);
-
-    SFraw_outfile.close();
-
-    //SFsrc_outfile << answer << endl;
-    //SFsrc_outfile.close();
-
-    // Translate raw data into SF_4p (k,omega) data
-
-    //Translate_raw_4p_data (k, max_rec_w, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
-    Translate_raw_4p_data (k, Npts_w, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
-
-    return;
-  }
-  */
 
 } // namespace ABACUS
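
The dispatch retained in SF_4p_opt_server is the usual self-scheduling master loop: seed each client with one request, hand new work to whichever client answers (via status.MPI_SOURCE), and finish by sending a zero-tag message to every client. A minimal standalone sketch of that loop, with placeholder task contents and a worker branch mirroring the client sketch above:

// Self-scheduling dispatch: seed, redispatch to whoever answers, stop with tag 0.
#include <mpi.h>
#include <vector>

int main (int argc, char** argv)
{
  MPI_Init (&argc, &argv);

  int rank, size;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

  const int ntasks = 20;                       // hypothetical number of requests
  double request[2], result[2];
  MPI_Status status;

  if (rank == 0) {
    std::vector<double> answer (ntasks, 0.0);
    int sent = 0, returned = 0;

    for (int i = 1; i < size && sent < ntasks; ++i) {   // initial seeding, tag 1
      request[0] = sent; request[1] = 0.5 * sent;
      MPI_Send (request, 2, MPI_DOUBLE, i, 1, MPI_COMM_WORLD);
      sent++;
    }
    while (returned < sent) {                           // collect and redispatch
      MPI_Recv (result, 2, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
      answer[int(result[0])] = result[1];
      returned++;
      if (sent < ntasks) {
        request[0] = sent; request[1] = 0.5 * sent;
        MPI_Send (request, 2, MPI_DOUBLE, status.MPI_SOURCE, 1, MPI_COMM_WORLD);
        sent++;
      }
    }
    for (int i = 1; i < size; ++i)                      // tag 0: stop the clients
      MPI_Send (request, 2, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
  }
  else {
    MPI_Recv (request, 2, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    while (status.MPI_TAG) {
      result[0] = request[0];
      result[1] = request[0] + request[1];              // placeholder evaluation
      MPI_Send (result, 2, MPI_DOUBLE, 0, status.MPI_TAG, MPI_COMM_WORLD);
      MPI_Recv (request, 2, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    }
  }

  MPI_Finalize ();
  return 0;
}
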
diff --git a/src/XXX_VOA/XXX_VOA.cc b/src/XXX_VOA/XXX_VOA.cc
index 143a6d6..47fa091 100755
--- a/src/XXX_VOA/XXX_VOA.cc
+++ b/src/XXX_VOA/XXX_VOA.cc
@@ -26,7 +26,7 @@ namespace ABACUS {
 
     DP rho_used = fabs(rho);
 
-    if (rho_used > 10000.0) return(-PI * rho_used - 2.0 * Euler_Mascheroni);  // CHECK THIS
+    if (rho_used > 10000.0) return(-PI * rho_used - 2.0 * Euler_Mascheroni);
 
     Vect_DP args(2);
     args[0] = 0.0;
@@ -37,12 +37,6 @@ namespace ABACUS {
       - 2.0 * Integrate_rec (Integrand_12, args, 0, t1, tinf, req_prec, 12)
       - Integrate_rec (Integrand_2, args, 0, 0.0, tinf, req_prec, 12);
 
-    /*
-    DP answer = -2.0 * Euler_Mascheroni - 2.0 * log(4.0 * rho_used * t1)
-      + 2.0 * (Integrate_optimal (Integrand_11, args, 0, 0.0, t1, req_prec, 1.0e-30, 10000)).integ_est
-      - 2.0 * (Integrate_optimal (Integrand_12, args, 0, t1, tinf, req_prec, 1.0e-30, 10000)).integ_est
-      - (Integrate_optimal (Integrand_2, args, 0, 0.0, tinf, req_prec, 1.0e-30, 10000)).integ_est;
-    */
     return(answer);
   }
 
@@ -53,14 +47,17 @@ namespace ABACUS {
   {
     // Careful !  This is S(k, omega) = S (k, w) |dw/domega| = 2 S(k, w)
 
-    DP w = 2.0 * omega;  // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
+    DP w = 2.0 * omega;
+    // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
 
     DP wu = twoPI * sin(0.5 * k);
     DP wl = PI * fabs(sin(k));
 
     // Factor of 2:  return S(k, omega), not S(k, w)
-    // 0.25 factor:  1/4 * 2 * 1/2, where 1/4 comes from Bougourzi, 2 is the Jacobian |dw/domega| and 1/2 is S^{zz} = 1/2 * S^{+-}
-    return(w < wu && w > wl ? 2.0 * 0.5 * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(w * w - wl * wl)))/PI))/sqrt(wu * wu - w * w) : 0.0);
+    // 0.25 factor:  1/4 * 2 * 1/2, where 1/4 comes from Bougourzi, 2 is the Jacobian |dw/domega|
+    // and 1/2 is S^{zz} = 1/2 * S^{+-}
+    return(w < wu && w > wl ? 2.0 * 0.5
+	   * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(w * w - wl * wl)))/PI))/sqrt(wu * wu - w * w) : 0.0);
 
   }
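
For orientation, the support of the two-spinon contribution used here is bounded below by wl(k) = pi*|sin k| and above by wu(k) = 2*pi*sin(k/2), with w = 2*omega absorbing the S.S versus 0.5*sigma.sigma normalization. A minimal sketch of that kinematic check with made-up (k, omega):

// Two-spinon kinematics: the structure factor is taken as zero outside
// the window wl(k) < w < wu(k), with w = 2*omega.
#include <cmath>
#include <iostream>

int main ()
{
  const double PI = 3.14159265358979323846;
  double k = 0.4 * PI;        // hypothetical momentum
  double omega = 1.8;         // hypothetical energy in the S.S normalization

  double w  = 2.0 * omega;    // rescale to the 0.5*sigma.sigma normalization
  double wu = 2.0 * PI * std::sin (0.5 * k);
  double wl = PI * std::fabs (std::sin (k));

  bool inside = (w > wl && w < wu);
  std::cout << "wl = " << wl << ", wu = " << wu
            << ", inside continuum: " << std::boolalpha << inside << std::endl;
  return 0;
}
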
 
@@ -75,7 +72,8 @@ namespace ABACUS {
 
     // 0.5 factor:  1 from Bougourzi, and 1/2 is S^{zz} = 1/2 * S^{+-}
     return(args[1] < wu && args[1] > wl ?
-	   0.5 * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(args[1] * args[1] - wl * wl)))/PI))/sqrt(wu * wu - args[1] * args[1]) : 0.0);
+	   0.5 * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(args[1] * args[1] - wl * wl)))
+					 /PI))/sqrt(wu * wu - args[1] * args[1]) : 0.0);
   }
 
   DP SF_2p_alt (Vect_DP args, I_table Itable)
@@ -87,8 +85,7 @@ namespace ABACUS {
     DP wu = twoPI * sin(0.5 * args[0]);
     DP wl = PI * fabs(sin(args[0]));
 
-    //DP w = wu * cos(args[1]);
-    //DP factor = 1.0;
+
     DP w = wl * cosh(args[1]);
 
     if (w >= wu || w <= wl) return(0.0);
@@ -109,8 +106,6 @@ namespace ABACUS {
     DP wu = twoPI * sin(0.5 * args[0]);
     DP wl = PI * fabs(sin(args[0]));
 
-    //DP w = wu * cos(args[1]);
-    //DP factor = 1.0;
     DP w = wl * cosh(args[1]);
     DP factor = sqrt((w * w - wl * wl)/(wu * wu - w * w));
 
@@ -153,7 +148,6 @@ namespace ABACUS {
     args_to_SF_2p[1] = 0.0; // this will be w
     args_to_SF_2p[2] = ABACUS::max(1.0e-14, 0.01 * req_prec);
 
-    //return(Integrate_rec_using_table (SF_2p_alt, args_to_SF_2p, 1, Itable, 0.0, acos(wl/wu), req_prec, max_rec)/twoPI);
     return(Integrate_rec_using_table (SF_2p_alt, args_to_SF_2p, 1, Itable, 0.0, acosh(wu/wl), req_prec, max_rec)/twoPI);
   }
 
@@ -168,7 +162,8 @@ namespace ABACUS {
     args_to_SF_2p_intw[2] = DP(max_rec);
 
     // Factor 2:  int[0, 2PI] = 2 int[0, PI]
-    return(4.0 * 2.0 * Integrate_rec_using_table (SF_2p_intw, args_to_SF_2p_intw, 0, Itable, 0.0, PI, req_prec, max_rec)/twoPI);
+    return(4.0 * 2.0 * Integrate_rec_using_table (SF_2p_intw, args_to_SF_2p_intw, 0, Itable,
+						  0.0, PI, req_prec, max_rec)/twoPI);
     // 4 : because full integral gives 1/4, return value here is sr fraction obtained.
   }
 
@@ -183,7 +178,8 @@ namespace ABACUS {
     args_to_SF_2p_intw[2] = DP(max_rec);
 
     // Factor 2:  int[0, 2PI] = 2 int[0, PI]
-    return(4.0 * 2.0 * Integrate_rec_using_table (SF_2p_intw_alt, args_to_SF_2p_intw, 0, Itable, 0.0, PI, req_prec, max_rec)/twoPI);
+    return(4.0 * 2.0 * Integrate_rec_using_table (SF_2p_intw_alt, args_to_SF_2p_intw, 0, Itable,
+						  0.0, PI, req_prec, max_rec)/twoPI);
     // 4 : because full integral gives 1/4, return value here is sr fraction obtained.
   }
 
@@ -210,7 +206,8 @@ namespace ABACUS {
     args_to_SF_2p[1] = 0.0; // this will be w
     args_to_SF_2p[2] = ABACUS::max(1.0e-14, 0.01 * req_prec);
 
-    return((Integrate_rec_using_table (SF_2p_w, args_to_SF_2p, 1, Itable, wl, wu, req_prec, max_rec)/twoPI)/Fixed_k_sumrule_w(k));
+    return((Integrate_rec_using_table (SF_2p_w, args_to_SF_2p, 1,
+				       Itable, wl, wu, req_prec, max_rec)/twoPI)/Fixed_k_sumrule_w(k));
   }
 
   DP SF_2p_check_fixed_k_sumrule_alt (DP k, DP req_prec, int max_rec, I_table Itable)
@@ -225,7 +222,8 @@ namespace ABACUS {
     args_to_SF_2p[1] = 0.0; // this will be alpha
     args_to_SF_2p[2] = ABACUS::max(1.0e-14, 0.01 * req_prec);
 
-    return((Integrate_rec_using_table (SF_2p_w_alt, args_to_SF_2p, 1, Itable, 0.0, acosh(wu/wl), req_prec, max_rec)/twoPI)/Fixed_k_sumrule_w(k));
+    return((Integrate_rec_using_table (SF_2p_w_alt, args_to_SF_2p, 1, Itable, 0.0, acosh(wu/wl),
+				       req_prec, max_rec)/twoPI)/Fixed_k_sumrule_w(k));
   }
 
   DP SF_2p_check_fixed_k_sumrule_opt (DP k, DP req_prec, int Npts, I_table Itable)
@@ -240,7 +238,8 @@ namespace ABACUS {
     args_to_SF_2p[1] = 0.0; // this will be alpha
     args_to_SF_2p[2] = ABACUS::max(1.0e-14, 0.01 * req_prec);
 
-    return(((Integrate_optimal_using_table (SF_2p_w_alt, args_to_SF_2p, 1, Itable, 0.0, acosh(wu/wl), req_prec, 1.0e-32, Npts)).integ_est/twoPI)/Fixed_k_sumrule_w(k));
+    return(((Integrate_optimal_using_table (SF_2p_w_alt, args_to_SF_2p, 1, Itable, 0.0, acosh(wu/wl),
+					    req_prec, 1.0e-32, Npts)).integ_est/twoPI)/Fixed_k_sumrule_w(k));
   }
 
 
@@ -253,9 +252,6 @@ namespace ABACUS {
     complex<DP> g[4];
     for (int l = 0; l < 4; ++l) g[l] = 0.0;
 
-    //DP prefactor = 256.0 * pow(PI, 14.0);
-    //DP prefactor = 1.0; // All factors taken into account later
-
     complex<DP> Plm[4];
     complex<DP> Pm[4];
 
@@ -280,27 +276,7 @@ namespace ABACUS {
     }
 
     // Do m = 0 terms:
-    /*
-    Pm[0] *= Gamma_min_0p5 * exp(ln_Gamma(-0.5 + irhoj1[0]) - ln_Gamma(1.0 + irhoj1[0])
-				 + ln_Gamma(-0.5 + irhoj2[0]) - ln_Gamma(1.0 + irhoj2[0])
-				 + ln_Gamma(-0.5 + irhoj3[0]) - ln_Gamma(1.0 + irhoj3[0]));
-    Pm[1] *= Gamma_min_0p5 * exp(ln_Gamma(-0.5 + irhoj0[1]) - ln_Gamma(1.0 + irhoj0[1])
-				 + ln_Gamma(-0.5 + irhoj2[1]) - ln_Gamma(1.0 + irhoj2[1])
-				 + ln_Gamma(-0.5 + irhoj3[1]) - ln_Gamma(1.0 + irhoj3[1]));
-    Pm[2] *= Gamma_min_0p5 * exp(ln_Gamma(-0.5 + irhoj0[2]) - ln_Gamma(1.0 + irhoj0[2])
-				 + ln_Gamma(-0.5 + irhoj1[2]) - ln_Gamma(1.0 + irhoj1[2])
-				 + ln_Gamma(-0.5 + irhoj3[2]) - ln_Gamma(1.0 + irhoj3[2]));
-    Pm[3] *= Gamma_min_0p5 * exp(ln_Gamma(-0.5 + irhoj0[3]) - ln_Gamma(1.0 + irhoj0[3])
-				 + ln_Gamma(-0.5 + irhoj1[3]) - ln_Gamma(1.0 + irhoj1[3])
-				 + ln_Gamma(-0.5 + irhoj2[3]) - ln_Gamma(1.0 + irhoj2[3]));
-    */
     for (int j = 0; j < 4; ++j) {
-      /*
-      Pm[j] *= exp(ln_Gamma(-0.5 + irhoj0[j]) - ln_Gamma(1.0 + irhoj0[j])
-		   + ln_Gamma(-0.5 + irhoj1[j]) - ln_Gamma(1.0 + irhoj1[j])
-		   + ln_Gamma(-0.5 + irhoj2[j]) - ln_Gamma(1.0 + irhoj2[j])
-		   + ln_Gamma(-0.5 + irhoj3[j]) - ln_Gamma(1.0 + irhoj3[j]));
-      */
       // Calling only Gamma (z) for Re(z) >= 0.5, in view of Lanczos method:
       Pm[j] *= exp(ln_Gamma(0.5 + irhoj0[j]) - ln_Gamma(1.0 + irhoj0[j])
 		   + ln_Gamma(0.5 + irhoj1[j]) - ln_Gamma(1.0 + irhoj1[j])
@@ -315,23 +291,12 @@ namespace ABACUS {
 
 	if (j <= l) g[l] += Plm[j] * Pm[j]; // otherwise no m = 0 term
 
-	//cout << "j = " << j << "\tl = " << l << "\tPlm[j] = " << Plm[j] << "\tPm[j] = " << Pm[j] << "\tprod = " << Plm[j] * Pm[j] << endl;
       }
     }
 
-    /*
-    for (int j = 0; j < 4; ++j) {
-      if (j == 0) g[0] += irhoj1[j] * irhoj2[j] * irhoj3[j] * Pm[j];
-      if (j <= 1) g[1] += (-0.5 + irhoj0[j]) * irhoj2[j] * irhoj3[j] * Pm[j];
-      if (j <= 2) g[2] += (-0.5 + irhoj0[j]) * (-0.5 + irhoj1[j]) * irhoj3[j] * Pm[j];
-      g[3] += (-0.5 + irhoj0[j]) * (-0.5 + irhoj1[j]) * (-0.5 + irhoj2[j]) * Pm[j];
-    }
-    */
-
     DP sum_norm_gl = norm(g[0]) + norm(g[1]) + norm(g[2]) + norm(g[3]);
     DP old_sum_norm_gl = sum_norm_gl;
 
-    //cout << "|g1|^2 = " << prefactor * norm(g[0]) << "\t2 " << prefactor * norm(g[1]) << "\t3 " << prefactor * norm(g[2]) << "\t4 " << prefactor * norm(g[3]) << endl;
 
     // Do m = 1, 2, ... terms:
 
@@ -353,68 +318,17 @@ namespace ABACUS {
 	  Pm[j] *= (m - 1.5 + irhoj0[j]) * (m - 1.5 + irhoj1[j]) * (m - 1.5 + irhoj2[j]) * (m - 1.5 + irhoj3[j])
 	    / ((DP(m) + irhoj0[j]) * (DP(m) + irhoj1[j]) * (DP(m) + irhoj2[j]) * (DP(m) + irhoj3[j]));
 
-	    //for (int l = 0; l < 4; ++l) {
-
-	    //Plm[j] = 1.0;
-	    //for (int i = 0; i < 4; ++i) if (i != l) Plm[j] *= m - (l > i ? 0.5 : 0.0) + II * (rho[j] - rho[i]);
-
-	    //g[l] += Plm[j] * Pm[j];
-	  //}
-
 	  // FASTER:  unwrap l, i loops
 	  // l = 0:
-	  //Plm[j] = (DP(m) + II * (rho[j] - rho[1])) * (DP(m) + II * (rho[j] - rho[2])) * (DP(m) + II * (rho[j] - rho[3]));
-	  //Plm[j] = (DP(m) + irhoj1[j]) * (DP(m) + irhoj2[j]) * (DP(m) + irhoj3[j]);
-	  //g[0] += Plm[j] * Pm[j];
 	  g[0] += (DP(m) + irhoj1[j]) * (DP(m) + irhoj2[j]) * (DP(m) + irhoj3[j]) * Pm[j];
 	  // l = 1;
-	  //Plm[j] = (m - 0.5 + II * (rho[j] - rho[0])) * (DP(m) + II * (rho[j] - rho[2])) * (DP(m) + II * (rho[j] - rho[3]));
-	  //Plm[j] = (m - 0.5 + irhoj0[j]) * (DP(m) + irhoj2[j]) * (DP(m) + irhoj3[j]);
-	  //g[1] += Plm[j] * Pm[j];
 	  g[1] += (m - 0.5 + irhoj0[j]) * (DP(m) + irhoj2[j]) * (DP(m) + irhoj3[j]) * Pm[j];
 	  // l = 2;
-	  //Plm[j] = (m - 0.5 + II * (rho[j] - rho[0])) * (m - 0.5 + II * (rho[j] - rho[1])) * (DP(m) + II * (rho[j] - rho[3]));
-	  //Plm[j] = (m - 0.5 + irhoj0[j]) * (m - 0.5 + irhoj1[j]) * (DP(m) + irhoj3[j]);
-	  //g[2] += Plm[j] * Pm[j];
 	  g[2] += (m - 0.5 + irhoj0[j]) * (m - 0.5 + irhoj1[j]) * (DP(m) + irhoj3[j]) * Pm[j];
 	  // l = 3;
-	  //Plm[j] = (m - 0.5 + II * (rho[j] - rho[0])) * (m - 0.5 + II * (rho[j] - rho[1])) * (m - 0.5 + II * (rho[j] - rho[2]));
-	  //Plm[j] = (m - 0.5 + irhoj0[j]) * (m - 0.5 + irhoj1[j]) * (m - 0.5 + irhoj2[j]);
-	  //g[3] += Plm[j] * Pm[j];
 	  g[3] += (m - 0.5 + irhoj0[j]) * (m - 0.5 + irhoj1[j]) * (m - 0.5 + irhoj2[j]) * Pm[j];
 	}
 
-	/*
-	// Also unwrap j loop:
-	Pm[0] *= (m - 1.5 + irhoj0[0]) * (m - 1.5 + irhoj1[0]) * (m - 1.5 + irhoj2[0]) * (m - 1.5 + irhoj3[0])
-	  / ((DP(m) + irhoj0[0]) * (DP(m) + irhoj1[0]) * (DP(m) + irhoj2[0]) * (DP(m) + irhoj3[0]));
-	Pm[1] *= (m - 1.5 + irhoj0[1]) * (m - 1.5 + irhoj1[1]) * (m - 1.5 + irhoj2[1]) * (m - 1.5 + irhoj3[1])
-	  / ((DP(m) + irhoj0[1]) * (DP(m) + irhoj1[1]) * (DP(m) + irhoj2[1]) * (DP(m) + irhoj3[1]));
-	Pm[2] *= (m - 1.5 + irhoj0[2]) * (m - 1.5 + irhoj1[2]) * (m - 1.5 + irhoj2[2]) * (m - 1.5 + irhoj3[2])
-	  / ((DP(m) + irhoj0[2]) * (DP(m) + irhoj1[2]) * (DP(m) + irhoj2[2]) * (DP(m) + irhoj3[2]));
-	Pm[3] *= (m - 1.5 + irhoj0[3]) * (m - 1.5 + irhoj1[3]) * (m - 1.5 + irhoj2[3]) * (m - 1.5 + irhoj3[3])
-	  / ((DP(m) + irhoj0[3]) * (DP(m) + irhoj1[3]) * (DP(m) + irhoj2[3]) * (DP(m) + irhoj3[3]));
-
-	g[0] += ((DP(m) + irhoj1[0]) * (DP(m) + irhoj2[0]) * (DP(m) + irhoj3[0])) * Pm[0]
-	  + ((DP(m)) * (DP(m) + irhoj2[1]) * (DP(m) + irhoj3[1])) * Pm[1]
-	  + ((DP(m) + irhoj1[2]) * (DP(m)) * (DP(m) + irhoj3[0])) * Pm[2]
-	  + ((DP(m) + irhoj1[3]) * (DP(m) + irhoj2[3]) * (DP(m))) * Pm[3];
-
-	g[1] += ((m - 0.5) * (DP(m) + irhoj2[0]) * (DP(m) + irhoj3[0])) * Pm[0]
-	  + ((m - 0.5 + irhoj0[1]) * (DP(m) + irhoj2[1]) * (DP(m) + irhoj3[1])) * Pm[1]
-	  + ((m - 0.5 + irhoj0[2]) * (DP(m)) * (DP(m) + irhoj3[2])) * Pm[2]
-	  + ((m - 0.5 + irhoj0[3]) * (DP(m) + irhoj2[3]) * (DP(m))) * Pm[3];
-
-	g[2] += ((m - 0.5) * (m - 0.5 + irhoj1[0]) * (DP(m) + irhoj3[0])) * Pm[0]
-	  + ((m - 0.5 + irhoj0[1]) * (m - 0.5) * (DP(m) + irhoj3[1])) * Pm[1]
-	  + ((m - 0.5 + irhoj0[2]) * (m - 0.5 + irhoj1[2]) * (DP(m) + irhoj3[2])) * Pm[2]
-	  + ((m - 0.5 + irhoj0[3]) * (m - 0.5 + irhoj1[3]) * (DP(m))) * Pm[3];
-
-	g[3] += ((m - 0.5) * (m - 0.5 + irhoj1[0]) * (m - 0.5 + irhoj2[0])) * Pm[0]
-	  + ((m - 0.5 + irhoj0[1]) * (m - 0.5) * (m - 0.5 + irhoj2[1])) * Pm[1]
-	  + ((m - 0.5 + irhoj0[2]) * (m - 0.5 + irhoj1[2]) * (m - 0.5)) * Pm[2]
-	  + ((m - 0.5 + irhoj0[3]) * (m - 0.5 + irhoj1[3]) * (m - 0.5 + irhoj2[3])) * Pm[3];
-	*/
 	m++;
 
       } while (m < m_to_reach);
@@ -423,11 +337,6 @@ namespace ABACUS {
 
     } while (m < 10 || sum_norm_gl/old_sum_norm_gl - 1.0 > req_prec && m < 100000);
 
-    //cout << "g converged using " << m << " terms." << endl;
-
-    //cout << "|g1|^2 = " << prefactor * norm(g[0]) << "\t2 " << prefactor * norm(g[1]) << "\t3 " << prefactor * norm(g[2]) << "\t4 " << prefactor * norm(g[3]) << endl;
-
-    //return(prefactor * (norm(g[0]) + norm(g[1]) + norm(g[2]) + norm(g[3])));
     return(norm(g[0]) + norm(g[1]) + norm(g[2]) + norm(g[3]));
 
   }
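
The loop structure above is a generic convergence-monitored summation: extend the partial sum in blocks and stop once its relative change falls below the requested precision, with a floor and a cap on the number of terms. A minimal sketch, with an illustrative geometric series standing in for the actual g_l sums:

// Convergence-monitored summation: keep adding blocks of terms and stop
// when the relative change of the partial sum drops below req_prec
// (with a minimum of 10 and a cap of 100000 terms).
#include <cmath>
#include <iostream>

int main ()
{
  const double req_prec = 1.0e-10;
  double sum = 0.0, old_sum = 0.0;
  int m = 0;

  do {
    old_sum = sum;
    int m_to_reach = (m == 0 ? 8 : 2 * m);    // double the number of terms each pass
    do {
      sum += std::pow (0.5, m);               // placeholder term; the sum converges to 2
      m++;
    } while (m < m_to_reach);
  } while (m < 10 || (std::fabs (sum / old_sum - 1.0) > req_prec && m < 100000));

  std::cout << "converged after " << m << " terms: " << sum << std::endl;
  return 0;
}
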
@@ -436,13 +345,8 @@ namespace ABACUS {
   {
     Vect_DP args(2);
 
-    DP answer = exp(-8.0 * real(ln_Gamma (0.25)) - 9.0 * log(2.0) + 8.0 * Integrate_rec (Integrand_A, args, 0, 0.0, 50.0, req_prec, 16))/3.0;
-    //DP answer = exp(-8.0 * real(ln_Gamma (0.25)) - 21.0 * log(2.0) - 14.0 * log(PI) + 8.0 * Integrate_rec (Integrand_A, args, 0, 0.0, 100.0, req_prec, 16))/3.0;
-
-    //cout << "|A|^2 = " << exp(-2.0 * Integrate_rec (Integrand_A, args, 0, 0.0, 100.0, req_prec))
-    //   << "\t Gamma (1/4) = " << exp ((ln_Gamma(0.25))) << endl;
-    //cout << "NPB: " << exp(-8.0 * real(ln_Gamma (0.25)) - 9.0 * log(2.0) + 8.0 * Integrate_rec (Integrand_A, args, 0, 0.0, 50.0, req_prec))/3.0 << endl;
-    //cout << "c-m: " << exp(-8.0 * real(ln_Gamma (0.25)) - 21.0 * log(2.0) - 14.0 * log(PI) + 8.0 * Integrate_rec (Integrand_A, args, 0, 0.0, 50.0, req_prec))/3.0 << endl;
+    DP answer = exp(-8.0 * real(ln_Gamma (0.25)) - 9.0 * log(2.0)
+		    + 8.0 * Integrate_rec (Integrand_A, args, 0, 0.0, 50.0, req_prec, 16))/3.0;
 
     return(answer);
   }
@@ -459,18 +363,11 @@ namespace ABACUS {
 
     sum_I_integrals = 0.0;
     for (int i1 = 0; i1 < 3; ++i1) for (int i2 = i1+1; i2 < 4; ++i2) {
-      //cout << "rho_ij = " << rho[i1] - rho[i2] << "\tI(rho_ij) = " << Itable.Return_val (rho[i1] - rho[i2]) << "\t" << I_integral (rho[i1] - rho[i2], req_prec) << endl;
-      //sum_I_integrals += I_integral(rho[i1] - rho[i2], req_prec);
-      sum_I_integrals += Itable.Return_val (rho[i1] - rho[i2]);
-    }
-    //cout << "sum_I_integrals = " << sum_I_integrals << "\texp(-sum) = " << exp(-sum_I_integrals) << endl;
+	sum_I_integrals += Itable.Return_val (rho[i1] - rho[i2]);
+      }
 
-    //sum_norm_g = 0.0;
-    //for (int i = 0; i < 4; ++i) sum_norm_g += norm(g(i, rho));
     sum_norm_g = Sum_norm_gl (rho, req_prec);
 
-    //cout << "sum_norm_g = " << sum_norm_g << "\t sqrt() = " << sqrt(Wu * Wu - W * W) << endl;
-
     return(exp(-sum_I_integrals) * sum_norm_g/sqrt(Wu * Wu - W * W));
   }
 
@@ -483,9 +380,9 @@ namespace ABACUS {
 
     sum_I_integrals = 0.0;
     for (int i1 = 0; i1 < 3; ++i1) for (int i2 = i1+1; i2 < 4; ++i2) {
-      if (fabs(rho[i1] - rho[i2]) < 1.0e-10 || fabs(rho[i1] - rho[i2]) >= 1000.0) return(0.0); // safety here
-      sum_I_integrals += Itable.Return_val (rho[i1] - rho[i2]);
-    }
+	if (fabs(rho[i1] - rho[i2]) < 1.0e-10 || fabs(rho[i1] - rho[i2]) >= 1000.0) return(0.0); // safety here
+	sum_I_integrals += Itable.Return_val (rho[i1] - rho[i2]);
+      }
 
     sum_norm_g = Sum_norm_gl (rho, req_prec);
 
@@ -498,7 +395,8 @@ namespace ABACUS {
 
     DP argacos1, argacos2;
 
-    if (fabs(argacos1 = W/(twoPI * sin(0.5*K))) > 1.0 || fabs(argacos2 = (w - W)/(twoPI * sin (0.5 * fabs(k - K)))) > 1.0) return(false);
+    if (fabs(argacos1 = W/(twoPI * sin(0.5*K))) > 1.0
+	|| fabs(argacos2 = (w - W)/(twoPI * sin (0.5 * fabs(k - K)))) > 1.0) return(false);
 
     DP acos1 = acos(argacos1);
     DP acos2 = acos(argacos2);
@@ -516,7 +414,7 @@ namespace ABACUS {
       p[3] = 0.5 * (K-k) - PI - acos2;
     }
 
-    for (int i = 0; i < 4; ++i) if (p[i] < -PI || p[i] > 0.0) return(false); // { cout << k << "\t" << w << "\t" << K << "\t" << W << "\t" << p; ABACUSerror("p out of bounds"); }
+    for (int i = 0; i < 4; ++i) if (p[i] < -PI || p[i] > 0.0) return(false);
 
     return(true);
   }
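
Set_p_given_kwKW accepts a (k, w, K, W) point only when both arccos arguments lie in [-1, 1] and the resulting momenta all land in [-PI, 0]; anywhere else the point is outside the four-spinon kinematic domain and the integrands above simply return 0. Below is a stripped-down sketch of just the first of those two tests; the branch structure that actually fills p[0..3] is omitted, and the function name is a placeholder, not part of this patch.

#include <cmath>
#include <cstdio>

// Illustrative acceptance test only: true when both arccos arguments used by
// Set_p_given_kwKW are in [-1, 1].  The caller is assumed to keep K away from 0 and
// from k (the Kdomain exclusions do exactly that), so the denominators never vanish.
bool in_kinematic_range(double k, double w, double K, double W)
{
  const double twoPI = 2.0 * 3.141592653589793;
  double argacos1 = W / (twoPI * std::sin(0.5 * K));
  double argacos2 = (w - W) / (twoPI * std::sin(0.5 * std::fabs(k - K)));
  return std::fabs(argacos1) <= 1.0 && std::fabs(argacos2) <= 1.0;
}

int main()
{
  // Prints 0 for this sample point: the second arccos argument exceeds 1.
  std::printf("%d\n", in_kinematic_range(1.0, 3.0, 0.6, 1.5));
}
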
@@ -527,11 +425,9 @@ namespace ABACUS {
 
     if (!Set_p_given_kwKW (args_to_G[0], args_to_G[1], args_to_G[2], args_to_G[3], p)) return(0.0);
 
-    DP answer = Jacobian_p3p4_KW (args_to_G[0], args_to_G[1], args_to_G[2], args_to_G[3]) * SF_contrib (p, args_to_G[4], Itable);
+    DP answer = Jacobian_p3p4_KW (args_to_G[0], args_to_G[1], args_to_G[2], args_to_G[3])
+      * SF_contrib (p, args_to_G[4], Itable);
 
-    //  cout << "kwKW = " << args_to_G[0] << " " << args_to_G[1] << " " << args_to_G[2] << " " << args_to_G[3] << "\tp = " << p << "Jac = " << Jacobian_p3p4_KW (args_to_G[0], args_to_G[1], args_to_G[2], args_to_G[3]) << "\tG = " << answer << endl;
-
-    //return(Jacobian_p3p4_KW (args_to_G[0], args_to_G[1], args_to_G[2], args_to_G[3]) * SF_contrib (p, 0.01 * args_to_G[4], Itable));
     return(answer);
   }
 
@@ -583,12 +479,11 @@ namespace ABACUS {
 
     if (!Set_p_given_kwKW (args_to_G[0], args_to_G[1], args_to_G[2], W, p)) return(0.0);
 
-    DP answer = J_fn (p, args_to_G[4], Itable) * sqrt(W * (2.0 * args_to_G[6] - W)
-						      /((args_to_G[8] - W * W) * (args_to_G[9] - pow(args_to_G[1] - W, 2.0))));
+    DP answer = J_fn (p, args_to_G[4], Itable)
+      * sqrt(W * (2.0 * args_to_G[6] - W)/((args_to_G[8] - W * W) * (args_to_G[9] - pow(args_to_G[1] - W, 2.0))));
 
     if (is_nan(answer)) {
       cerr << setprecision(10) << "args_to_G1_fn_mid = " << args_to_G << "G1 = " << answer << "\tPut to zero..." << endl;
-      //ABACUSerror("non !");
       answer = 0.0;
     }
     return(answer);
@@ -609,13 +504,12 @@ namespace ABACUS {
     if (!Set_p_given_kwKW (args_to_G[0], args_to_G[1], args_to_G[2], W, p)) return(0.0);
 
     DP answer = J_fn (p, args_to_G[4], Itable)
-      //* sqrt((args_to_G[7] * args_to_G[7] - W * W)/((args_to_G[8] - W * W) * (args_to_G[9] - pow(args_to_G[1] - W, 2.0))));
       * args_to_G[7] * sin(args_to_G[3]) /sqrt((args_to_G[8] - W * W) * (args_to_G[9] - pow(args_to_G[1] - W, 2.0)));
 
     if (is_nan(answer)) {
       cerr << setprecision(10) << "args_to_G2_fn_mid = " << args_to_G << "G2 = " << answer << endl;
-      cerr << W << "\t" << (args_to_G[7] * args_to_G[7] - W * W) << "\t" << (args_to_G[8] - W * W) << "\t" << (args_to_G[9] - pow(args_to_G[1] - W, 2.0)) << endl;
-      //ABACUSerror("non !");
+      cerr << W << "\t" << (args_to_G[7] * args_to_G[7] - W * W) << "\t" << (args_to_G[8] - W * W)
+	   << "\t" << (args_to_G[9] - pow(args_to_G[1] - W, 2.0)) << endl;
       answer = 0.0;
     }
     return(answer);
@@ -634,7 +528,8 @@ namespace ABACUS {
     DP Wu1 = args_to_G[6];
     DP Wu2 = args_to_G[7];
 
-    return(J_fn (p, args_to_G[8], Itable) * sqrt((W * W - Wmin * Wmin)/((Wu1 * Wu1 - W * W) * (Wu2 * Wu2 - (args_to_G[1] - W) * (args_to_G[1] - W)))));
+    return(J_fn (p, args_to_G[8], Itable)
+	   * sqrt((W * W - Wmin * Wmin)/((Wu1 * Wu1 - W * W) * (Wu2 * Wu2 - (args_to_G[1] - W) * (args_to_G[1] - W)))));
   }
 
   DP H_fn (Vect_DP args_to_H, I_table Itable)
@@ -657,8 +552,8 @@ namespace ABACUS {
     DP Wmin_used = Wmin (k, w, K);
     DP Wmax_used = Wmax (k, w, K);
 
-    return(Wmax_used > Wmin_used ? Integrate_rec_using_table (G_fn, args_to_G, 3, Itable, Wmin_used, Wmax_used, req_prec, max_rec) : 0.0);
-    //return(Riemann_Integrate_rec_using_table (G_fn, args_to_G, 3, Itable, 0.0, 10.0, 500));
+    return(Wmax_used > Wmin_used ?
+	   Integrate_rec_using_table (G_fn, args_to_G, 3, Itable, Wmin_used, Wmax_used, req_prec, max_rec) : 0.0);
   }
 
   DP H2_fn (Vect_DP args_to_H, I_table Itable)
@@ -741,8 +636,10 @@ namespace ABACUS {
     DP alpha_L2 = 0.0;
     DP alpha_U2 = acos(Wmid/Wmax_used);
 
-    answer += Integrate_rec_using_table (G1_fn_mid, args_to_G, 3, Itable, alpha_L1, alpha_U1, args_to_H[3], int(args_to_H[4]));
-    answer += Integrate_rec_using_table (G2_fn_mid, args_to_G, 3, Itable, alpha_L2, alpha_U2, args_to_H[3], int(args_to_H[4]));
+    answer += Integrate_rec_using_table (G1_fn_mid, args_to_G, 3, Itable, alpha_L1,
+					 alpha_U1, args_to_H[3], int(args_to_H[4]));
+    answer += Integrate_rec_using_table (G2_fn_mid, args_to_G, 3, Itable, alpha_L2,
+					 alpha_U2, args_to_H[3], int(args_to_H[4]));
 
     return(answer);
   }
@@ -790,8 +687,10 @@ namespace ABACUS {
     DP alpha_L2 = 0.0;
     DP alpha_U2 = acos(Wmid/Wmax_used);
 
-    answer += (Integrate_optimal_using_table (G1_fn_mid, args_to_G, 3, Itable, alpha_L1, alpha_U1, args_to_H[3], 1.0e-32, int(args_to_H[4]))).integ_est;
-    answer += (Integrate_optimal_using_table (G2_fn_mid, args_to_G, 3, Itable, alpha_L2, alpha_U2, args_to_H[3], 1.0e-32, int(args_to_H[4]))).integ_est;
+    answer += (Integrate_optimal_using_table (G1_fn_mid, args_to_G, 3, Itable, alpha_L1, alpha_U1,
+					      args_to_H[3], 1.0e-32, int(args_to_H[4]))).integ_est;
+    answer += (Integrate_optimal_using_table (G2_fn_mid, args_to_G, 3, Itable, alpha_L2, alpha_U2,
+					      args_to_H[3], 1.0e-32, int(args_to_H[4]))).integ_est;
 
     return(answer);
   }
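
H_fn_mid and H_fn_mid_opt integrate in an angle alpha rather than in W directly, splitting the interval at Wmid. The generic reason such substitutions pay off is that an integrable edge factor like 1/sqrt(Wmax^2 - W^2) diverges at the endpoint, whereas after W = Wmax*cos(alpha) the Jacobian Wmax*sin(alpha) cancels it and the integrand becomes smooth. The snippet below is only a numerical illustration of that effect on a toy integral; it is not the exact change of variables encoded in G1_fn_mid/G2_fn_mid.

#include <cmath>
#include <cstdio>

int main()
{
  const double PI = 3.141592653589793;
  const double Wmax = 3.0;
  const int n = 1000;

  // Naive midpoint rule in W: the integrand 1/sqrt(Wmax^2 - W^2) blows up at W = Wmax.
  double naive = 0.0;
  for (int i = 0; i < n; ++i) {
    double W = Wmax * (i + 0.5) / n;
    naive += (Wmax / n) / std::sqrt(Wmax * Wmax - W * W);
  }

  // Same integral after W = Wmax*cos(alpha): the Jacobian Wmax*sin(alpha) cancels the
  // square root, leaving a smooth (here constant) integrand on [0, pi/2].
  double subst = 0.0;
  for (int i = 0; i < n; ++i) {
    double alpha = 0.5 * PI * (i + 0.5) / n;
    double W = Wmax * std::cos(alpha);
    subst += (0.5 * PI / n) * (Wmax * std::sin(alpha)) / std::sqrt(Wmax * Wmax - W * W);
  }

  std::printf("exact = %.12f  naive = %.12f  substituted = %.12f\n", 0.5 * PI, naive, subst);
}
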
@@ -823,10 +722,9 @@ namespace ABACUS {
     args_to_G[9] = DP(max_rec);
 
     return(Integrate_rec_using_table (G_fn_alt, args_to_G, 3, Itable, 0.0, acosh(Wmax_used/Wmin_used), req_prec, max_rec));
-    //return(Riemann_Integrate_rec_using_table (G_fn, args_to_G, 3, Itable, 0.0, 10.0, 500));
   }
 
-  //DP SF_4p_kwKW (DP k, DP omega, DP req_prec, int max_rec, I_table Itable)
+
   DP SF_4p_kwKW (Vect_DP args, I_table Itable)
   {
     // Translate:
@@ -842,21 +740,20 @@ namespace ABACUS {
     Vect_DP args_to_H(5);
     args_to_H[0] = k;  // shift of PI in Bougourzi:  because they do FM case.
     // We want AFM, so SF_4p (k, omega) is correctly obtained directly from the RHS of their formula.
-    DP w = 2.0 * omega;  // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
+    DP w = 2.0 * omega;
+    // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
     args_to_H[1] = w;
     args_to_H[2] = 0.0;  // this is K
     args_to_H[3] = ABACUS::max(1.0e-14, 0.01 * req_prec);
     args_to_H[4] = DP(max_rec);
 
     if (w > wmax_4p(k) || w < wmin_4p(k)) {
-      //cout << "w out of bounds in SF_4p: " << "wmin = " << PI * sin(k) << " wmax = " <<  4.0 * PI * sin(0.25 * k) << " w = " << w << endl;
       return(0.0);
     }
 
     DP prefactor = 2.0 * 0.5 * 4.0 * Compute_C4 (req_prec);  // 4 comes from using p1 > p2 & p3 > p4 instead of whole interval.
     // 2 from Jacobian |dw/domega|
     // 0.5 from S^{zz} = S^{pm}/2
-    //DP prefactor = 4.0;
 
     // Define the K integral domain
     Domain<DP> Kdomain;
@@ -897,26 +794,17 @@ namespace ABACUS {
       Kdomain.Exclude (K3em, K3ep);
     }
 
-    //cout << "Kdomain: " << endl << Kdomain << endl;
-
     // Use (K,W) -> (k-K, w-W) symmetry to restrict to K in [k/2, k/2+PI]
     Kdomain.Exclude (0.0, 0.5 * k);
     Kdomain.Exclude (0.5 * k + PI, twoPI);
-    //Kdomain.Exclude (0.5 * k, 0.5 * k + PI);
     prefactor *= 2.0;
 
-    //cout << "Kdomain restricted: " << endl << Kdomain << endl;
-
     DP answer = 0.0;
 
     for (int idom = 0; idom < Kdomain.Ndomains(); ++idom)
-      //answer += Integrate_rec_using_table (H_fn, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      //answer += Integrate_rec_using_table (H2_fn, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      //answer += Integrate_rec_using_table (H_fn_alt, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      answer += Integrate_rec_using_table (H_fn_mid, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
+      answer += Integrate_rec_using_table (H_fn_mid, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom),
+					   req_prec, max_rec);
 
-    //return(prefactor * Integrate_rec_using_table (H_fn, args_to_H, 2, Itable, 0.0, twoPI, req_prec, max_rec));
-    //return(prefactor * Riemann_Integrate_rec_using_table (H_fn, args_to_H, 2, Itable, 0.0, twoPI, 500));
     return (prefactor * answer);
   }
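
Collecting the numerical factors used in SF_4p_kwKW, exactly as the comments state them: with sigma = 2 S one has 0.5 * sigma.sigma = 2 * S.S, so the Bougourzi-convention energy is w = 2*omega and S(k,omega) = |dw/domega| * S(k,w) = 2 * S(k,w). Hence

    prefactor = 2 (Jacobian) * 0.5 (S^zz = S^pm/2) * 4 (p1 > p2, p3 > p4 ordering) * C4 = 4*C4,

and once the (K,W) -> (k-K, w-W) symmetry halves the K range and prefactor *= 2.0 is applied, the overall factor multiplying the K integral is 8*C4.
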
 
@@ -938,21 +826,21 @@ namespace ABACUS {
     Vect_DP args_to_H(5);
     args_to_H[0] = k;  // shift of PI in Bougourzi:  because they do FM case.
     // We want AFM, so SF_4p (k, omega) is correctly obtained directly from the RHS of their formula.
-    DP w = 2.0 * omega;  // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
+    DP w = 2.0 * omega;
+    // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
     args_to_H[1] = w;
     args_to_H[2] = 0.0;  // this is K
     args_to_H[3] = ABACUS::max(1.0e-14, 0.01 * req_prec);
     args_to_H[4] = DP(Npts_W);
 
     if (w > wmax_4p(k) || w < wmin_4p(k)) {
-      //cout << "w out of bounds in SF_4p: " << "wmin = " << PI * sin(k) << " wmax = " <<  4.0 * PI * sin(0.25 * k) << " w = " << w << endl;
       return(0.0);
     }
 
-    DP prefactor = 2.0 * 0.5 * 4.0 * Compute_C4 (req_prec);  // 4 comes from using p1 > p2 & p3 > p4 instead of whole interval.
+    DP prefactor = 2.0 * 0.5 * 4.0 * Compute_C4 (req_prec);
+    // 4 comes from using p1 > p2 & p3 > p4 instead of whole interval.
     // 2 from Jacobian |dw/domega|
     // 0.5 from S^{zz} = S^{pm}/2
-    //DP prefactor = 4.0;
 
     // Define the K integral domain
     Domain<DP> Kdomain;
@@ -993,28 +881,17 @@ namespace ABACUS {
       Kdomain.Exclude (K3em, K3ep);
     }
 
-    //cout << "Kdomain: " << endl << Kdomain << endl;
-
     // Use (K,W) -> (k-K, w-W) symmetry to restrict to K in [k/2, k/2+PI]
     Kdomain.Exclude (0.0, 0.5 * k);
     Kdomain.Exclude (0.5 * k + PI, twoPI);
-    //Kdomain.Exclude (0.5 * k, 0.5 * k + PI);
     prefactor *= 2.0;
 
-    //cout << "Kdomain restricted: " << endl << Kdomain << endl;
-
     DP answer = 0.0;
 
     for (int idom = 0; idom < Kdomain.Ndomains(); ++idom)
-      //answer += Integrate_rec_using_table (H_fn, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      //answer += Integrate_rec_using_table (H2_fn, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      //answer += Integrate_rec_using_table (H_fn_alt, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      //answer += Integrate_rec_using_table (H_fn_mid, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, max_rec);
-      //answer += Integrate_rec_using_table (H_fn_mid, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, 1);
-      answer += (Integrate_optimal_using_table (H_fn_mid_opt, args_to_H, 2, Itable, Kdomain.xmin(idom), Kdomain.xmax(idom), req_prec, 1.0e-32, Npts_K)).integ_est;
+      answer += (Integrate_optimal_using_table (H_fn_mid_opt, args_to_H, 2, Itable, Kdomain.xmin(idom),
+						Kdomain.xmax(idom), req_prec, 1.0e-32, Npts_K)).integ_est;
 
-    //return(prefactor * Integrate_rec_using_table (H_fn, args_to_H, 2, Itable, 0.0, twoPI, req_prec, max_rec));
-    //return(prefactor * Riemann_Integrate_rec_using_table (H_fn, args_to_H, 2, Itable, 0.0, twoPI, 500));
     return (prefactor * answer);
   }
 
@@ -1070,11 +947,9 @@ namespace ABACUS {
 
     Vect_DP args_to_SF_4p_kwKW = args;
     DP omegamin = 0.5 * wmin_4p (args[0]);
-    //DP omegamax = 0.5 * wmax_4p (args[0]);
 
     args_to_SF_4p_kwKW[1] = omegamin * cosh(args[1]);
 
-    //return((omegamax - omegamin) * sin(args[1]) * SF_4p_kwKW_opt (args_to_SF_4p_kwKW, Itable));
     return(omegamin * sinh(args[1]) * SF_4p_kwKW_opt (args_to_SF_4p_kwKW, Itable));
   }
 
@@ -1117,7 +992,8 @@ namespace ABACUS {
 
   /******************************************************************************************/
 
-  void Translate_raw_4p_data (DP k, int dim_w, const char* SFraw_Cstr, const char* SF_Cstr, const char* SFsrc_Cstr, I_table Itable)
+  void Translate_raw_4p_data (DP k, int dim_w, const char* SFraw_Cstr, const char* SF_Cstr,
+			      const char* SFsrc_Cstr, I_table Itable)
   {
     DP omegamin = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
     DP omegamax = 0.5 * wmax_4p (k);
@@ -1127,7 +1003,6 @@ namespace ABACUS {
     DP alpha_in_old = -1.0;
     DP SF_in_old = -1.0;
 
-    //int dim_w = int(pow(3.0, max_rec_w + 2));
     DP* alpha = new DP[dim_w];
     DP* omega = new DP[dim_w];
     DP* SF_4p_dat = new DP[dim_w];
@@ -1167,8 +1042,6 @@ namespace ABACUS {
 
     QuickSort (omega, index, 0, dim_w - 1);
 
-    //for (int j = 0; j < dim_w; ++j) cout << j << "\t" << omega[j] << "\t" << index[j] << endl;
-
     DP fixed_k_sr_2p = 0.0;
     DP fixed_k_sr_4p = 0.0;
     DP full_sr_2p = 0.0;
@@ -1199,9 +1072,12 @@ namespace ABACUS {
       full_sr_2p += Jac_dalpha * SF_2p_dat[index[i] ];
     }
 
-    Jac_dalpha = (omegamax - omegamin) * sin(alpha[index[dim_w - 2] ]) * 0.5 * (alpha[index[dim_w - 1] ] - alpha[index[dim_w - 3] ]);
-    fixed_k_sr_4p += Jac_dalpha * (omega[dim_w - 2] * SF_4p_dat[index[dim_w - 2] ] + omega[dim_w - 1] * SF_4p_dat[index[dim_w - 1] ]);
-    fixed_k_sr_2p += Jac_dalpha * (omega[dim_w - 2] * SF_2p_dat[index[dim_w - 2] ] + omega[dim_w - 1] * SF_2p_dat[index[dim_w - 1] ]);
+    Jac_dalpha = (omegamax - omegamin) * sin(alpha[index[dim_w - 2] ])
+      * 0.5 * (alpha[index[dim_w - 1] ] - alpha[index[dim_w - 3] ]);
+    fixed_k_sr_4p += Jac_dalpha * (omega[dim_w - 2] * SF_4p_dat[index[dim_w - 2] ]
+				   + omega[dim_w - 1] * SF_4p_dat[index[dim_w - 1] ]);
+    fixed_k_sr_2p += Jac_dalpha * (omega[dim_w - 2] * SF_2p_dat[index[dim_w - 2] ]
+				   + omega[dim_w - 1] * SF_2p_dat[index[dim_w - 1] ]);
     full_sr_4p += Jac_dalpha * (SF_4p_dat[index[dim_w - 2] ] + SF_4p_dat[index[dim_w - 1] ]);
     full_sr_2p += Jac_dalpha * (SF_2p_dat[index[dim_w - 2] ] + SF_2p_dat[index[dim_w - 1] ]);
 
@@ -1234,19 +1110,18 @@ namespace ABACUS {
     return;
   }
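
Translate_raw_4p_data reconstructs omega-space integrals from samples taken on the alpha grid: with omega = omegamin + (omegamax - omegamin)*(1 - cos alpha) the measure is domega = (omegamax - omegamin)*sin(alpha) dalpha, and each interior sample is weighted by that Jacobian times half the distance between its alpha neighbours (the cosh variant below does the same with omegamin*sinh(alpha)). A small self-contained sketch of this weighted accumulation on a toy integrand; the names here are illustrative, not the arrays used above.

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  const double PI = 3.141592653589793;
  const double omegamin = 1.0, omegamax = 4.0;
  const int n = 2001;

  // Samples on a uniform alpha grid, omega(alpha) = omegamin + (omegamax - omegamin)*(1 - cos(alpha)).
  std::vector<double> alpha(n), omega(n), S(n);
  for (int i = 0; i < n; ++i) {
    alpha[i] = 0.5 * PI * i / (n - 1);
    omega[i] = omegamin + (omegamax - omegamin) * (1.0 - std::cos(alpha[i]));
    S[i] = omega[i] * omega[i];   // toy "structure factor" so the first moment is known exactly
  }

  // Accumulate int domega omega S(omega) using the alpha Jacobian and half-neighbour spacings,
  // mirroring the fixed_k_sr sums above (endpoints handled crudely here; the real code adds
  // separate endpoint terms).
  double first_moment = 0.0;
  for (int i = 1; i < n - 1; ++i) {
    double Jac_dalpha = (omegamax - omegamin) * std::sin(alpha[i]) * 0.5 * (alpha[i + 1] - alpha[i - 1]);
    first_moment += Jac_dalpha * omega[i] * S[i];
  }

  double exact = (std::pow(omegamax, 4.0) - std::pow(omegamin, 4.0)) / 4.0;  // int omega^3 domega
  std::printf("quadrature = %.8f  exact = %.8f\n", first_moment, exact);
}
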
 
-  void Translate_raw_4p_data_cosh (DP k, int dim_w, const char* SFraw_Cstr, const char* SF_Cstr, const char* SFsrc_Cstr, I_table Itable)
+  void Translate_raw_4p_data_cosh (DP k, int dim_w, const char* SFraw_Cstr, const char* SF_Cstr,
+				   const char* SFsrc_Cstr, I_table Itable)
   {
     // Here, omega = omegamin * cosh(alpha)
 
     DP omegamin = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax = 0.5 * wmax_4p (k);
 
     DP alpha_in;
     DP SF_in;
     DP alpha_in_old = -1.0;
     DP SF_in_old = -1.0;
 
-    //int dim_w = int(pow(3.0, max_rec_w + 2));
     DP* alpha = new DP[dim_w];
     DP* omega = new DP[dim_w];
     DP* SF_4p_dat = new DP[dim_w];
@@ -1267,11 +1142,9 @@ namespace ABACUS {
       SFraw >> alpha_in >> SF_in;
 
       alpha[i] = alpha_in;
-      //omega[i] = omegamin + (omegamax - omegamin) * (1.0 - cos(alpha_in));
       omega[i] = omegamin * cosh(alpha_in);
 
       // CAREFUL !!!  SF_in is S (k, w), and we want S (k, omega) = 2 S(k, w)
-      //SF_4p_dat[i] = 2.0 * SF_in/((omegamax - omegamin) * sin(alpha_in));
       SF_4p_dat[i] = 2.0 * SF_in/(omegamin * sinh(alpha_in));
 
       SF_2p_dat[i] = SF_2p (k, omega[i], Itable);  // This already is S(k, omega)
@@ -1288,13 +1161,10 @@ namespace ABACUS {
 
     QuickSort (omega, index, 0, dim_w - 1);
 
-    //for (int j = 0; j < dim_w; ++j) cout << j << "\t" << omega[j] << "\t" << index[j] << endl;
-
     DP fixed_k_sr_2p = 0.0;
     DP fixed_k_sr_4p = 0.0;
     DP full_sr_2p = 0.0;
     DP full_sr_4p = 0.0;
-    //DP Jac_dalpha = 0.0;  // This is domega = (omegamax - omegamin) sin alpha dalpha
     DP Jac_dalpha = 0.0;  // This is domega = omegamin sinh alpha dalpha
 
     ofstream SF;
@@ -1307,7 +1177,6 @@ namespace ABACUS {
     SF.close();
 
     // Compute first moment sum rule
-    //Jac_dalpha = (omegamax - omegamin) * sin(alpha[index[1] ]) * 0.5 * (alpha[index[2] ] - alpha[index[0] ]);
     Jac_dalpha = omegamin * sinh(alpha[index[1] ]) * 0.5 * (alpha[index[2] ] - alpha[index[0] ]);
     fixed_k_sr_4p += Jac_dalpha * (omega[0] * SF_4p_dat[index[0] ] + omega[1] * SF_4p_dat[index[1] ]);
     fixed_k_sr_2p += Jac_dalpha * (omega[0] * SF_2p_dat[index[0] ] + omega[1] * SF_2p_dat[index[1] ]);
@@ -1315,7 +1184,6 @@ namespace ABACUS {
     full_sr_2p += Jac_dalpha * (SF_2p_dat[index[0] ] + SF_2p_dat[index[1] ]);
 
     for (int i = 2; i < dim_w - 2; ++i) {
-      //Jac_dalpha = (omegamax - omegamin) * sin(alpha[index[i] ]) * 0.5 * (alpha[index[i + 1] ] - alpha[index[i - 1] ]);
       Jac_dalpha = omegamin * sinh(alpha[index[i] ]) * 0.5 * (alpha[index[i + 1] ] - alpha[index[i - 1] ]);
       fixed_k_sr_4p += Jac_dalpha * omega[i] * SF_4p_dat[index[i] ];
       fixed_k_sr_2p += Jac_dalpha * omega[i] * SF_2p_dat[index[i] ];
@@ -1323,10 +1191,11 @@ namespace ABACUS {
       full_sr_2p += Jac_dalpha * SF_2p_dat[index[i] ];
     }
 
-    //Jac_dalpha = (omegamax - omegamin) * sin(alpha[index[dim_w - 2] ]) * 0.5 * (alpha[index[dim_w - 1] ] - alpha[index[dim_w - 3] ]);
     Jac_dalpha = omegamin * sinh(alpha[index[dim_w - 2] ]) * 0.5 * (alpha[index[dim_w - 1] ] - alpha[index[dim_w - 3] ]);
-    fixed_k_sr_4p += Jac_dalpha * (omega[dim_w - 2] * SF_4p_dat[index[dim_w - 2] ] + omega[dim_w - 1] * SF_4p_dat[index[dim_w - 1] ]);
-    fixed_k_sr_2p += Jac_dalpha * (omega[dim_w - 2] * SF_2p_dat[index[dim_w - 2] ] + omega[dim_w - 1] * SF_2p_dat[index[dim_w - 1] ]);
+    fixed_k_sr_4p += Jac_dalpha * (omega[dim_w - 2] * SF_4p_dat[index[dim_w - 2] ]
+				   + omega[dim_w - 1] * SF_4p_dat[index[dim_w - 1] ]);
+    fixed_k_sr_2p += Jac_dalpha * (omega[dim_w - 2] * SF_2p_dat[index[dim_w - 2] ]
+				   + omega[dim_w - 1] * SF_2p_dat[index[dim_w - 1] ]);
     full_sr_4p += Jac_dalpha * (SF_4p_dat[index[dim_w - 2] ] + SF_4p_dat[index[dim_w - 1] ]);
     full_sr_2p += Jac_dalpha * (SF_2p_dat[index[dim_w - 2] ] + SF_2p_dat[index[dim_w - 1] ]);
 
@@ -1365,19 +1234,22 @@ namespace ABACUS {
   {
     stringstream SFraw_stringstream;
     string SFraw_string;
-    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w << "_max_rec_" << max_rec << ".raw";
+    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w
+		       << "_max_rec_" << max_rec << ".raw";
     SFraw_string = SFraw_stringstream.str();
     const char* SFraw_Cstr = SFraw_string.c_str();
 
     stringstream SF_stringstream;
     string SF_string;
-    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w << "_max_rec_" << max_rec << ".dat";
+    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w
+		    << "_max_rec_" << max_rec << ".dat";
     SF_string = SF_stringstream.str();
     const char* SF_Cstr = SF_string.c_str();
 
     stringstream SFsrc_stringstream;
     string SFsrc_string;
-    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w << "_max_rec_" << max_rec << ".src";
+    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_max_rec_w_" << max_rec_w
+		       << "_max_rec_" << max_rec << ".src";
     SFsrc_string = SFsrc_stringstream.str();
     const char* SFsrc_Cstr = SFsrc_string.c_str();
 
@@ -1386,16 +1258,13 @@ namespace ABACUS {
     ofstream SFsrc_outfile;
     SFsrc_outfile.open(SFsrc_Cstr, ofstream::app);
 
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
-
     Vect_DP args_to_SF(4);
     args_to_SF[0] = k;
     args_to_SF[1] = 0.0;  // integration variable
     args_to_SF[2] = req_prec;
     args_to_SF[3] = DP(max_rec);
 
-    //DP answer = Integrate_rec_using_table (SF_4p_kwKW, args_to_SF, 1, Itable, omegamin_used, omegamax_used, req_prec, max_rec, SF_outfile);
+
     // Version using omega = omegamin + (omegamax - omegamin) * (1-cos(alpha))
     DP answer = Integrate_rec_using_table (SF_4p_kwKW_alpha, args_to_SF, 1, Itable, 0.0, 0.5*PI, req_prec, max_rec_w, SFraw_outfile)/twoPI;
 
@@ -1405,7 +1274,6 @@ namespace ABACUS {
     SFsrc_outfile.close();
 
     // Translate raw data into SF_4p (k,omega) data
-
     Translate_raw_4p_data (k, int(pow(3.0, max_rec_w + 2)), SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
 
     return(answer);
@@ -1415,22 +1283,22 @@ namespace ABACUS {
   {
     stringstream SFraw_stringstream;
     string SFraw_string;
-    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".raw";
-    //SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << "_ch.raw";
+    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_"
+		       << Npts_K << "_" << Npts_W << ".raw";
     SFraw_string = SFraw_stringstream.str();
     const char* SFraw_Cstr = SFraw_string.c_str();
 
     stringstream SF_stringstream;
     string SF_string;
-    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".dat";
-    //SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << "_ch.dat";
+    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_"
+		    << Npts_K << "_" << Npts_W << ".dat";
     SF_string = SF_stringstream.str();
     const char* SF_Cstr = SF_string.c_str();
 
     stringstream SFsrc_stringstream;
     string SFsrc_string;
-    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".src";
-    //SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << "_ch.src";
+    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_"
+		       << Npts_K << "_" << Npts_W << ".src";
     SFsrc_string = SFsrc_stringstream.str();
     const char* SFsrc_Cstr = SFsrc_string.c_str();
 
@@ -1439,9 +1307,6 @@ namespace ABACUS {
     ofstream SFsrc_outfile;
     SFsrc_outfile.open(SFsrc_Cstr);
 
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
-
     Vect_DP args_to_SF(5);
     args_to_SF[0] = k;
     args_to_SF[1] = 0.0;  // integration variable
@@ -1450,9 +1315,11 @@ namespace ABACUS {
     args_to_SF[4] = DP(Npts_W);
 
     // Version using omega = omegamin + (omegamax - omegamin) * (1-cos(alpha))
-    Integral_result answer = Integrate_optimal_using_table (SF_4p_kwKW_alpha_opt, args_to_SF, 1, Itable, 0.0, 0.5*PI, req_prec, 1.0e-32, Npts_w, SFraw_outfile);
+    Integral_result answer = Integrate_optimal_using_table (SF_4p_kwKW_alpha_opt, args_to_SF, 1,
+							    Itable, 0.0, 0.5*PI, req_prec, 1.0e-32, Npts_w, SFraw_outfile);
     // Version using omega = omegamin * cosh(alpha)
-    //Integral_result answer = Integrate_optimal_using_table (SF_4p_kwKW_cosh_alpha_opt, args_to_SF, 1, Itable, 0.0, acosh(wmax_4p(k)/wmin_4p(k)), req_prec, 1.0e-32, Npts_w, SFraw_outfile);
+    //Integral_result answer = Integrate_optimal_using_table (SF_4p_kwKW_cosh_alpha_opt, args_to_SF, 1, Itable, 0.0,
+    //acosh(wmax_4p(k)/wmin_4p(k)), req_prec, 1.0e-32, Npts_w, SFraw_outfile);
     answer.integ_est /= twoPI;
     answer.abs_prec /= twoPI;
 
@@ -1462,9 +1329,7 @@ namespace ABACUS {
     SFsrc_outfile.close();
 
     // Translate raw data into SF_4p (k,omega) data
-
     Translate_raw_4p_data (k, answer.n_vals, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
-    //Translate_raw_4p_data_cosh (k, answer.n_vals, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
 
     return(answer);
   }
@@ -1578,7 +1443,8 @@ namespace ABACUS {
 
     stringstream SF_stringstream;
     string SF_string;
-    SF_stringstream << "SF_4p_N_" << N << "_Nw_" << Nomega << "_wmax_" << omegamax << "_prec_" << req_prec << "_max_rec_" << max_rec << ".dat";
+    SF_stringstream << "SF_4p_N_" << N << "_Nw_" << Nomega << "_wmax_" << omegamax << "_prec_" << req_prec
+		    << "_max_rec_" << max_rec << ".dat";
     SF_string = SF_stringstream.str();
     const char* SF_Cstr = SF_string.c_str();
 
@@ -1623,7 +1489,7 @@ namespace ABACUS {
     stringstream SRC_stringstream;
     string SRC_string;
     SRC_stringstream << "SF_4p_N_" << N << "_Nw_" << Nomega << "_wmax_" << omegamax << "_prec_" << req_prec
-		    << "_max_rec_" << max_rec << ".src";
+		     << "_max_rec_" << max_rec << ".src";
     SRC_string = SRC_stringstream.str();
     const char* SRC_Cstr = SRC_string.c_str();
 
@@ -1638,7 +1504,7 @@ namespace ABACUS {
     stringstream SR1_stringstream;
     string SR1_string;
     SR1_stringstream << "SF_4p_N_" << N << "_Nw_" << Nomega << "_wmax_" << omegamax << "_prec_" << req_prec
-		    << "_max_rec_" << max_rec << ".sr1";
+		     << "_max_rec_" << max_rec << ".sr1";
     SR1_string = SR1_stringstream.str();
     const char* SR1_Cstr = SR1_string.c_str();
 
@@ -1714,7 +1580,7 @@ namespace ABACUS {
     stringstream SRC_stringstream;
     string SRC_string;
     SRC_stringstream << "SF_4p_N_" << N << "_Nw_" << Nomega << "_wmax_" << omegamax << "_prec_" << req_prec
-		    << "_Npts_K_" << Npts_K << "_Npts_W_" << Npts_W << ".src";
+		     << "_Npts_K_" << Npts_K << "_Npts_W_" << Npts_W << ".src";
     SRC_string = SRC_stringstream.str();
     const char* SRC_Cstr = SRC_string.c_str();
 
@@ -1729,7 +1595,7 @@ namespace ABACUS {
     stringstream SR1_stringstream;
     string SR1_string;
     SR1_stringstream << "SF_4p_N_" << N << "_Nw_" << Nomega << "_wmax_" << omegamax << "_prec_" << req_prec
-		    << "_Npts_K_" << Npts_K << "_Npts_W_" << Npts_W << ".sr1";
+		     << "_Npts_K_" << Npts_K << "_Npts_W_" << Npts_W << ".sr1";
     SR1_string = SR1_stringstream.str();
     const char* SR1_Cstr = SR1_string.c_str();
 
@@ -1748,7 +1614,4 @@ namespace ABACUS {
   }
 
 
-
-
-
 } // namespace ABACUS
diff --git a/src/XXX_VOA/XXX_VOA_par.cc b/src/XXX_VOA/XXX_VOA_par.cc
index 1a0224a..dde0e0b 100755
--- a/src/XXX_VOA/XXX_VOA_par.cc
+++ b/src/XXX_VOA/XXX_VOA_par.cc
@@ -26,22 +26,22 @@ namespace ABACUS {
   {
     stringstream SFraw_stringstream;
     string SFraw_string;
-    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".raw";
-    //SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << "_ch.raw";
+    SFraw_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K
+		       << "_" << Npts_W << ".raw";
     SFraw_string = SFraw_stringstream.str();
     const char* SFraw_Cstr = SFraw_string.c_str();
 
     stringstream SF_stringstream;
     string SF_string;
-    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".dat";
-    //SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << "_ch.dat";
+    SF_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K
+		    << "_" << Npts_W << ".dat";
     SF_string = SF_stringstream.str();
     const char* SF_Cstr = SF_string.c_str();
 
     stringstream SFsrc_stringstream;
     string SFsrc_string;
-    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << ".src";
-    //SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K << "_" << Npts_W << "_ch.src";
+    SFsrc_stringstream << "SF_4p_k_" << k << "_prec_" << req_prec << "_Npts_" << Npts_w << "_" << Npts_K
+		       << "_" << Npts_W << ".src";
     SFsrc_string = SFsrc_stringstream.str();
     const char* SFsrc_Cstr = SFsrc_string.c_str();
 
@@ -50,8 +50,6 @@ namespace ABACUS {
     ofstream SFsrc_outfile;
     SFsrc_outfile.open(SFsrc_Cstr);
 
-    //DP omegamin_used = 0.5 * wmin_4p (k); // Correct for factor of 2 in E between me & Bougourzi
-    //DP omegamax_used = 0.5 * wmax_4p (k);
 
     Vect_DP args_to_SF(5);
     args_to_SF[0] = k;
@@ -73,7 +71,6 @@ namespace ABACUS {
     SFsrc_outfile.close();
 
     // Translate raw data into SF_4p (k,omega) data
-
     Translate_raw_4p_data (k, answer.n_vals, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
     //Translate_raw_4p_data_cosh (k, answer.n_vals, SFraw_Cstr, SF_Cstr, SFsrc_Cstr, Itable);
 
diff --git a/src/XXX_VOA/XXX_h0_v2.cc b/src/XXX_VOA/XXX_h0_v2.cc
index bdb886b..6c612f8 100755
--- a/src/XXX_VOA/XXX_h0_v2.cc
+++ b/src/XXX_VOA/XXX_h0_v2.cc
@@ -198,8 +198,6 @@ namespace ABACUS {
 	      omega = -0.5 * PI * (sinp0 + sinp1 + sinp2 + sin(p[3]));
 	      iomega = int(omega * Npts_o/omegamax);
 
-	      //cout << ik << "\t" << iomega << endl;
-
 	      J_fn_cont = J_fn (p, req_prec, Itable);
 	      sum_J_fn += (ik == Npts_p ? 1.0 : 2.0) * J_fn_cont;
 
diff --git a/src/XXZ_VOA/XXZ_VOA.cc b/src/XXZ_VOA/XXZ_VOA.cc
index a03e57a..9eb7177 100644
--- a/src/XXZ_VOA/XXZ_VOA.cc
+++ b/src/XXZ_VOA/XXZ_VOA.cc
@@ -67,7 +67,6 @@ namespace ABACUS {
 
     return(answer);
 
-    //return(sinh(args[0]*(1.0 + args[2])) * sinh(args[0]) * cos(4.0 * args[0] * args[1])/(args[0] * sinh(args[0] * args[2]) * pow(cosh(args[0]), 2.0)));
   }
 
   DP I_xi_11_integral (DP xi, DP rho, DP req_prec, int max_nr_pts, DP t1bar)
@@ -106,8 +105,8 @@ namespace ABACUS {
   }
 
   /*
-  inline DP Integrand_xi_2 (Vect_DP args)
-  {
+    inline DP Integrand_xi_2 (Vect_DP args)
+    {
     // This version is used for rho <= 1
 
     // args[0] corresponds to t, args[1] to rho, args[2] to xi
@@ -116,30 +115,34 @@ namespace ABACUS {
 
     if (args[0] * args[2] <= 1.0) {
 
-      if (args[0] <= 1.0) {
-	answer = sinh(args[0] * (args[2] + 1.0)) * pow(sin(2.0 * args[0] * args[1]), 2.0)
-	  /(args[0] * sinh(args[2] * args[0]) * sinh(args[0]) * pow(cosh(args[0]), 2.0));
-      }
+    if (args[0] <= 1.0) {
+    answer = sinh(args[0] * (args[2] + 1.0)) * pow(sin(2.0 * args[0] * args[1]), 2.0)
+    /(args[0] * sinh(args[2] * args[0]) * sinh(args[0]) * pow(cosh(args[0]), 2.0));
+    }
 
-      else if (args[0] >= 1.0) {
-	DP expm2t = exp(-2.0 * args[0]);
-	DP expm2txi = exp(-2.0*args[0]*args[2]);
-	//answer = 8.0 * expm2t * pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2t) * (1.0 + expm2t) * (1.0 + expm2t));
-	answer = 8.0 * (1.0 - expm2t*expm2txi) * expm2t *
-	  pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2txi) * (1.0 - expm2t) * (1.0 + expm2t) * (1.0 + expm2t));
-      }
+    else if (args[0] >= 1.0) {
+    DP expm2t = exp(-2.0 * args[0]);
+    DP expm2txi = exp(-2.0*args[0]*args[2]);
+    //answer = 8.0 * expm2t * pow(sin(2.0 * args[0] * args[1]), 2.0)
+    //  /(args[0] * (1.0 - expm2t) * (1.0 + expm2t) * (1.0 + expm2t));
+    answer = 8.0 * (1.0 - expm2t*expm2txi) * expm2t *
+    pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2txi) * (1.0 - expm2t)
+    * (1.0 + expm2t) * (1.0 + expm2t));
+    }
 
     DP expm2t = exp(-2.0 * args[0]);
     DP expm2txi = exp(-2.0*args[0]*args[2]);
-    //answer = 8.0 * expm2t * pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2t) * (1.0 + expm2t) * (1.0 + expm2t));
+    //answer = 8.0 * expm2t * pow(sin(2.0 * args[0] * args[1]), 2.0)
+    //  /(args[0] * (1.0 - expm2t) * (1.0 + expm2t) * (1.0 + expm2t));
     answer = 8.0 * (1.0 - expm2t*expm2txi) * expm2t *
-      pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2txi) * (1.0 - expm2t) * (1.0 + expm2t) * (1.0 + expm2t));
+    pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2txi) * (1.0 - expm2t)
+    * (1.0 + expm2t) * (1.0 + expm2t));
 
     return(answer);
 
-      ... NOT USED
+    ... NOT USED
 
-  }
+    }
   */
 
   inline DP Integrand_xi_22 (Vect_DP args)
@@ -153,7 +156,8 @@ namespace ABACUS {
     else if (args[0] >= 1.0) {
       DP expm2t = exp(-2.0 * args[0]);
       DP expm2txi = exp(-2.0*args[0]*args[2]);
-      answer = 4.0 * expm2t * (1.0 + expm2txi) * pow(sin(2.0 * args[0] * args[1]), 2.0)/(args[0] * (1.0 - expm2txi) * (1.0 + expm2t) * (1.0 + expm2t));
+      answer = 4.0 * expm2t * (1.0 + expm2txi) * pow(sin(2.0 * args[0] * args[1]), 2.0)
+	/(args[0] * (1.0 - expm2txi) * (1.0 + expm2t) * (1.0 + expm2t));
     }
     return(answer);
   }
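
The t >= 1 branch above rewrites the hyperbolic ratio in terms of exp(-2t): the factor (1 + expm2txi)/(1 - expm2txi) is just coth(t*xi), but evaluating coth via cosh/sinh overflows double precision once the argument exceeds roughly 710, whereas the exp(-2t) form stays finite for any positive argument. A minimal illustration (not code from this file):

#include <cmath>
#include <cstdio>

// coth(x) for x > 0, written the "safe" way used in Integrand_xi_22's large-t branch.
double coth_safe(double x)
{
  double e = std::exp(-2.0 * x);
  return (1.0 + e) / (1.0 - e);
}

int main()
{
  double x = 800.0;
  double naive = std::cosh(x) / std::sinh(x);  // inf/inf -> nan once x exceeds ~710
  std::printf("naive coth(%g) = %g,  safe coth(%g) = %.17g\n", x, naive, x, coth_safe(x));
}
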
@@ -170,9 +174,6 @@ namespace ABACUS {
 
     DP answer = 0.0;
 
-    //if (rho_used <= 1.0) answer = (Integrate_optimal (Integrand_xi_2, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts)).integ_est;
-
-    //else
     answer = PI * rho + log(0.5 * (1.0 + exp(-2.0 * PI * rho_used))) // This is I^{(21)}
       + (Integrate_optimal (Integrand_xi_22, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts)).integ_est;
 
@@ -201,22 +202,20 @@ namespace ABACUS {
   {
     // Careful !  This is S(k, omega) = S (k, w) |dw/domega| = 2 S(k, w)
 
-    DP w = 2.0 * omega;  // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
+    DP w = 2.0 * omega;
+    // Rescale energies by factor 2 because of definitions of H_XXX (omega:  S.S;  w: 0.5 * sigma.sigma = 2 S.S)
 
     DP vF = Fermi_velocity_XXZ_h0 (Delta); // in units of omega
 
-    //DP wu = twoPI * sin(0.5 * k);
     DP wu = 4.0 * vF * sin(0.5 * k);
-    //DP wl = PI * fabs(sin(k));
     DP wl = 2.0 * vF * fabs(sin(k));
 
     DP rho = acosh(sqrt((wu * wu - wl * wl)/(w * w - wl * wl)))/PI;
     DP xi = PI/acos(Delta) - 1.0;
     // Factor of 2:  return S(k, omega), not S(k, w)
-    // 0.25 factor:  1/4 * 2 * 1/2, where 1/4 comes from Bougourzi, 2 is the Jacobian |dw/domega| and 1/2 is S^{zz} = 1/2 * S^{+-}
-    //return(w < wu && w > wl ? 2.0 * 0.5 * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(w * w - wl * wl)))/PI))/sqrt(wu * wu - w * w) : 0.0);
+    // 0.25 factor:  1/4 * 2 * 1/2, where 1/4 comes from Bougourzi, 2 is the Jacobian |dw/domega|
+    // and 1/2 is S^{zz} = 1/2 * S^{+-}
     DP expmtwoPIrhooverxi = exp(-twoPI * rho/xi);
-    //return(w < wu && w > wl ? 2.0 * exp(-Itable.Return_val (rho))/(sqrt(wu * wu - w * w) * (cosh(twoPI * rho/xi) + cos(PI/xi))) : 0.0);
     return(w < wu && w > wl ? 2.0 * pow(1.0 + 1.0/xi, 2.0) * exp(-Itable.Return_val (rho))
 	   * expmtwoPIrhooverxi/(sqrt(wu * wu - w * w)
 				 * (0.5 * (1.0 + expmtwoPIrhooverxi * expmtwoPIrhooverxi) + expmtwoPIrhooverxi * cos(PI/xi)))
@@ -236,30 +235,6 @@ namespace ABACUS {
 
     return(Szz_XXZ_h0_2spinons (Delta, args[0], args[1], Itable));
 
-    /*
-
-    // This uses args[0] = k, args[1] = w, args[2] = xi
-
-    DP Delta = cos(PI/(args[2] + 1.0));
-
-    DP vF = Fermi_velocity_XXZ_h0 (Delta); // in units of omega
-
-    //DP wu = twoPI * sin(0.5 * k);
-    DP wu = 4.0 * vF * sin(0.5 * args[0]);
-    //DP wl = PI * fabs(sin(k));
-    DP wl = 2.0 * vF * fabs(sin(args[0]));
-
-    DP rho = acosh(sqrt((wu * wu - wl * wl)/(args[1] * args[1] - wl * wl)))/PI;
-    DP expmtwoPIrhooverxi = exp(-twoPI * rho/args[2]);
-
-    // 0.5 factor:  1 from Bougourzi, and 1/2 is S^{zz} = 1/2 * S^{+-}
-    return(args[1] < wu && args[1] > wl ?
-	   //0.5 * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(args[1] * args[1] - wl * wl)))/PI))/sqrt(wu * wu - args[1] * args[1]) : 0.0);
-	   pow(1.0 + 1.0/args[2], 2.0) * exp(-Itable.Return_val (rho)) * expmtwoPIrhooverxi
-	   /(sqrt(wu * wu - args[1] * args[1])
-	     * (0.5 * (1.0 + expmtwoPIrhooverxi * expmtwoPIrhooverxi) + expmtwoPIrhooverxi * cos(PI/args[2]))) : 0.0);
-
-    */
   }
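
Szz_XXZ_h0_2spinons above only returns a nonzero value on the two-spinon continuum wl < w < wu, with wl = 2*vF*|sin k| and wu = 4*vF*sin(k/2) in the rescaled units w = 2*omega. A small sketch of just that support test, taking the Fermi velocity as an input rather than calling Fermi_velocity_XXZ_h0, so the names and the sample values below are placeholders.

#include <cmath>
#include <cstdio>

// Illustrative support test for the two-spinon continuum, in the rescaled units w = 2*omega.
bool in_two_spinon_continuum(double k, double w, double vF)
{
  double wl = 2.0 * vF * std::fabs(std::sin(k));  // lower boundary
  double wu = 4.0 * vF * std::sin(0.5 * k);       // upper boundary
  return (w > wl) && (w < wu);
}

int main()
{
  const double PI = 3.141592653589793;
  // vF = pi/2 reproduces the commented-out isotropic boundaries PI*|sin k| and 2*PI*sin(k/2).
  double vF = 0.5 * PI;
  std::printf("%d %d\n",
              in_two_spinon_continuum(0.5 * PI, 4.0, vF),   // inside the continuum
              in_two_spinon_continuum(0.5 * PI, 6.0, vF));  // above the upper boundary
}
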
 
   DP Szz_XXZ_h0_2spinons_alt (Vect_DP args, Integral_table Itable)
@@ -283,30 +258,6 @@ namespace ABACUS {
     DP Jacobian = sqrt(omega * omega - omegalow * omegalow);
 
     return(Jacobian * Szz_XXZ_h0_2spinons (Delta, args[0], omega, Itable));
-
-    /*
-    //DP wu = twoPI * sin(0.5 * k);
-    DP wu = 4.0 * vF * sin(0.5 * args[0]);
-    //DP wl = PI * fabs(sin(k));
-    DP wl = 2.0 * vF * fabs(sin(args[0]));
-
-    //DP w = wu * cos(args[1]);
-    //DP factor = 1.0;
-    DP w = wl * cosh(args[1]);
-
-    if (w >= wu || w <= wl) return(0.0);
-
-    DP factor = sqrt((w * w - wl * wl)/(wu * wu - w * w));
-
-    DP rho = acosh(sqrt((wu * wu - wl * wl)/(w * w - wl * wl)))/PI;
-    DP expmtwoPIrhooverxi = exp(-twoPI * rho/args[2]);
-
-    // 0.5 factor:  1 from Bougourzi, and 1/2 is S^{zz} = 1/2 * S^{+-}
-    //return(factor * 0.5 * exp(-Itable.Return_val (acosh(sqrt((wu * wu - wl * wl)/(w * w - wl * wl)))/PI)));
-    return(pow(1.0 + 1.0/args[2], 2.0) * factor
-	   * exp(-Itable.Return_val (rho)) * expmtwoPIrhooverxi/(0.5 * (1.0 + expmtwoPIrhooverxi * expmtwoPIrhooverxi) + expmtwoPIrhooverxi * cos(PI/args[2])));
-	   */
-
   }
 
   DP Szz_XXZ_h0_2spinons_omega (Vect_DP args, Integral_table Itable)
@@ -450,7 +401,8 @@ namespace ABACUS {
     args[0] = 0.0;
     args[1] = xi;
 
-    return(-0.25 * sqrt(1.0 - Delta * Delta) * (2.0/acos(Delta)) * 2.0 * (Integrate_optimal (Integrand_GSE_XXZ_h0, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts).integ_est));
+    return(-0.25 * sqrt(1.0 - Delta * Delta) * (2.0/acos(Delta)) * 2.0
+	   * (Integrate_optimal (Integrand_GSE_XXZ_h0, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts).integ_est));
   }
 
   DP Integrand_2_fSR_XXZ_h0 (Vect_DP args)
@@ -474,8 +426,10 @@ namespace ABACUS {
     args[0] = 0.0;
     args[1] = xi;
 
-    DP Xavg = -(0.25/(acos(Delta) * sqrt(1.0 - Delta * Delta))) * 2.0 * (Integrate_optimal (Integrand_GSE_XXZ_h0, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts).integ_est)
-      + 0.25 * (Delta/pow(acos(Delta), 2.0)) * 2.0 * (Integrate_optimal (Integrand_2_fSR_XXZ_h0, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts).integ_est);
+    DP Xavg = -(0.25/(acos(Delta) * sqrt(1.0 - Delta * Delta))) * 2.0
+      * (Integrate_optimal (Integrand_GSE_XXZ_h0, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts).integ_est)
+      + 0.25 * (Delta/pow(acos(Delta), 2.0)) * 2.0
+      * (Integrate_optimal (Integrand_2_fSR_XXZ_h0, args, 0, 0.0, tinf, req_prec, req_prec, max_nr_pts).integ_est);
 
     return (-2.0 * (1.0 - cos(k)) * Xavg);
   }
@@ -495,7 +449,9 @@ namespace ABACUS {
     args_to_SF_2p[2] = PI/acos(Delta) - 1.0;
     args_to_SF_2p[3] = ABACUS::max(1.0e-14, 0.01 * req_prec);
 
-    return(((Integrate_optimal_using_table (Szz_XXZ_h0_2spinons_omega, args_to_SF_2p, 1, Itable, omegalow, omegaup, req_prec, req_prec, max_nr_pts)).integ_est/twoPI)/Fixed_k_sumrule_omega_Szz_XXZ_h0(Delta, k, req_prec, max_nr_pts));
+    return(((Integrate_optimal_using_table (Szz_XXZ_h0_2spinons_omega, args_to_SF_2p, 1, Itable,
+					    omegalow, omegaup, req_prec, req_prec, max_nr_pts)).integ_est/twoPI)
+	   /Fixed_k_sumrule_omega_Szz_XXZ_h0(Delta, k, req_prec, max_nr_pts));
   }
 
   DP Szz_XXZ_h0_2spinons_check_fixed_k_Szz_sumrule_alt (DP Delta, DP k, DP req_prec, int max_nr_pts, Integral_table Itable)
@@ -513,7 +469,9 @@ namespace ABACUS {
     args_to_SF_2p[2] = PI/acos(Delta) - 1.0;
     args_to_SF_2p[3] = ABACUS::max(1.0e-14, 0.01 * req_prec);
 
-    return(((Integrate_optimal_using_table (Szz_XXZ_h0_2spinons_omega_alt, args_to_SF_2p, 1, Itable, 0.0, acosh(omegaup/omegalow), req_prec, req_prec, max_nr_pts)).integ_est/twoPI)/Fixed_k_sumrule_omega_Szz_XXZ_h0(Delta, k, req_prec, max_nr_pts));
+    return(((Integrate_optimal_using_table (Szz_XXZ_h0_2spinons_omega_alt, args_to_SF_2p, 1, Itable,
+					    0.0, acosh(omegaup/omegalow), req_prec, req_prec, max_nr_pts)).integ_est/twoPI)
+	   /Fixed_k_sumrule_omega_Szz_XXZ_h0(Delta, k, req_prec, max_nr_pts));
   }
 
 
@@ -527,7 +485,8 @@ namespace ABACUS {
 
     stringstream SF_stringstream;
     string SF_string;
-    SF_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_N_" << N << "_Nom_" << Nomega << "_ommax_" << omegamax << ".dat";
+    SF_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_N_" << N << "_Nom_" << Nomega
+		    << "_ommax_" << omegamax << ".dat";
     SF_string = SF_stringstream.str();
     const char* SF_Cstr = SF_string.c_str();
 
@@ -571,7 +530,8 @@ namespace ABACUS {
 
     stringstream SRC_stringstream;
     string SRC_string;
-    SRC_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_N_" << N << "_Nom_" << Nomega << "_ommax_" << omegamax << ".src";
+    SRC_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_N_" << N << "_Nom_" << Nomega
+		     << "_ommax_" << omegamax << ".src";
     SRC_string = SRC_stringstream.str();
     const char* SRC_Cstr = SRC_string.c_str();
 
@@ -585,7 +545,8 @@ namespace ABACUS {
 
     stringstream SR1_stringstream;
     string SR1_string;
-    SR1_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_N_" << N << "_Nom_" << Nomega << "_ommax_" << omegamax << ".sr1";
+    SR1_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_N_" << N << "_Nom_" << Nomega
+		     << "_ommax_" << omegamax << ".sr1";
     SR1_string = SR1_stringstream.str();
     const char* SR1_Cstr = SR1_string.c_str();
 
@@ -606,9 +567,7 @@ namespace ABACUS {
 
     for (int iK = 1; iK < dim_K; ++iK)
       SR1_outfile << iK << "\t" << K[iK] << "\t" << sr1[iK] * omegamax/(twoPI * Nomega)
-	//<< "\t" << -((1.0 - cos(K[iK])) * 2.0 * (0.25 - log(2.0))/3.0) << "\t"
 		  << "\t" << (1.0 - cos(K[iK])) * sr1factor << "\t"
-	//<< -sr1[iK] * omegamax/(twoPI * Nomega)/((1.0 - cos(K[iK])) * 2.0 * (0.25 - log(2.0))/3.0) << endl;
 		  << sr1[iK] * omegamax/(twoPI * Nomega)/((1.0 - cos(K[iK])) * sr1factor) << endl;
 
     SR1_outfile.close();
@@ -636,7 +595,9 @@ namespace ABACUS {
     DP omegaup = 2.0 * vF * sin(0.5 * K);
     DP omegalow = vF * fabs(sin(K));
 
-    for (int iw = 0; iw < Nomega; ++iw) omega[iw] = 0.99 * omegalow + (1.01 * omegaup - 0.99 * omegalow) * (iw + 0.5)/Nomega; // factor of 1.01: just to have zeroes on either side of continuum.
+    for (int iw = 0; iw < Nomega; ++iw)
+      omega[iw] = 0.99 * omegalow + (1.01 * omegaup - 0.99 * omegalow) * (iw + 0.5)/Nomega;
+    // factors of 0.99 and 1.01: just to have zeroes on either side of the continuum.

 
     DP* SF_XXZ_2p_dat = new DP[Nomega];
 
@@ -658,41 +619,8 @@ namespace ABACUS {
 
     SF_outfile.close();
 
-    // Do sum rule files:
-    /*
-      NOT ACCURATE SINCE WE'RE PLOTTING SOMEWHAT OUTSIDE OF CONTINUUM AS WELL (to get nice figures)
-    stringstream SR1_stringstream;
-    string SR1_string;
-    SR1_stringstream << "Szz_XXZ_h0_2spinons_Delta_" << Delta << "_Kover2PI_" << Kover2PI
-		     << "_Nom_" << Nomega << ".sr1";
-    SR1_string = SR1_stringstream.str();
-    const char* SR1_Cstr = SR1_string.c_str();
-
-    ofstream SR1_outfile;
-    SR1_outfile.open(SR1_Cstr);
-    SR1_outfile.precision(14);
-
-    // Figure out the f-sumrule factor:
-    int Nfsr = 600;
-    int Mfsr = 300;
-    DP Xavg = 0.0;
-    if (Delta > 0.0) Xavg = X_avg ('x', Delta, Nfsr, Mfsr);
-    else Xavg = 0.0;
-
-    DP sr1factor = 1.0;
-    if (Delta > 0.0) sr1factor = -2.0 * Xavg/Nfsr;
-    else sr1factor = 1.0;
-
-    SR1_outfile << Kover2PI << "\t" << sr1 * (omegaup - omegalow)/(twoPI * Nomega)
-		<< "\t" << (1.0 - cos(K)) * sr1factor << "\t"
-		<< sr1 * (omegaup - omegalow)/(twoPI * Nomega)/((1.0 - cos(K)) * sr1factor) << endl;
-
-    SR1_outfile.close();
-    */
-
     return;
   }
 
 
-
 } // namespace ABACUS
diff --git a/src/YOUNG/Young_Tableau.cc b/src/YOUNG/Young_Tableau.cc
index 208d834..94f96a4 100644
--- a/src/YOUNG/Young_Tableau.cc
+++ b/src/YOUNG/Young_Tableau.cc
@@ -20,7 +20,8 @@ using namespace std;
 namespace ABACUS {
 
   Young_Tableau::Young_Tableau () : Nrows(0), Ncols(0), Row_L(0), Col_L(0), id(0LL), maxid(0LL),
-				    map(new long long int [1]), map_computed(false), idnr_reached(0LL), nboxes_reached(-1),
+				    map(new long long int [1]), map_computed(false),
+				    idnr_reached(0LL), nboxes_reached(-1),
 				    dimchoose(0), choose_table(0LL)
   {}
 
@@ -47,32 +48,11 @@ namespace ABACUS {
       }
 
     // Fill map with zeros:
-    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i) map[i] = 0LL;
+    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i)
+      map[i] = 0LL;
 
   }
-  /*  SEGFAULTS
-  Young_Tableau::Young_Tableau (int Nr, int Nc, long long int idnr)
-    : Nrows(Nr), Ncols(Nc), Row_L(new int[Nrows]), Col_L(new int[Ncols]), id(idnr),
-      maxid(choose_lli(Nr + Nc, Nc) - 1LL),
-      map(new long long int [YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1]),
-      map_computed(false), idnr_reached(0LL), nboxes_reached(-1),
-      dimchoose (ABACUS::min(Nr, Nc) + 1),
-      choose_table(new long long int[(Nr + Nc + 1) * dimchoose])
-  {
-    // Constructs Young tableau of given idnr, if consistent with Nr, Nc.
 
-    // Construct the choose_table
-    for (int cti = 0; cti < Nr + Nc + 1; ++cti)
-      for (int ctj = 0; ctj < dimchoose; ++ctj) {
-	if (cti >= ctj) choose_table[dimchoose * cti + ctj] = choose_lli(cti, ctj);
-	else choose_table[dimchoose * cti + ctj] = 0LL;
-      }
-
-    for (int i = 0; i < YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1; ++i) map[i] = 0LL;
-
-    (*this).Set_to_id(idnr);
-  }
-  */
   Young_Tableau::Young_Tableau (const Young_Tableau& RefTableau)  // copy constructor
     : Nrows(RefTableau.Nrows), Ncols(RefTableau.Ncols), Row_L(new int[RefTableau.Nrows]), Col_L(new int[RefTableau.Ncols]),
       id(RefTableau.id), maxid(RefTableau.maxid),
@@ -93,47 +73,11 @@ namespace ABACUS {
       }
 
     // The map:
-    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i) map[i] = RefTableau.map[i];
+    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i)
+      map[i] = RefTableau.map[i];
 
   }
-  /*
-  Young_Tableau::Young_Tableau (int Nr, int Nc, long long int* ref_choose_table, int dimref)
-    : Nrows(Nr), Ncols(Nc), Row_L(new int[Nrows]), Col_L(new int[Ncols]), id(0LL),
-      maxid(choose_lli(Nr + Nc, Nc) - 1LL),
-      //choose_table(new long long int[(Nr + Nc + 1) * (Nr + Nc + 1)]),
-      choose_table(new long long int[(Nr + Nc + 1) * (ABACUS::min(Nr, Nc) + 1)]),
-      //map(new long long int[ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT)]),
-      map(new long long int[YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1]),
-      map_computed(false), idnr_reached(0LL), nboxes_reached(-1)
 
-  {
-    // Constructs empty tableau of appropriate size
-
-    for (int i = 0; i < Nrows; ++i) Row_L[i] = 0;
-    for (int i = 0; i < Ncols; ++i) Col_L[i] = 0;
-
-    // Construct the choose_table
-
-    // Copy entries from reference table
-    for (int cti = 0; cti < ABACUS::min(Nr + Nc + 1, dimref); ++cti)
-      for (int ctj = 0; ctj < ABACUS::min(Nr + Nc + 1, dimref); ++ctj)
-	  choose_table[(Nr + Nc + 1) * cti + ctj] = cti >= ctj ? ref_choose_table[dimref * cti + ctj] : 0LL;
-
-    // add missing parts if there are any
-    if (dimref < Nr + Nc + 1) {
-      for (int cti = 0; cti < Nr + Nc + 1; ++cti)
-	for (int ctj = dimref; ctj < Nr + Nc + 1; ++ctj)
-	  choose_table[(Nr + Nc + 1) * cti + ctj] = 0LL;
-      for (int cti = dimref; cti < Nr + Nc + 1; ++cti)
-	for (int ctj = 0; ctj < Nr + Nc + 1; ++ctj)
-	  choose_table[(Nr + Nc + 1) * cti + ctj] = cti >= ctj ? choose_lli(cti, ctj) : 0LL;
-    }
-
-    // The map:
-    //for (int i = 0; i < ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT); ++i) map[i] = 0LL;
-    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i) map[i] = 0LL;
-  }
-  */
   Young_Tableau::Young_Tableau (int Nr, int Nc, const Young_Tableau& RefTableau)
     : Nrows(Nr), Ncols(Nc), Row_L(new int[Nrows]), Col_L(new int[Ncols]), id(0LL),
       maxid(choose_lli(Nr + Nc, Nc) - 1LL),
@@ -163,7 +107,8 @@ namespace ABACUS {
 	  choose_table[dimchoose * cti + ctj] = cti >= ctj ? choose_lli(cti, ctj) : 0LL;
 
     // The map:
-    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i) map[i] = 0LL;
+    for (int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i)
+      map[i] = 0LL;
   }
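
The constructors above size everything off maxid = choose_lli(Nrows + Ncols, Ncols) - 1, i.e. they rely on the fact that the number of Young diagrams fitting inside an Nrows x Ncols box (empty diagram included) is the binomial coefficient C(Nrows + Ncols, Ncols). A quick brute-force check of that count for small boxes, independent of the Young_Tableau class itself:

#include <cstdio>

// Count weakly decreasing row-length vectors (Row_L[0] >= ... >= Row_L[Nr-1], each <= Nc)
// by direct enumeration, and compare with C(Nr + Nc, Nc).
long long count_diagrams(int Nr, int Nc, int level = 0, int prev = -1)
{
  if (prev < 0) prev = Nc;
  if (level == Nr) return 1;
  long long total = 0;
  for (int len = 0; len <= prev; ++len)   // the next row can be at most as long as the previous one
    total += count_diagrams(Nr, Nc, level + 1, len);
  return total;
}

long long binomial(int n, int k)
{
  long long b = 1;
  for (int i = 1; i <= k; ++i) b = b * (n - k + i) / i;  // exact at every step
  return b;
}

int main()
{
  for (int Nr = 1; Nr <= 4; ++Nr)
    for (int Nc = 1; Nc <= 4; ++Nc)
      std::printf("%d x %d box: %lld diagrams, C(%d,%d) = %lld\n",
                  Nr, Nc, count_diagrams(Nr, Nc), Nr + Nc, Nc, binomial(Nr + Nc, Nc));
}
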
 
   Young_Tableau& Young_Tableau::operator= (const Young_Tableau& RefTableau)
@@ -190,7 +135,9 @@ namespace ABACUS {
 
       if (map != 0LL) delete[] map;
       map = new long long int[YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1];
-      for (long long int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2 ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i) map[i] = RefTableau.map[i];
+      for (long long int i = 0; i < (YOUNG_TABLEAU_ID_OPTION == 2
+				     ? ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT) : 1); ++i)
+	map[i] = RefTableau.map[i];
       map_computed = RefTableau.map_computed;
       idnr_reached = RefTableau.idnr_reached;
       nboxes_reached = RefTableau.nboxes_reached;
@@ -263,7 +210,8 @@ namespace ABACUS {
     }
 
     else if (nboxes_to_dist > Ncols * (Nrows - level)) {
-      cout << Nrows << "\t" << Ncols << "\t" << level << "\t" << nboxes_to_dist << "\t" << idnr_reached << "\t" << nboxes_reached << endl;
+      cout << Nrows << "\t" << Ncols << "\t" << level << "\t" << nboxes_to_dist << "\t"
+	   << idnr_reached << "\t" << nboxes_reached << endl;
       ABACUSerror("nboxes_to_dist too high");
     }
     else if (nboxes_to_dist == 0) {
@@ -309,10 +257,11 @@ namespace ABACUS {
 					    + ABACUS::min(highest_occupied_row, Ncols_Desc - j)];
 
 	Vect_INT Desc_Desc_Row_L(highest_occupied_row);
-	for (int i = 0; i < highest_occupied_row; ++i) Desc_Desc_Row_L[i] = Desc_Row_L[i] - Desc_Row_L[highest_occupied_row];
+	for (int i = 0; i < highest_occupied_row; ++i)
+	  Desc_Desc_Row_L[i] = Desc_Row_L[i] - Desc_Row_L[highest_occupied_row];
 
-	answer += Compute_Descendent_id (0, Desc_Desc_Row_L, highest_occupied_row, Ncols_Desc - Desc_Row_L[highest_occupied_row],
-					 RefTableau);
+	answer += Compute_Descendent_id (0, Desc_Desc_Row_L, highest_occupied_row,
+					 Ncols_Desc - Desc_Row_L[highest_occupied_row], RefTableau);
 
       }
     }
@@ -338,7 +287,7 @@ namespace ABACUS {
 	else {
 
 	  for (int j = 0; j < ndiag; ++j) answer += RefTableau.choose_table[RefTableau.dimchoose * Nrows_Desc + j]
-	    * RefTableau.choose_table[RefTableau.dimchoose * Ncols_Desc + j];
+					    * RefTableau.choose_table[RefTableau.dimchoose * Ncols_Desc + j];
 
 	  Vect_INT Desc1_Row_L(ndiag);
 	  for (int i = 0; i < ndiag; ++i) Desc1_Row_L[i] = Desc_Row_L[i] - ndiag;
@@ -381,8 +330,9 @@ namespace ABACUS {
 	int highest_occupied_row = Nrows - 1;
 	while (Row_L[highest_occupied_row] == 0) highest_occupied_row--;  // index of highest occupied row;
 
-	for (int j = 0; j < Row_L[highest_occupied_row]; ++j) idnr += choose_table[dimchoose * (highest_occupied_row + Ncols - j)
-										   + ABACUS::min(highest_occupied_row, Ncols - j)];
+	for (int j = 0; j < Row_L[highest_occupied_row]; ++j)
+	  idnr += choose_table[dimchoose * (highest_occupied_row + Ncols - j)
+			       + ABACUS::min(highest_occupied_row, Ncols - j)];
 
 	Vect_INT Desc_Row_L(highest_occupied_row);
 
@@ -432,7 +382,8 @@ namespace ABACUS {
 
       Compute_id (0);  // sets the id according to rule 0
       Compute_Map (idnr);  // make sure the state map is computed
-      while (map[idnr] != id && idnr < ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT)) idnr++;  // match with inverse map to get the idnr according to rule 2
+      while (map[idnr] != id && idnr < ABACUS::min(maxid + 1LL, TABLEAU_ID_UPPER_LIMIT))
+	idnr++;  // match with inverse map to get the idnr according to rule 2
     }
 
     else ABACUSerror("Wrong option for Tableau ids");
@@ -448,13 +399,15 @@ namespace ABACUS {
     return(*this);
   }
 
-  Young_Tableau& Young_Tableau::Set_to_id (long long int idnr, int option) // sets the tableau to the one corresponding to idnr
+  Young_Tableau& Young_Tableau::Set_to_id (long long int idnr, int option)
+  // sets the tableau to the one corresponding to idnr
   {
 
     if (option == 0) {
 
       if ((idnr < 0) || ((maxid < idnr) && (Nrows*Ncols != 0))) {
-	cout << "Nrows = " << Nrows << "\tNcols = " << Ncols << "\tmaxid = " << maxid << "\trequested id = " << idnr << endl;
+	cout << "Nrows = " << Nrows << "\tNcols = " << Ncols
+	     << "\tmaxid = " << maxid << "\trequested id = " << idnr << endl;
 	ABACUSerror("Wrong idnr in Set_to_id for Young Tableau.");
       }
       id = idnr;
@@ -489,7 +442,8 @@ namespace ABACUS {
 
     else if (option == 1) {
 
-      if ((idnr < 0LL) || ((maxid < idnr) && (Nrows*Ncols != 0))) ABACUSerror("Wrong idnr in Set_to_id for Young Tableau.");
+      if ((idnr < 0LL) || ((maxid < idnr) && (Nrows*Ncols != 0)))
+	ABACUSerror("Wrong idnr in Set_to_id for Young Tableau.");
 
       if (Nrows*Ncols == 0 && idnr != 0LL) ABACUSerror("Trying nonzero id on empty Tableau.");
 
@@ -508,7 +462,8 @@ namespace ABACUS {
 	  sum += choose_table[dimchoose * Nrows + ndiag] * choose_table[dimchoose * Ncols + ndiag];
 	}
 
-	long long int residual_id = idnr - 1 - sum + choose_table[dimchoose * Nrows + ndiag] * choose_table[dimchoose * Ncols + ndiag];
+	long long int residual_id = idnr - 1 - sum + choose_table[dimchoose * Nrows + ndiag]
+	  * choose_table[dimchoose * Ncols + ndiag];
 
 	if (ndiag == 0 && idnr != 0LL) ABACUSerror("Zero ndiag for nonzero idnr in Tableau.");
 
@@ -561,9 +516,12 @@ namespace ABACUS {
 
   Young_Tableau& Young_Tableau::Set_Row_L (Vect_INT& Row_Lengths)  // set row lengths to elements of given vector
   {
-    if (Row_Lengths.size() != Nrows) ABACUSerror("Vector of incompatible dimension used to initialize Young Tableau.");
+    if (Row_Lengths.size() != Nrows)
+      ABACUSerror("Vector of incompatible dimension used to initialize Young Tableau.");
 
-    for (int i = 0; i < Row_Lengths.size() - 1; ++i) if (Row_Lengths[i] < Row_Lengths[i+1]) ABACUSerror("Vector is not a proper Young tableau.");
+    for (int i = 0; i < Row_Lengths.size() - 1; ++i)
+      if (Row_Lengths[i] < Row_Lengths[i+1])
+	ABACUSerror("Vector is not a proper Young tableau.");
 
     for (int i = 0; i < Nrows; ++i) Row_L[i] = Row_Lengths[i];
     (*this).Set_Col_L_given_Row_L();
@@ -663,8 +621,6 @@ namespace ABACUS {
   {
     // adds a box to the lowest nonzero length Row, recomputes id, returns true if tableau has changed
 
-    //cout << "Check before: "; (*this).Print();
-
     if (id == 0LL || Nrows == 0 || Ncols == 0) return(false);  // Tableau is empty
 
     // otherwise find the lowest nonzero row:
@@ -674,13 +630,12 @@ namespace ABACUS {
     if (iln0r < 0) ABACUSerror("id wrongly set in Young_Tableau (Raise_Lowest_Nonzero_Row).");
     // This should not happen, since if iln0r == -1, id should be 0.
 
-    else if (iln0r == 0 && Row_L[0] < Ncols || iln0r > 0 && Row_L[iln0r - 1] > Row_L[iln0r]) {  // there is space for at least one more box !
+    else if ((iln0r == 0 && Row_L[0] < Ncols) || (iln0r > 0 && Row_L[iln0r - 1] > Row_L[iln0r])) {
+      // there is space for at least one more box !
       Row_L[iln0r] += 1;
       Set_Col_L_given_Row_L();
       Compute_id();
 
-      //cout << "Check after: iln0r = " << iln0r; (*this).Print();
-
       return(true);
     }
 
@@ -693,28 +648,20 @@ namespace ABACUS {
 
     // Important:  allow raising first row if tableau is empty.
 
-    //cout << "Check before: "; (*this).Print();
-
     if (Ncols == 0 || Nrows == 0) return(false); // no space !
 
     // Find index of lowest nonzero row:  can be -1 if Tableau is empty
     int iln0r = Nrows - 1;
     while (Row_L[iln0r] == 0 && iln0r >= 0) iln0r--;
 
-    //cout << "iln0r = " << iln0r << "\t" << Row_L[iln0r] << "\t" << Row_L[iln0r + 1] << endl;
-
-    //if (iln0r == Nrows - 1) return(false); // no row under that one;  allow raising of row 0
-
     if (iln0r == -1 && Row_L[0] < Ncols || iln0r >= 0 && iln0r < Nrows - 1 && Row_L[iln0r] > Row_L[iln0r + 1]) {
-	// there is space for at least one more box !
-	Row_L[iln0r + 1] += 1;
-	Set_Col_L_given_Row_L();
-	Compute_id();
+      // there is space for at least one more box !
+      Row_L[iln0r + 1] += 1;
+      Set_Col_L_given_Row_L();
+      Compute_id();
 
-	//cout << "Check after: iln0r = " << iln0r; (*this).Print();
-
-	return(true);
-      }
+      return(true);
+    }
 
     return(false);
   }
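
Raise_Lowest_Nonzero_Row and Raise_Next_to_Lowest_Nonzero_Row both hinge on the same admissibility rule: a row of a Young diagram can take one more box only if it stays no longer than the row above it (and no longer than Ncols for the top row). A one-function sketch of that rule, detached from the id bookkeeping done above; the function name and sample values are placeholders.

#include <cstdio>
#include <vector>

// Illustrative only: can row i of a diagram with row lengths Row_L (inside an Nrows x Ncols box)
// be lengthened by one box while keeping the rows weakly decreasing?
bool can_add_box(const std::vector<int>& Row_L, int Ncols, int i)
{
  if (i < 0 || i >= static_cast<int>(Row_L.size())) return false;
  if (Row_L[i] >= Ncols) return false;          // no room to the right
  return i == 0 || Row_L[i - 1] > Row_L[i];     // must stay <= the row above after adding
}

int main()
{
  std::vector<int> rows = {3, 3, 1};
  // Prints 1 0 1: row 0 has room (3 < 4), row 1 would overtake row 0, row 2 can grow to 2 <= 3.
  std::printf("%d %d %d\n", can_add_box(rows, 4, 0), can_add_box(rows, 4, 1), can_add_box(rows, 4, 2));
}
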
@@ -836,8 +783,6 @@ namespace ABACUS {
     // descendents considered here are thus those for which the raised
     // box is the highest still occupied box of the originally boosted state.
 
-    //cout << "Tableau in Desc_Boosted: " << (*this) << endl;
-
     int ndesc = 0;
 
     // Is tableau non-empty ?
@@ -860,19 +805,17 @@ namespace ABACUS {
     if (!fixed_Nboxes) {
       // The convention here is that we *remove* the highest yet unraised box only
 
-      //cout << "Removing box from " << (*this) << " with id " << (*this).id << endl;
       Young_Tableau descendent_attempt = (*this);
       if (descendent_attempt.Lower_Row(level_from)) ndesc = 1;
-      //cout << "Obtained: " << descendent_attempt << " with id " << descendent_attempt.id << endl;
 
       //if (ndesc > 0) {
       if (ndesc == 1) {
 	Vect<Young_Tableau> Tableau_desc(ndesc);
-	//if (ndesc == 1) Tableau_desc[0] = descendent_attempt;
 	Tableau_desc[0] = descendent_attempt;
 	return(Tableau_desc);
       }
-      else if (ndesc != 0) ABACUSerror("There should be either 0 or 1 descendents in Descended_Boosted_State with fixed_iK == true.");
+      else if (ndesc != 0)
+	ABACUSerror("There should be either 0 or 1 descendents in Descended_Boosted_State with fixed_iK == true.");
 
     } // if (!fixed_Nboxes)
 
@@ -886,10 +829,12 @@ namespace ABACUS {
       Young_Tableau Tableau_ref = (*this);
       Young_Tableau Tableau_check1 = (*this);
       bool check1 = (Tableau_check1.Lower_Row(level_from) && Tableau_check1.Raise_Lowest_Nonzero_Row());
-      if (check1 && Tableau_check1.Row_L[level_from] == Tableau_ref.Row_L[level_from] - 1) ndesc++;  // to make sure we don't Raise the one we've just removed
+      if (check1 && Tableau_check1.Row_L[level_from] == Tableau_ref.Row_L[level_from] - 1)
+	ndesc++;  // to make sure we don't Raise the one we've just removed
       Young_Tableau Tableau_check2 = (*this);
       bool check2 = (Tableau_check2.Lower_Row(level_from) && Tableau_check2.Raise_Next_to_Lowest_Nonzero_Row());
-      if (check2 && Tableau_check2.Row_L[level_from] == Tableau_ref.Row_L[level_from] - 1) ndesc++;   // to make sure we don't Raise the one we've just removed
+      if (check2 && Tableau_check2.Row_L[level_from] == Tableau_ref.Row_L[level_from] - 1)
+	ndesc++;   // to make sure we don't Raise the one we've just removed
 
       if (ndesc > 0) {
 	Vect<Young_Tableau> Tableau_desc(ndesc);
@@ -908,8 +853,6 @@ namespace ABACUS {
     // tries to add Nboxes to Tableau, returns number of boxes added.
     if (Ncols == 0 || Nrows == 0) return(0); // can't do anything !
 
-    //cout << "Requesting Nboxes " << Nboxes << " in tableau." << endl;
-
     int Nboxes_added = 0;
     int previous_Row_L = 0;
     for (int working_level = 0; working_level < Nrows; ++working_level) {