Merge [TRAFODION-2802] PR 1353 Prepare build environment in one command
diff --git a/DISCLAIMER b/DISCLAIMER
deleted file mode 100644
index 7f1f194..0000000
--- a/DISCLAIMER
+++ /dev/null
@@ -1,12 +0,0 @@
-Apache Trafodion is an effort undergoing incubation at The Apache Software 
-Foundation (ASF), sponsored by the Apache Incubator PMC. Incubation is 
-required of all newly accepted projects until a further review indicates that 
-the infrastructure, communications, and decision making process have stabilized
-in a manner consistent with other successful ASF projects. While incubation 
-status is not necessarily a reflection of the completeness or stability of the
-code, it does indicate that the project has yet to be fully endorsed by the ASF.
-
-For more information about the incubation status of the Apache Trafodion see:
-   http://incubator.apache.org/projects/trafodion.html
-
-
diff --git a/Makefile b/Makefile
index c69e51e..3a67ccb 100644
--- a/Makefile
+++ b/Makefile
@@ -36,15 +36,15 @@
 	@echo "Packaging all Trafodion components"
 	cd core && $(MAKE) package-all 
 
-package-src: $(SRCDIR)-${TRAFODION_VER}-incubating/LICENSE
+package-src: $(SRCDIR)-${TRAFODION_VER}/LICENSE
 	@echo "Packaging source for $(TRAFODION_VER_PROD) $(TRAFODION_VER)"
 	mkdir -p distribution
-	git archive --format tar --prefix $(SRCDIR)-${TRAFODION_VER}-incubating/ HEAD > distribution/$(SRCDIR)-${TRAFODION_VER}-incubating-src.tar
-	tar rf distribution/$(SRCDIR)-${TRAFODION_VER}-incubating-src.tar $^
-	gzip distribution/$(SRCDIR)-${TRAFODION_VER}-incubating-src.tar
-	rm -rf $(SRCDIR)-${TRAFODION_VER}-incubating LICENSE
+	git archive --format tar --prefix $(SRCDIR)-${TRAFODION_VER}/ HEAD > distribution/$(SRCDIR)-${TRAFODION_VER}-src.tar
+	tar rf distribution/$(SRCDIR)-${TRAFODION_VER}-src.tar $^
+	gzip distribution/$(SRCDIR)-${TRAFODION_VER}-src.tar
+	rm -rf $(SRCDIR)-${TRAFODION_VER} LICENSE
 
-$(SRCDIR)-${TRAFODION_VER}-incubating/LICENSE:
+$(SRCDIR)-${TRAFODION_VER}/LICENSE:
 	cd licenses && $(MAKE) LICENSE-src
 	mkdir -p $(@D)
 	cp licenses/LICENSE-src $@
@@ -57,7 +57,7 @@
 	@echo "Removing Trafodion objects"
 	cd core && $(MAKE) clean 
 	cd licenses && $(MAKE) clean
-	rm -rf $(SRCDIR)-${TRAFODION_VER}-incubating LICENSE
+	rm -rf $(SRCDIR)-${TRAFODION_VER} LICENSE
 
 cleanall:
 	@echo "Removing all Trafodion objects"
diff --git a/README b/README
index 8351595..ce58dc5 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-******************************* Apache Trafodion (incubating) **************************
+******************************* Apache Trafodion **************************
 
 Apache Trafodion is a webscale SQL-on-Hadoop solution enabling transactional or 
 operational workloads on Hadoop.  The name "Trafodion" (the Welsh word for 
@@ -13,19 +13,19 @@
 
   - documentation for this release can be viewed on the Apache Trafodion 
     wiki page: 
-      http://trafodion.incubator.apache.org/documentation.html
+      http://trafodion.apache.org/documentation.html
 
   - release notes describing changes for particular releases can be viewed:
       http://trafodion.apache.org/release-notes.html
 
-  - the latest Apache Trafodion can be downloaded from Apache incubator
+  - the latest Apache Trafodion can be downloaded from the Apache
     distribution site or accessed from git
 
     - download site:
-      https://dist.apache.org/repos/dist/release/incubator/trafodion
+      https://dist.apache.org/repos/dist/release/trafodion
 
     - git site:
-      git@github.com:apache/incubator-trafodion  
+      git@github.com:apache/trafodion  
 
   - To build and try out Apache Trafodion, please following our build 
     instructions as described:
diff --git a/README.md b/README.md
index 3b93d2c..9e72337 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-# Apache Trafodion (incubating)
+# Apache Trafodion
 ![trafodion logo](http://trafodion.apache.org/images/logos/trafodion-logo.jpg)
 
 Apache Trafodion is a webscale SQL-on-Hadoop solution enabling transactional or 
@@ -20,14 +20,14 @@
   
   http://trafodion.apache.org/release-notes.html
 
-  - the latest Apache Trafodion can be downloaded from Apache incubator
+  - the latest Apache Trafodion can be downloaded from the Apache
     distribution site or accessed from git:
 
     - download site:
-      https://dist.apache.org/repos/dist/release/incubator/trafodion
+      https://dist.apache.org/repos/dist/release/trafodion
 
     - git site:
-      `git@github.com:apache/incubator-trafodion`
+      `git@github.com:apache/trafodion`
 
   - To build and try out Apache Trafodion, please following our build 
     instructions as described:
diff --git a/core/Makefile b/core/Makefile
index 19236dd..bbd0878 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -178,7 +178,7 @@
 
 ifeq ($(SQ_BUILD_TYPE),release)
   RELEASE ?= 1
-  SERVERTAR="$(PKG_PROD)_server-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}-incubating.tar.gz"
+  SERVERTAR="$(PKG_PROD)_server-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}.tar.gz"
 else
   RELEASE ?= devel
   SERVERTAR="$(PKG_PROD)_server-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}-debug.tar.gz"
diff --git a/core/conn/Makefile b/core/conn/Makefile
index 5197837..f853896 100644
--- a/core/conn/Makefile
+++ b/core/conn/Makefile
@@ -30,7 +30,7 @@
 P_TYPE = $(shell uname -p)
 
 ifeq ($(SQ_BUILD_TYPE),release)
-  CLIENT_TAR	?= ../../${DISTRIBUTION_DIR}/apache-trafodion_clients-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}-incubating.tar.gz
+  CLIENT_TAR	?= ../../${DISTRIBUTION_DIR}/apache-trafodion_clients-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}.tar.gz
 else
   CLIENT_TAR	?= ../../${DISTRIBUTION_DIR}/apache-trafodion_clients-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}-debug.tar.gz
 endif
@@ -39,7 +39,7 @@
 .PHONY: all
 all: pkg-clients
 
-pkg-clients: clients/LICENSE clients/NOTICE clients/DISCLAIMER
+pkg-clients: clients/LICENSE clients/NOTICE 
 	mkdir -p $$(dirname $(CLIENT_TAR))
 	tar -zcvf $(CLIENT_TAR) clients
 
@@ -52,9 +52,6 @@
 clients/NOTICE: ../../NOTICE
 	cp -f $? $@
 
-clients/DISCLAIMER: ../../DISCLAIMER
-	cp -f $? $@
-
 clean:	
 	$(RM) -rf $(TRAF_HOME)/../conn/clients 
 	$(RM) -f $(TRAF_HOME)/../conn/*.mf
diff --git a/core/conn/jdbcT4/pom.xml b/core/conn/jdbcT4/pom.xml
index 8f83ecf..1750c42 100644
--- a/core/conn/jdbcT4/pom.xml
+++ b/core/conn/jdbcT4/pom.xml
@@ -51,9 +51,9 @@
 </distributionManagement>
  
  <scm>
-    <connection>scm:git:https://github.com/apache/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://github.com/apache/incubator-trafodion.git</developerConnection>
-    <url>https://github.com/apache/incubator-trafodion.git</url>
+    <connection>scm:git:https://github.com/apache/trafodion.git</connection>
+    <developerConnection>scm:git:https://github.com/apache/trafodion.git</developerConnection>
+    <url>https://github.com/apache/trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
diff --git a/core/conn/jdbcT4/src/main/java/T4Messages.properties b/core/conn/jdbcT4/src/main/java/T4Messages.properties
index 3d0a43c..3b2e661 100644
--- a/core/conn/jdbcT4/src/main/java/T4Messages.properties
+++ b/core/conn/jdbcT4/src/main/java/T4Messages.properties
@@ -212,7 +212,7 @@
 numeric_out_of_range_sqlstate=22003
 numeric_out_of_range_sqlcode=29046
 
-batch_command_failed_msg=Batch Update Failed, See next exception for details
+batch_command_failed_msg=Batch Update Failed, you can use getNextException() for more details, first getNextException() message is {0} 
 batch_command_failed_sqlstate=HY000
 batch_command_failed_sqlcode=29047
 
diff --git a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
index 876e831..ce7c980 100644
--- a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
+++ b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
@@ -541,7 +541,7 @@
 			stmtLabel = stmtLabel.toUpperCase();
 		}
 
-		TrafT4CallableStatement stmt;
+		TrafT4CallableStatement stmt = null;
 
 		clearWarnings();
 		if (_isClosed() == true) {
@@ -557,8 +557,11 @@
 					return stmt;
 				}
 			}
-
-			stmt = new TrafT4CallableStatement(this, sql, stmtLabel);
+            if (stmtLabel.equalsIgnoreCase("null")) {
+                stmt = new TrafT4CallableStatement(this, sql);
+            } else {
+                stmt = new TrafT4CallableStatement(this, sql, stmtLabel);
+            }
 			stmt.prepareCall(stmt.sql_, stmt.queryTimeout_, stmt.resultSetHoldability_);
 
 			if (isStatementCachingEnabled()) {
diff --git a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java
index c07e204..ee1268d 100644
--- a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java
+++ b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java
@@ -313,7 +313,7 @@
 			} catch (SQLException e) {
 				BatchUpdateException be;
 				se = TrafT4Messages.createSQLException(connection_.props_, connection_.getLocale(),
-						"batch_command_failed", null);
+						"batch_command_failed", e.getMessage());
 				if (batchRowCount_ == null) // we failed before execute
 				{
 					batchRowCount_ = new int[paramRowCount_];
diff --git a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Statement.java b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Statement.java
index da6e843..e5e4962 100644
--- a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Statement.java
+++ b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Statement.java
@@ -416,7 +416,7 @@
 				BatchUpdateException be;
 
 				se = TrafT4Messages.createSQLException(connection_.props_, connection_.getLocale(),
-						"batch_command_failed", null);
+						"batch_command_failed", e.getMessage());
 				be = new BatchUpdateException(se.getMessage(), se.getSQLState(), batchRowCount_);
 				be.setNextException(e);
 
diff --git a/core/conn/odb/src/odb.c b/core/conn/odb/src/odb.c
index 31bdb95..c5242f7 100755
--- a/core/conn/odb/src/odb.c
+++ b/core/conn/odb/src/odb.c
@@ -5772,8 +5772,14 @@
                         } 
                         if ( etab[no].ps ) { /* Multi-stream table analysis */
                             if ( etab[no].sb ) {    /* split by */
-                                snprintf((char *)Obuf[0], sizeof(Obuf[0]),
-                                    "SELECT MIN(%s), MAX(%s) FROM %s", etab[no].sb, etab[no].sb, etab[no].src);
+                                if (etab[no].map) { /* if a pwhere condition was given, apply it here to improve performance */
+                                    snprintf((char *)Obuf[0], sizeof(Obuf[0]),
+                                        "SELECT MIN(%s), MAX(%s) FROM %s WHERE %s", etab[no].sb, etab[no].sb, etab[no].src, etab[no].map);
+                                }
+                                else {
+                                    snprintf((char *)Obuf[0], sizeof(Obuf[0]),
+                                        "SELECT MIN(%s), MAX(%s) FROM %s", etab[no].sb, etab[no].sb, etab[no].src);
+                                }
                                 if (!SQL_SUCCEEDED(Oret=SQLExecDirect (Os1, Obuf[0], SQL_NTS))) {
                                     Oerr(-1, -1, __LINE__, Os1, SQL_HANDLE_STMT);
                                     goto etabadd_exit;
diff --git a/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp b/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
index 9b5e5e0..c716998 100644
--- a/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
+++ b/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
@@ -647,7 +647,7 @@
 	pBuffer += length;
 	if (sqlWarningOrErrorLength > 0) pBuffer  = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
 
-	tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+	tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
 	pBuffer    = writeTraceMsg(pBuffer, temp, tempStrlen);
 	if (outValuesLength > 0) pBuffer = printHex(pBuffer, outValues, outValuesLength);
 
@@ -1243,7 +1243,7 @@
   if (sqlWarningOrErrorLength > 0) 
     pBuffer  = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
 
-  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
   pBuffer    = writeTraceMsg(pBuffer, temp, tempStrlen);
   if (outValuesLength > 0)
      pBuffer = printHex(pBuffer, outValues, outValuesLength);
@@ -1318,7 +1318,7 @@
   if (sqlWarningOrErrorLength > 0) 
     pBuffer  = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
 
-  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
   pBuffer    = writeTraceMsg(pBuffer, temp, tempStrlen);
   if (outValuesLength > 0)
      pBuffer = printHex(pBuffer, outValues, outValuesLength);
diff --git a/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp b/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
index a0aaf26..3e15202 100644
--- a/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
+++ b/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
@@ -6286,7 +6286,7 @@
 				ControlQueryLen = ControlQueryLen + 4;
 				break;
 			case 6:
-				sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from  %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, verBuffer, verBuffer, atol(verBuffer), schemaToken, tableName);
+				sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from  %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName);
 				strcpy(HashTableInfo+ControlQueryLen, ";HE="); // HE means Guesstimated rowset size. Change 128 to HP soon.
 				ControlQueryLen = ControlQueryLen + 4;
 				break;
diff --git a/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp b/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
index ccbb689..465a224 100644
--- a/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
+++ b/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
@@ -972,7 +972,7 @@
 			{
 				char *RGWarningOrError;
 				RGWarningOrError = new char[256];
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 				delete RGWarningOrError;
@@ -999,7 +999,7 @@
 			{
 				char *RGWarningOrError;
 				RGWarningOrError = new char[256];
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 				delete RGWarningOrError;
@@ -1265,7 +1265,7 @@
 			{
 				char RGWarningOrError[256];
 
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 			}
@@ -1294,7 +1294,7 @@
 			{
 				char *RGWarningOrError;
 				RGWarningOrError = new char[256];
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 				delete RGWarningOrError;
@@ -2233,7 +2233,7 @@
 		{
 			char RGWarningOrError[256];
 
-			sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+			sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 			sprintf( RGWarningOrError
                   , "The query's estimated cost: %.50s exceeded resource management attribute limit set."
                   , b
@@ -2260,7 +2260,7 @@
 			char *RGWarningOrError;
 
 			RGWarningOrError = new char[256];
-			sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+			sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 			sprintf( RGWarningOrError
                   , "The query's estimated cost: %.50s exceeded resource management attribute limit set."
                   , b
@@ -5078,7 +5078,7 @@
                     "cast('%s' as varchar(128)) TABLE_CAT, "
                     "cast(trim(ob_table.SCHEMA_NAME) as varchar(128)) TABLE_SCHEM, "
                     "cast(trim(ob_table.OBJECT_NAME) as varchar(128)) TABLE_NAME, "
-                    "cast(idx.is_unique as smallint) NON_UNIQUE, "
+                    "cast(case when idx.is_unique = 1 then 0 else 1 end as smallint) NON_UNIQUE, "
                     "cast(NULL as varchar(128)) INDEX_QUALIFIER, " // not support
                     "cast(trim(ob.OBJECT_NAME) as varchar(128)) INDEX_NAME, "
                     "cast(3 as smallint) TYPE, " // SQL_INDEX_OTHER
diff --git a/core/conn/trafci/Makefile b/core/conn/trafci/Makefile
index 5425ca2..bc0b26f 100644
--- a/core/conn/trafci/Makefile
+++ b/core/conn/trafci/Makefile
@@ -29,9 +29,9 @@
 Trafci_jar:
 	mvn clean install -DskipTests$
 Trafci_installer:
-	./getBuildInfo$
-	export buildId=`head -n 1 buildId`$
-	mvn -f installer_pom.xml -DbldId=${buildId} clean package -DskipTests$ 
+	./getBuildInfo
+	export buildId=`head -n 1 buildId`
+	mvn -f installer_pom.xml -DbldId=${buildId} clean package -DskipTests
 Trafci_deploy:
 	zip -r ../clients/trafci.zip -j target/trafciInstaller.jar utils/README
 	java -jar target/trafciInstaller.jar -silent -installDir ${TRAF_HOME}
diff --git a/core/conn/trafci/install/Installer.java-tmpl b/core/conn/trafci/install/Installer.java-tmpl
index 83f3510..d85e1fc 100644
--- a/core/conn/trafci/install/Installer.java-tmpl
+++ b/core/conn/trafci/install/Installer.java-tmpl
@@ -3287,12 +3287,8 @@
 

       StringBuffer path = new StringBuffer();

       for (String jarFile : jarFiles) {

-         if (isTrafHome) {

-            path.append(INSTALL_DIR_STR);

-         } else {

             path.append(outputDir.getAbsolutePath());

-         }

-         path.append(File.separator).append(jarFile).append(File.pathSeparator);

+	    path.append(File.separator).append(jarFile).append(File.pathSeparator);

       }

       String replaceStr = path.substring(0, path.length() - File.pathSeparator.length());

 

@@ -3318,7 +3314,19 @@
       replacePythonLib=replacePythonLib.replaceAll("\\$","\\\\\\$");

            

       batchfile=batchfile.replaceAll(classVarToReplace,replace).replaceAll("#####PATH#####",File.separator+File.separator+File.separator+File.separator);

+      String[] split = replacePython.split(File.pathSeparator);

+		List list = new ArrayList<String>();

+		for (String str : split) {

+			list.add("\""+str+"\"");

+		}

+      replacePython = list.toString();

       batchfile=batchfile.replaceAll(pythonClassVarToReplace,replacePython).replaceAll("#####PATH#####",File.separator+File.separator+File.separator+File.separator);

+     /* String[] split = batchfile.split(File.pathSeparator);

+		List list = new ArrayList<String>();

+		for (String str : split) {

+			list.add(str);

+		}

+      batchfile = list.toString(); */

       batchfile=batchfile.replaceAll(perlClassVarToReplace,replacePerl).replaceAll("#####PATH#####",File.separator+File.separator+File.separator+File.separator);

       batchfile =batchfile.replaceAll(perlLibPathToReplace, replacePerlLib).replaceAll("#####PATH#####", File.separator + File.separator + File.separator + File.separator);

       batchfile =batchfile.replaceAll(pythonLibPathToReplace, replacePythonLib).replaceAll("#####PATH#####", File.separator + File.separator + File.separator + File.separator);

diff --git a/core/conn/trafci/lib/python/Session.py b/core/conn/trafci/lib/python/Session.py
old mode 100644
new mode 100755
index 7106ed0..5b2dc12
--- a/core/conn/trafci/lib/python/Session.py
+++ b/core/conn/trafci/lib/python/Session.py
@@ -1,3 +1,4 @@
+#!/usr/bin/jython
 # @@@ START COPYRIGHT @@@
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -19,6 +20,7 @@
 #
 # @@@ END COPYRIGHT @@@
 import sys
+sys.path += ##TRAFCI_PYTHON_CLASSPATH##
 from org.trafodion.ci import ScriptsInterface
 from java.lang import System
 from java.io import PrintStream
diff --git a/core/conn/trafci/samples/sampleDDL.py b/core/conn/trafci/samples/sampleDDL.py
old mode 100644
new mode 100755
index d890a9f..0b5fde8
--- a/core/conn/trafci/samples/sampleDDL.py
+++ b/core/conn/trafci/samples/sampleDDL.py
@@ -1,3 +1,4 @@
+#!/usr/bin/jython
 # @@@ START COPYRIGHT @@@
 #
 # Licensed to the Apache Software Foundation (ASF) under one
diff --git a/core/conn/trafci/samples/sampleDML.py b/core/conn/trafci/samples/sampleDML.py
old mode 100644
new mode 100755
index 413166e..79a2b22
--- a/core/conn/trafci/samples/sampleDML.py
+++ b/core/conn/trafci/samples/sampleDML.py
@@ -1,3 +1,4 @@
+#!/usr/bin/jython
 # @@@ START COPYRIGHT @@@
 #
 # Licensed to the Apache Software Foundation (ASF) under one
diff --git a/core/conn/trafci/samples/sampleTables.py b/core/conn/trafci/samples/sampleTables.py
old mode 100644
new mode 100755
index 78dd70a..151a8c4
--- a/core/conn/trafci/samples/sampleTables.py
+++ b/core/conn/trafci/samples/sampleTables.py
@@ -1,3 +1,4 @@
+#!/usr/bin/jython
 # @@@ START COPYRIGHT @@@
 #
 # Licensed to the Apache Software Foundation (ASF) under one
diff --git a/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java b/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
index c3bb9f3..b6240d3 100644
--- a/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
+++ b/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
@@ -661,6 +661,7 @@
       
       try
       {
+         // If stmtName is null, the concatenation below passes the literal string "null", which bypasses the driver's null check (if (stmtName == null) throw exception).
          cStmt = ((TrafT4Connection)conn).prepareCall(queryStr,"\"" + stmtName + "\"");
       } catch (NoSuchMethodError nsme)
       {
diff --git a/core/conn/trafci/src/main/java/org/trafodion/ci/SessionInterface.java b/core/conn/trafci/src/main/java/org/trafodion/ci/SessionInterface.java
index 0243eb1..b5aa53e 100644
--- a/core/conn/trafci/src/main/java/org/trafodion/ci/SessionInterface.java
+++ b/core/conn/trafci/src/main/java/org/trafodion/ci/SessionInterface.java
@@ -415,7 +415,7 @@
                if (!OS_EOF)
                {
             	   if(!sessObj.isSessionStartup()) {
-	                  if  (!userPressedCtrlC && sessObj.isLogCmdText() && sessObj.isLogCmdEcho() && !reader.getConsoleReader().isJline() )
+	                  if  (!userPressedCtrlC && sessObj.isLogCmdText() && sessObj.isLogCmdEcho() && reader.getConsoleReader() != null  && !reader.getConsoleReader().isJline() )
 	                  {
 	                     writer.write(sessObj.getSessionPrompt());
 	                  }
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/DrvrManager/drvrmanager.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/DrvrManager/drvrmanager.cpp
index 9bc0763..5d74a86 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/DrvrManager/drvrmanager.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/DrvrManager/drvrmanager.cpp
@@ -86,6 +86,8 @@
 			}
 			break;
 		case SQL_HANDLE_STMT:
+            if (!IS_VALID_HDBC (InputHandle))
+                return SQL_INVALID_HANDLE;
 		    rc = NeoAllocHandle(SQL_HANDLE_STMT, InputHandle, (SQLHSTMT*)OutputHandle);
 			//rc = SQLAllocStmt(InputHandle,(SQLHSTMT *)OutputHandle);
 			RETURNCODE (InputHandle,rc);
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
index bab28c5..c40fdd6 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
@@ -2307,7 +2307,7 @@
 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second,
 								cTmpFraction);
 				else
-					DataLen = sprintf(cTmpBuf,"%02d",
+					DataLen = sprintf(cTmpBuf,"%02d:%02d:%02d",
 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second);
 				break;
 			case SQLDTCODE_YEAR_TO_HOUR:
@@ -2861,7 +2861,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)
 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
 				else
-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+					sprintf(cTmpBuf,"%ld:%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
 				break;
 			case SQL_INTERVAL_MINUTE:
 				if (intervalTmp->interval_sign == SQL_TRUE)
@@ -2923,7 +2923,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)
 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
 				else
-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+					sprintf(cTmpBuf,"%ld:%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
 				break;
 			case SQL_INTERVAL_MINUTE:
 				if (intervalTmp->interval_sign == SQL_TRUE)
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
index a300bbd..b2c0091 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
@@ -1684,6 +1684,7 @@
 								return IDS_22_003;
 						}
 					}
+                    DataLen = sizeof(DATE_STRUCT);
 				}
 			}
 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_DATE, 
@@ -1930,6 +1931,7 @@
 								return IDS_22_003;
 						}
 					}
+                    DataLen = sizeof(TIME_STRUCT);
 				}
 			}
 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIME, 
@@ -2149,6 +2151,7 @@
 								return IDS_22_003;
 						}
 					}
+                    DataLen = sizeof(TIMESTAMP_STRUCT);
 				}
 			}
 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIMESTAMP, 
diff --git a/core/seamonster/src/Makefile b/core/seamonster/src/Makefile
index 94b416e..bcc84d0 100644
--- a/core/seamonster/src/Makefile
+++ b/core/seamonster/src/Makefile
@@ -56,7 +56,6 @@
 	$(CC) $(CCFLAGS) $(INCLUDES) $*.c $(LDFLAGS) -c
 
 $(LIBSM): $(LIBOBJ)
-	-mkdir -p $(SP_EXPORT_LIB)
 	$(CC) $(LDFLAGS) $(CCFLAGS) -Wall -Werror -shared \
 		-Wl,-soname,libsm.so -o $@ $(LIBOBJ) -lrt
 
diff --git a/core/sqf/.gitignore b/core/sqf/.gitignore
index 380ea87..a8425d5 100644
--- a/core/sqf/.gitignore
+++ b/core/sqf/.gitignore
@@ -68,8 +68,6 @@
 # derived license
 /LICENSE
 /NOTICE
-/DISCLAIMER
-sqf/DISCLAIMER
 
 # bundled component links
 /dcs-*
diff --git a/core/sqf/Makefile b/core/sqf/Makefile
index 6f4844b..2c1d952 100644
--- a/core/sqf/Makefile
+++ b/core/sqf/Makefile
@@ -268,7 +268,7 @@
 P_TYPE = $(shell uname -p)
 
 ifeq ($(SQ_BUILD_TYPE),release)
-  PKG_TYPE="server-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}-incubating"
+  PKG_TYPE="server-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}"
 else
   PKG_TYPE="server-$(TRAFODION_VER)-${OS_TYPE}${OS_MAJOR}-${P_TYPE}-debug"
 endif
@@ -287,7 +287,7 @@
 PKG_BIN_OBJS += trafci
 PKG_BIN_OBJS += samples
 PKG_BIN_OBJS += export/limited-support-tools
-PKG_BIN_OBJS += LICENSE NOTICE DISCLAIMER
+PKG_BIN_OBJS += LICENSE NOTICE 
 PKG_BIN_OBJS += rest-${TRAFODION_VER} dcs-${TRAFODION_VER}
 
 PKG_BIN_DIRS ?= sql export
@@ -328,12 +328,6 @@
 ../../licenses/NOTICE-server:
 	cd $(@D) && $(MAKE) $(@F)
 
-DISCLAIMER: ../../licenses/DISCLAIMER-server
-	cp -f $? $@
-
-../../licenses/DISCLAIMER-server:
-	cd $(@D) && $(MAKE) $(@F)
-
 rest-${TRAFODION_VER}:
 	ln -s ../rest/target/$@/$@ $@
 
@@ -352,7 +346,7 @@
 	find -L $(PKG_BIN_OBJS) -type d -o -type f | grep -v -xf pkglist-symlinks > $@
 
 # simple symlinks that point to filename (does not start with . or /)
-pkglist-symlinks: LICENSE NOTICE DISCLAIMER rest-${TRAFODION_VER} dcs-${TRAFODION_VER}
+pkglist-symlinks: LICENSE NOTICE rest-${TRAFODION_VER} dcs-${TRAFODION_VER}
 	find $(PKG_BIN_OBJS) -lname '[^./]*' > $@
 
 #	Targets to tar/gzip the self installer
diff --git a/core/sqf/build-scripts/build.branch b/core/sqf/build-scripts/build.branch
index 463475e..4b508cc 100755
--- a/core/sqf/build-scripts/build.branch
+++ b/core/sqf/build-scripts/build.branch
@@ -47,7 +47,7 @@
   # runs make with a detached HEAD then change the space in "(no branch)"
   # to an underscore so it can be used.
   if [ -z "$ZUUL_BRANCH" ]; then
-    branch=$(git branch | grep '^\* ' | sed 's/(no branch)/no_branch/' | awk '{print $2}')
+    branch=$(git branch --no-color| grep '^\* ' | sed 's/(no branch)/no_branch/' | awk '{print $2}')
   else
     branch=$ZUUL_BRANCH;
   fi
diff --git a/core/sqf/export/include/common/evl_sqlog_eventnum.h b/core/sqf/export/include/common/evl_sqlog_eventnum.h
index c8b4d59..96c3df9 100644
--- a/core/sqf/export/include/common/evl_sqlog_eventnum.h
+++ b/core/sqf/export/include/common/evl_sqlog_eventnum.h
@@ -54,6 +54,7 @@
 #define MON_CLUSTER_CLUSTER_1               101010401
 #define MON_CLUSTER_CLUSTER_2               101010402
 #define MON_CLUSTER_UCLUSTER                101010501
+
 #define MON_CLUSTER_HANDLEOTHERNODE_1       101010601
 #define MON_CLUSTER_HANDLEOTHERNODE_2       101010602
 #define MON_CLUSTER_HANDLEOTHERNODE_3       101010603
@@ -65,17 +66,21 @@
 #define MON_CLUSTER_HANDLEOTHERNODE_9       101010609
 #define MON_CLUSTER_HANDLEOTHERNODE_10      101010610
 #define MON_CLUSTER_HANDLEOTHERNODE_11      101010611
+
 #define MON_CLUSTER_HANDLEMYNODE_1          101010701
 #define MON_CLUSTER_HANDLEMYNODE_2          101010702
 #define MON_CLUSTER_HANDLEMYNODE_3          101010703
 #define MON_CLUSTER_HANDLEMYNODE_4          101010704
 #define MON_CLUSTER_HANDLEMYNODE_5          101010705
 #define MON_CLUSTER_HANDLEMYNODE_6          101010706
+
 #define MON_CLUSTER_CHECKWHODIED_1          101010801
 #define MON_CLUSTER_CHECKWHODIED_2          101010802
+
 #define MON_CLUSTER_REGROUP_1               101010901
 #define MON_CLUSTER_REGROUP_2               101010902
 #define MON_CLUSTER_INITCLUSTER             101011001
+
 #define MON_CLUSTER_MARKDOWN_1              101011101
 #define MON_CLUSTER_MARKDOWN_2              101011102
 #define MON_CLUSTER_MARKDOWN_3              101011103
@@ -84,15 +89,18 @@
 #define MON_CLUSTER_FORCEDOWN_1             101011401
 #define MON_CLUSTER_FORCEDOWN_2             101011402
 #define MON_CLUSTER_CLUSTER_MANAGER         101011501
+
 #define MON_CLUSTER_EXPEDITEDOWN_1          101011601
 #define MON_CLUSTER_EXPEDITEDOWN_2          101011602
 #define MON_CLUSTER_RESPONSIVE_1            101011701
 #define MON_CLUSTER_RESPONSIVE_2            101011702
 #define MON_CLUSTER_RESPONSIVE_3            101011703
+
 #define MON_CLUSTER_CONNTONEWMON_1          101011801
 #define MON_CLUSTER_CONNTONEWMON_2          101011802
 #define MON_CLUSTER_CONNTONEWMON_10         101011810
 #define MON_CLUSTER_CONNTONEWMON_11         101011811
+
 #define MON_CLUSTER_MERGETONEWMON_1         101011901
 #define MON_CLUSTER_MERGETONEWMON_2         101011902
 #define MON_CLUSTER_MERGETONEWMON_3         101011903
@@ -100,6 +108,7 @@
 #define MON_CLUSTER_MERGETONEWMON_11        101011911
 #define MON_CLUSTER_MERGETONEWMON_12        101011912
 #define MON_CLUSTER_MERGETONEWMON_13        101011913
+
 #define MON_CLUSTER_REINTEGRATE_1           101012001
 #define MON_CLUSTER_REINTEGRATE_2           101012002
 #define MON_CLUSTER_REINTEGRATE_3           101012003
@@ -107,12 +116,15 @@
 #define MON_CLUSTER_REINTEGRATE_11          101012011
 #define MON_CLUSTER_REINTEGRATE_12          101012012
 #define MON_CLUSTER_REINTEGRATE_13          101012013
+
 #define MON_CLUSTER_REINITCLUSTER_1         101012101
 #define MON_CLUSTER_REINITCLUSTER_2         101012102
 #define MON_CLUSTER_REINITCLUSTER_3         101012103
 #define MON_CLUSTER_REINITCLUSTER_4         101012104
 #define MON_CLUSTER_REINITCLUSTER_5         101012105
+
 #define MON_CLUSTER_CHECKWHOJOINED_1        101012201
+
 #define MON_CLUSTER_UPDDATECLUSTER_1        101012301
 #define MON_CLUSTER_UPDDATECLUSTER_2        101012302
 #define MON_CLUSTER_UPDTCLUSTERSTATE_1      101012401
@@ -131,11 +143,14 @@
 #define MON_CLUSTER_INITCONFIGCLUSTER_2     101013002
 #define MON_CLUSTER_INITCONFIGCLUSTER_3     101013003
 #define MON_CLUSTER_INITCONFIGCLUSTER_4     101013004
+
 #define MON_CLUSTER_SETNEWCOMM_1            101013101
 #define MON_CLUSTER_SETNEWCOMM_2            101013102
+
 #define MON_CLUSTER_SETNEWSOCK_1            101013201
 #define MON_CLUSTER_SETNEWSOCK_2            101013202
 #define MON_CLUSTER_SETNEWSOCK_3            101013203
+
 #define MON_CLUSTER_ALLGATHERSOCK_1         101013301
 #define MON_CLUSTER_ALLGATHERSOCK_2         101013302
 #define MON_CLUSTER_ALLGATHERSOCK_3         101013303
@@ -145,6 +160,7 @@
 #define MON_CLUSTER_ALLGATHERSOCK_7         101013307
 #define MON_CLUSTER_ALLGATHERSOCK_8         101013308
 #define MON_CLUSTER_EPOLLCTL_1              101013401
+
 #define MON_CLUSTER_INITCLUSTERSOCKS_1      101013501
 #define MON_CLUSTER_INITCLUSTERSOCKS_2      101013502
 #define MON_CLUSTER_INITCLUSTERSOCKS_3      101013503
@@ -155,6 +171,7 @@
 #define MON_CLUSTER_INITCLUSTERSOCKS_8      101013508
 #define MON_CLUSTER_INITACCEPTSOCK_1        101013601
 #define MON_CLUSTER_INITACCEPTSOCK_2        101013602
+
 #define MON_CLUSTER_MKSRVSOCK_1             101013701
 #define MON_CLUSTER_MKSRVSOCK_2             101013702
 #define MON_CLUSTER_MKSRVSOCK_3             101013703
@@ -163,6 +180,7 @@
 #define MON_CLUSTER_MKSRVSOCK_6             101013706
 #define MON_CLUSTER_MKSRVSOCK_7             101013707
 #define MON_CLUSTER_MKSRVSOCK_8             101013708
+
 #define MON_CLUSTER_MKCLTSOCK_1             101013801
 #define MON_CLUSTER_MKCLTSOCK_2             101013802
 #define MON_CLUSTER_MKCLTSOCK_3             101013803
@@ -175,29 +193,37 @@
 #define MON_CLUSTER_MKCLTSOCK_10            101013810
 #define MON_CLUSTER_MKCLTSOCK_11            101013811
 #define MON_CLUSTER_MKCLTSOCK_12            101013812
+
 #define MON_CLUSTER_CONNECT_1               101013901
 #define MON_CLUSTER_CONNECT_2               101013902
 #define MON_CLUSTER_CONNECT_3               101013903
 #define MON_CLUSTER_CONNECT_4               101013904
 #define MON_CLUSTER_CONNECT_5               101013905
+
 #define MON_CLUSTER_CONNECTTOSELF_1         101014001
 #define MON_CLUSTER_CONNECTTOSELF_2         101014002
 #define MON_CLUSTER_CONNECTTOSELF_3         101014003
+
 #define MON_CLUSTER_ACCEPTSOCK_1            101014101
 #define MON_CLUSTER_ACCEPTSOCK_2            101014102
 #define MON_CLUSTER_ACCEPTSOCK_3            101014103
+
 #define MON_CLUSTER_INITSERVERSOCK_1        101014201
 #define MON_CLUSTER_INITSERVERSOCK_2        101014202
 #define MON_CLUSTER_INITSERVERSOCK_3        101014203
 #define MON_CLUSTER_INITSERVERSOCK_4        101014204
+
 #define MON_CLUSTER_SOFTNODEDOWN_1          101014301
 #define MON_CLUSTER_SOFTNODEDOWN_2          101014302
 #define MON_CLUSTER_SOFTNODEDOWN_3          101014303
+
 #define MON_CLUSTER_SOFTNODEUP_1            101014401
+
 #define MON_CLUSTER_SETKEEPALIVESOCKOPT_1   101014501
 #define MON_CLUSTER_SETKEEPALIVESOCKOPT_2   101014502
 #define MON_CLUSTER_SETKEEPALIVESOCKOPT_3   101014503
 #define MON_CLUSTER_SETKEEPALIVESOCKOPT_4   101014504
+
 #define MON_CLUSTER_NO_LICENSE_VERIFIERS    101014601
 
 #define MON_CLUSTER_ALLGATHERSOCKRECONN_1   101014701
diff --git a/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh b/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
index d36f665..dd49e21 100755
--- a/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
+++ b/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
@@ -119,6 +119,15 @@
   esac
 done
 
+#check HBase compatibility: determine whether TrafExportSnapshot can be used
+java org.trafodion.utility.backuprestore.TrafExportSnapshot -t
+if [[ $? -ne 0 ]]; then
+  echo 'not able to use TrafExportSnapshot'
+  mr_limit=0
+else
+  echo 'able to use TrafExportSnapshot'
+fi
+
 echo "logging output to: ${log_file}"
 
 #create tmp and log folders if they don't exist
diff --git a/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java b/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
index 3abf8ac..e38c0f1 100644
--- a/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
+++ b/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
@@ -1103,9 +1103,22 @@
   static int innerMain(final Configuration conf, final String [] args) throws Exception {
     return ToolRunner.run(conf, new TrafExportSnapshot(), args);
   }
-
+  private void testMobAvail()  {
+      MobUtils.getMobHome(HBaseConfiguration.create());
+  }
   public static void main(String[] args) throws Exception {
 	  LOG.info("Trafodion Export Snapshot Utility");
+    if(args.length == 1)
+    {
+      String cmd = args[0];
+      if (cmd.equals("-t") || cmd.equals("--test")) {
+        //test whether MobUtils is available on this system by
+        //making a trivial (otherwise meaningless) call to it
+        TrafExportSnapshot ti = new TrafExportSnapshot(); 
+        ti.testMobAvail();
+        System.exit(0); //normal
+      }
+    } 
     System.exit(innerMain(HBaseConfiguration.create(), args));
   }
 }
diff --git a/core/sqf/monitor/linux/cluster.cxx b/core/sqf/monitor/linux/cluster.cxx
index 66f515c..c22e8ea 100644
--- a/core/sqf/monitor/linux/cluster.cxx
+++ b/core/sqf/monitor/linux/cluster.cxx
@@ -347,10 +347,6 @@
     }
 
     spareNode->SetActivatingSpare( false );
-    if ( MyNode->IsCreator() )
-    {
-        MyNode->SetCreator( false, -1, -1 );
-    }
     ResetIntegratingPNid();
 
     TRACE_EXIT;
@@ -848,10 +844,6 @@
         {
             if ( node->GetPNid() == integratingPNid_ )
             {
-                if ( MyNode->IsCreator() )
-                {
-                    MyNode->SetCreator( false, -1, -1 );
-                }
                 ResetIntegratingPNid();
             }
             node->KillAllDown();
@@ -1470,10 +1462,6 @@
                 }
             }
 
-            if ( MyNode->IsCreator() )
-            {
-                MyNode->SetCreator( false, -1, -1 );
-            }
             ResetIntegratingPNid();
 
             if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
@@ -3394,6 +3382,18 @@
         pingSock = Monitor->Connect( node->GetCommPort() );
         if ( pingSock < 0 )
         {
+            if (node->GetState() != State_Up)
+            {
+                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                {
+                    trace_printf( "%s@%d - Node %s (%d) is not up, "
+                                  "socks_[%d]=%d\n"
+                                , method_name, __LINE__
+                                , node->GetName(), node->GetPNid()
+                                , node->GetPNid(), socks_[node->GetPNid()] );
+                }
+                break;
+            }
             sleep( MAX_RECONN_PING_WAIT_TIMEOUT );
         }
         else
@@ -3828,12 +3828,13 @@
     myNodeInfo.creator = true;
     myNodeInfo.creatorShellPid = CreatorShellPid;
     myNodeInfo.creatorShellVerifier = CreatorShellVerifier;
+    myNodeInfo.ping = false;
 
     if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
     {
         trace_printf( "%s@%d - Connected to creator monitor, sending my info, "
                       "node %d (%s), commPort=%s, syncPort=%s, creator=%d, "
-                      "creatorShellPid=%d:%d\n"
+                      "creatorShellPid=%d:%d, ping=%d\n"
                     , method_name, __LINE__
                     , myNodeInfo.pnid
                     , myNodeInfo.nodeName
@@ -3841,7 +3842,8 @@
                     , myNodeInfo.syncPort
                     , myNodeInfo.creator
                     , myNodeInfo.creatorShellPid
-                    , myNodeInfo.creatorShellVerifier );
+                    , myNodeInfo.creatorShellVerifier
+                    , myNodeInfo.ping );
     }
 
     rc = Monitor->SendSock( (char *) &myNodeInfo
@@ -3907,6 +3909,7 @@
     myNodeInfo.creator = false;
     myNodeInfo.creatorShellPid = -1;
     myNodeInfo.creatorShellVerifier = -1;
+    myNodeInfo.ping = false;
     for (int i=0; i<pnodeCount; i++)
     {
         if ( nodeInfo[i].creatorPNid != -1 && 
@@ -4237,9 +4240,36 @@
             abort();
     }
 
+    if ( MyNode->IsCreator() )
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d - Resetting creator pnid=%d\n",
+                          method_name, __LINE__, MyPNID );
+        }
+
+        MyNode->SetCreator( false, -1, -1 );
+    }
+
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+    {
+        trace_printf( "%s@%d - Resetting integratingPNid_=%d\n",
+                      method_name, __LINE__, integratingPNid_ );
+    }
+
     integratingPNid_ = -1;
-    // Indicate to the commAcceptor thread to begin accepting connections
-    CommAccept.setAccepting( true );
+
+    if (!CommAccept.isAccepting())
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d - Triggering commAcceptor thread to begin accepting connections\n",
+                          method_name, __LINE__ );
+        }
+
+        // Indicate to the commAcceptor thread to begin accepting connections
+        CommAccept.startAccepting();
+    }
 
     TRACE_EXIT;
 }
@@ -4251,7 +4281,7 @@
 
     integratingPNid_ = pnid;
     // Indicate to the commAcceptor thread to stop accepting connections
-    CommAccept.setAccepting( false );
+    CommAccept.stopAccepting();
 
     TRACE_EXIT;
 }
@@ -4643,7 +4673,6 @@
     peer_t p[GetConfigPNodesMax()];
     memset( p, 0, sizeof(p) );
     tag = 0; // make compiler happy
-    struct timespec currentTime;
     // Set to twice the ZClient session timeout
     static int sessionTimeout = ZClientEnabled 
                                 ? (ZClient->GetSessionTimeout() * 2) : 120;
@@ -4740,10 +4769,14 @@
     while ( 1 )
     {
 reconnected:
+        bool checkConnections = false;
+        bool doReconnect = false;
+        bool resetConnections = false;
+        int peerTimedoutCount = 0;
         int maxEvents = 2*GetConfigPNodesCount() - nsent - nrecv;
         if ( maxEvents == 0 ) break;
         int nw;
-        int zerr = ZOK;
+        peer_t *peer;
 
         while ( 1 )
         {
@@ -4754,321 +4787,180 @@
         if ( nw == 0 )
         { // Timeout, no fd's ready
             for ( int iPeer = 0; iPeer < GetConfigPNodesCount(); iPeer++ )
-            {
-                peer_t *peer = &p[indexToPnid_[iPeer]];
-                if ( (indexToPnid_[iPeer] != MyPNID) && (socks_[indexToPnid_[iPeer]] != -1) )
-                {
-                    if ( (peer->p_receiving) || (peer->p_sending) )
+            { // Check no IO completion on peers
+                peer = &p[indexToPnid_[iPeer]];
+                if ( (peer->p_receiving) || (peer->p_sending) )
+                { 
+                    peerTimedoutCount++;
+                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
                     {
-                        if ( ! ZClientEnabled )
-                        {
-                            if (peer->p_initial_check && !reconnecting)
-                            {
-                                peer->p_initial_check = false;
-                                clock_gettime(CLOCK_REALTIME, &peer->znodeFailedTime);
-                                peer->znodeFailedTime.tv_sec += sessionTimeout;
-                                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                {
-                                    trace_printf( "%s@%d" " - Znode Fail Time %ld(secs)\n"
-                                                , method_name, __LINE__
-                                                , peer->znodeFailedTime.tv_sec);
-                                }
-                            }
-    
-                            if ( peer->p_timeout_count < sv_epoll_retry_count )
-                            {
-                                peer->p_timeout_count++;
+                        trace_printf( "%s@%d - EPOLL timeout (%d) on: %s(%d), "
+                                      "socks_[%d]=%d, "
+                                      "peer->p_sending=%d, "
+                                      "peer->p_receiving=%d\n"
+                                    , method_name, __LINE__
+                                    , peerTimedoutCount
+                                    , Node[indexToPnid_[iPeer]]->GetName(), indexToPnid_[iPeer]
+                                    , indexToPnid_[iPeer]
+                                    , socks_[indexToPnid_[iPeer]]
+                                    , peer->p_sending
+                                    , peer->p_receiving );
+                    }
 
-                                if (IsRealCluster)
-                                {
-                                    if (trace_settings & TRACE_RECOVERY)
-                                    {
-                                        trace_printf( "%s@%d - Initianing AllgatherSockReconnect(), trigger: %s(%d),"
-                                                      " timeout count=%d,"
-                                                      " sending=%d,"
-                                                      " receiving=%d\n"
-                                                    , method_name, __LINE__
-                                                    , Node[indexToPnid_[iPeer]]->GetName(), iPeer
-                                                    , peer->p_timeout_count
-                                                    , peer->p_sending
-                                                    , peer->p_receiving);
-                                    }
-                                    // Attempt reconnect to all peers
-                                    err = AllgatherSockReconnect( stats );
-                                    // Redrive IOs on live peer connections
-                                    nsent = 0; nrecv = 0;
-                                    for ( int i = 0; i < GetConfigPNodesCount(); i++ )
-                                    {
-                                        peer_t *peer = &p[indexToPnid_[i]];
-                                        if ( indexToPnid_[i] == MyPNID || socks_[indexToPnid_[i]] == -1 )
-                                        {
-                                            peer->p_sending = peer->p_receiving = false;
-                                            nsent++;
-                                            nrecv++;
-                                        }
-                                        else
-                                        {
-                                            peer->p_sending = peer->p_receiving = true;
-                                            peer->p_sent = peer->p_received = 0;
-                                            peer->p_timeout_count = 0;
-                                            peer->p_n2recv = -1;
-                                            peer->p_buff = ((char *) rbuf) + (indexToPnid_[i] * CommBufSize);
-                                
-                                            struct epoll_event event;
-                                            event.data.fd = socks_[indexToPnid_[i]];
-                                            event.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP | EPOLLERR | EPOLLHUP;
-                                            EpollCtl( epollFD_, EPOLL_CTL_ADD, socks_[indexToPnid_[i]], &event );
-                                        }
-                                        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                        {
-                                            trace_printf( "%s@%d" " - socks_[%d]=%d, "
-                                                          "peer->p_sending=%d, "
-                                                          "peer->p_receiving=%d\n"
-                                                        , method_name, __LINE__
-                                                        , indexToPnid_[i]
-                                                        , socks_[indexToPnid_[i]]
-                                                        , peer->p_sending
-                                                        , peer->p_receiving );
-                                        }
-                                    }
-                                    reconnectSeqNum_ = seqNum_;
-                                    reconnecting = true;
-                                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                    {
-                                        trace_printf( "%s@%d" " - Reconnecting!\n"
-                                                    , method_name, __LINE__ );
-                                    }
-                                    goto reconnected;
-                                }
-                                continue;
-                            }
-                            if (trace_settings & TRACE_RECOVERY)
-                            {
-                                trace_printf( "%s@%d - Peer timeout triggered: "
-                                              "peer->p_timeout_count %d, "
-                                              "sv_epoll_retry_count %d\n"
-                                              "        socks_[%d]=%d\n"
-                                              "        stats[%d].MPI_ERROR=%s\n"
-                                            , method_name, __LINE__
-                                            , peer->p_timeout_count
-                                            , sv_epoll_retry_count
-                                            , indexToPnid_[iPeer]
-                                            , socks_[indexToPnid_[iPeer]]
-                                            , iPeer
-                                            , ErrorMsg(stats[indexToPnid_[iPeer]].MPI_ERROR) );
-                            }
-                        }
-                        else
+                    if (peer->p_initial_check && !reconnecting)
+                    { // Set the session timeout relative to now
+                        peer->p_initial_check = false;
+                        clock_gettime(CLOCK_REALTIME, &peer->znodeFailedTime);
+                        peer->znodeFailedTime.tv_sec += sessionTimeout;
+                        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
                         {
-                            if (peer->p_initial_check && !reconnecting)
-                            {
-                                peer->p_initial_check = false;
-                                clock_gettime(CLOCK_REALTIME, &peer->znodeFailedTime);
-                                peer->znodeFailedTime.tv_sec += sessionTimeout;
-                                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                {
-                                    trace_printf( "%s@%d" " - Znode Fail Time %ld(secs)\n"
-                                                , method_name, __LINE__
-                                                , peer->znodeFailedTime.tv_sec);
-                                }
-                                
-                            }
-                            // If not expired, stay in the loop
-                            if ( ! ZClient->IsZNodeExpired( Node[indexToPnid_[iPeer]]->GetName(), zerr ))
-                            {
-                                if ( zerr == ZCONNECTIONLOSS || zerr == ZOPERATIONTIMEOUT )
-                                {
-                                    // Ignore transient errors with the quorum.
-                                    // However, if longer than the session
-                                    // timeout, handle it as a hard error.
-                                    clock_gettime(CLOCK_REALTIME, &currentTime);
-                                    if (currentTime.tv_sec < peer->znodeFailedTime.tv_sec)
-                                    {
-                                        // Failsafe
-                                        peer->p_timeout_count++;
-   
-                                        if ( peer->p_timeout_count < sv_epoll_retry_count )
-                                        {
-                                            if (IsRealCluster)
-                                            {
-                                                if (trace_settings & TRACE_RECOVERY)
-                                                {
-                                                    trace_printf( "%s@%d - Initianing AllgatherSockReconnect(), trigger: %s(%d),"
-                                                                  " timeout count=%d,"
-                                                                  " sending=%d,"
-                                                                  " receiving=%d\n"
-                                                                , method_name, __LINE__
-                                                                , Node[indexToPnid_[iPeer]]->GetName(), indexToPnid_[iPeer]
-                                                                , peer->p_timeout_count
-                                                                , peer->p_sending
-                                                                , peer->p_receiving);
-                                                }
-                                                // Attempt reconnect to all peers
-                                                err = AllgatherSockReconnect( stats );
-                                                // Redrive IOs on live peer connections
-                                                nsent = 0; nrecv = 0;
-                                                for ( int i = 0; i < GetConfigPNodesCount(); i++ )
-                                                {
-                                                    peer_t *peer = &p[indexToPnid_[i]];
-                                                    if ( indexToPnid_[i] == MyPNID || socks_[indexToPnid_[i]] == -1 )
-                                                    {
-                                                        peer->p_sending = peer->p_receiving = false;
-                                                        nsent++;
-                                                        nrecv++;
-                                                    }
-                                                    else
-                                                    {
-                                                        peer->p_sending = peer->p_receiving = true;
-                                                        peer->p_sent = peer->p_received = 0;
-                                                        peer->p_timeout_count = 0;
-                                                        peer->p_n2recv = -1;
-                                                        peer->p_buff = ((char *) rbuf) + (indexToPnid_[i] * CommBufSize);
-                                            
-                                                        struct epoll_event event;
-                                                        event.data.fd = socks_[indexToPnid_[i]];
-                                                        event.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP | EPOLLERR | EPOLLHUP;
-                                                        EpollCtl( epollFD_, EPOLL_CTL_ADD, socks_[i], &event );
-                                                    }
-                                                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                                    {
-                                                        trace_printf( "%s@%d" " - socks_[%d]=%d, "
-                                                                      "peer->p_sending=%d, "
-                                                                      "peer->p_receiving=%d\n"
-                                                                    , method_name, __LINE__
-                                                                    , indexToPnid_[i]
-                                                                    , socks_[indexToPnid_[i]]
-                                                                    , peer->p_sending
-                                                                    , peer->p_receiving );
-                                                    }
-                                                }
-                                                reconnectSeqNum_ = seqNum_;
-                                                reconnecting = true;
-                                                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                                {
-                                                    trace_printf( "%s@%d" " - Reconnecting!\n"
-                                                                , method_name, __LINE__ );
-                                                }
-                                                goto reconnected;
-                                            }
-                                            continue;
-                                        }
-                                    }
-                                    if (trace_settings & TRACE_RECOVERY)
-                                    {
-                                        trace_printf( "%s@%d - Znode Failed triggered\n"
-                                                      "        Current Time    %ld(secs)\n"
-                                                      "        Znode Fail Time %ld(secs)\n"
-                                                    , method_name, __LINE__
-                                                    , currentTime.tv_sec
-                                                    , peer->znodeFailedTime.tv_sec);
-                                    }
-                                }
-                                else
-                                {
-                                    // Failsafe
-                                    peer->p_timeout_count++;
-
-                                    if ( peer->p_timeout_count < sv_epoll_retry_count )
-                                    {
-                                        if (IsRealCluster)
-                                        {
-                                            if (trace_settings & TRACE_RECOVERY)
-                                            {
-                                                trace_printf( "%s@%d - Initianing AllgatherSockReconnect(), trigger: %s(%d),"
-                                                              " timeout count=%d,"
-                                                              " sending=%d,"
-                                                              " receiving=%d\n"
-                                                            , method_name, __LINE__
-                                                            , Node[indexToPnid_[iPeer]]->GetName(), indexToPnid_[iPeer]
-                                                            , peer->p_timeout_count
-                                                            , peer->p_sending
-                                                            , peer->p_receiving);
-                                            }
-                                            // Attempt reconnect to all peers
-                                            err = AllgatherSockReconnect( stats );
-                                            // Redrive IOs on live peer connections
-                                            nsent = 0; nrecv = 0;
-                                            for ( int i = 0; i < GetConfigPNodesCount(); i++ )
-                                            {
-                                                peer_t *peer = &p[indexToPnid_[i]];
-                                                if ( indexToPnid_[i] == MyPNID || socks_[indexToPnid_[i]] == -1 )
-                                                {
-                                                    peer->p_sending = peer->p_receiving = false;
-                                                    nsent++;
-                                                    nrecv++;
-                                                }
-                                                else
-                                                {
-                                                    peer->p_sending = peer->p_receiving = true;
-                                                    peer->p_sent = peer->p_received = 0;
-                                                    peer->p_timeout_count = 0;
-                                                    peer->p_n2recv = -1;
-                                                    peer->p_buff = ((char *) rbuf) + (indexToPnid_[i] * CommBufSize);
-                                        
-                                                    struct epoll_event event;
-                                                    event.data.fd = socks_[indexToPnid_[i]];
-                                                    event.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP | EPOLLERR | EPOLLHUP;
-                                                    EpollCtl( epollFD_, EPOLL_CTL_ADD, socks_[i], &event );
-                                                }
-                                                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                                {
-                                                    trace_printf( "%s@%d" " - socks_[%d]=%d, "
-                                                                  "peer->p_sending=%d, "
-                                                                  "peer->p_receiving=%d\n"
-                                                                , method_name, __LINE__
-                                                                , indexToPnid_[i]
-                                                                , socks_[indexToPnid_[i]]
-                                                                , peer->p_sending
-                                                                , peer->p_receiving );
-                                                }
-                                            }
-                                            reconnectSeqNum_ = seqNum_;
-                                            reconnecting = true;
-                                            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                                            {
-                                                trace_printf( "%s@%d" " - Reconnecting!\n"
-                                                            , method_name, __LINE__ );
-                                            }
-                                            goto reconnected;
-                                        }
-                                        continue;
-                                    }
-                                }
-                            }
-                        }
-
-                        if (trace_settings & TRACE_RECOVERY)
-                        {
-                            trace_printf( "%s@%d - err=%d, socks_[%d]=%d, stats[%d].MPI_ERROR=%s\n"
+                            trace_printf( "%s@%d" " - Znode Fail Time %ld(secs)\n"
                                         , method_name, __LINE__
-                                        , err
+                                        , peer->znodeFailedTime.tv_sec);
+                        }
+                    }
+
+                    if ( IsRealCluster && peer->p_timeout_count < sv_epoll_retry_count )
+                    {
+                        peer->p_timeout_count++;
+                        checkConnections = true;
+                        if (peer->p_timeout_count == sv_epoll_retry_count)
+                        {
+                            resetConnections = true;
+                        }
+                    }
+                    else
+                    {
+                        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                        {
+                            trace_printf( "%s@%d" " - Peer timed out: %s(%d), "
+                                          "socks_[%d]=%d, "
+                                          "peer->p_timeout_count=%d\n"
+                                        , method_name, __LINE__
+                                        , Node[indexToPnid_[iPeer]]->GetName(), indexToPnid_[iPeer]
                                         , indexToPnid_[iPeer]
                                         , socks_[indexToPnid_[iPeer]]
-                                        , indexToPnid_[iPeer]
-                                        , ErrorMsg(stats[indexToPnid_[iPeer]].MPI_ERROR) );
-                        }
-
-                        if ( err == MPI_ERR_IN_STATUS
-                          && stats[indexToPnid_[iPeer]].MPI_ERROR == MPI_ERR_EXITED)
-                        {
-                            // At this point, this peer is not responding and
-                            // reconnects failed or its znode expired
-                            char buf[MON_STRING_BUF_SIZE];
-                            snprintf( buf, sizeof(buf)
-                                    , "[%s@%d] Not heard from peer=%d (node=%s) "
-                                      "(current seq # is %lld)\n"
-                                    , method_name
-                                    ,  __LINE__
-                                    , indexToPnid_[iPeer]
-                                    , Node[indexToPnid_[iPeer]]->GetName()
-                                    , seqNum_ );
-                            mon_log_write( MON_CLUSTER_ALLGATHERSOCK_2, SQ_LOG_CRIT, buf );
+                                        , peer->p_timeout_count );
                         }
                     }
                 }
+            } // Check no IO completion on peers
+
+            if (checkConnections)
+            {
+                checkConnections = false;
+                if (trace_settings & TRACE_RECOVERY)
+                {
+                    trace_printf( "%s@%d - Initianing AllgatherSockReconnect(),"
+                                  " peerTimedoutCount=%d\n"
+                                , method_name, __LINE__
+                                , peerTimedoutCount );
+                }
+                // First, check ability to connect to all peers
+                // An err returned will mean that connect failed with 
+                // at least one peer. No err implies that possible network
+                // reset occurred and there is probably one dead connection
+                // to a peer where no IOs will complete ever, so connections
+                // to all peers must be reestablished.
+                err = AllgatherSockReconnect( stats, false );
+                if (err == MPI_SUCCESS)
+                { // Connections to all peers are good
+                    if (resetConnections)
+                    { // Establish new connections on all peers
+                        resetConnections = false;
+                        err = AllgatherSockReconnect( stats, true );
+                        // Redrive IOs on new peer connections
+                        nsent = 0; nrecv = 0;
+                        for ( int i = 0; i < GetConfigPNodesCount(); i++ )
+                        {
+                            peer = &p[indexToPnid_[i]];
+                            if ( indexToPnid_[i] == MyPNID || socks_[indexToPnid_[i]] == -1 )
+                            { // peer is me or not available
+                                peer->p_sending = peer->p_receiving = false;
+                                nsent++;
+                                nrecv++;
+                            }
+                            else
+                            {
+                                peer->p_sending = peer->p_receiving = true;
+                                peer->p_sent = peer->p_received = 0;
+                                peer->p_n2recv = -1;
+                                peer->p_buff = ((char *) rbuf) + (indexToPnid_[i] * CommBufSize);
+                                struct epoll_event event;
+                                event.data.fd = socks_[indexToPnid_[i]];
+                                event.events = EPOLLIN | EPOLLOUT | EPOLLET | EPOLLRDHUP | EPOLLERR | EPOLLHUP;
+                                EpollCtl( epollFD_, EPOLL_CTL_ADD, socks_[indexToPnid_[i]], &event );
+                            }
+                        }
+                    } // (resetConnections)
+                } // (err == MPI_SUCCESS)
+                else
+                {
+                    for ( int i = 0; i < GetConfigPNodesCount(); i++ )
+                    {
+                        peer = &p[indexToPnid_[i]];
+                        if ( indexToPnid_[i] != MyPNID && socks_[indexToPnid_[i]] == -1 )
+                        { // peer is me or no longer available
+                            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY) &&
+                                (peer->p_sending || peer->p_receiving) )
+                            {
+                                trace_printf( "%s@%d No IO completion on %s(%d):socks_[%d]=%d, "
+                                              "peer->p_sending=%d, "
+                                              "peer->p_receiving=%d\n"
+                                            , method_name, __LINE__
+                                            , Node[indexToPnid_[i]]->GetName(), indexToPnid_[i]
+                                            , indexToPnid_[i]
+                                            , socks_[indexToPnid_[i]]
+                                            , peer->p_sending
+                                            , peer->p_receiving );
+                            }
+                            if (peer->p_sending)
+                            {
+                                nsent++;
+                                peer->p_sending = false;
+                            }
+                            if (peer->p_receiving)
+                            {
+                                peer->p_receiving = false;
+                                nrecv++;
+                            }
+                        }
+                    }
+                }
+                doReconnect = true;
+            } // (checkConnections)
+
+            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+            {
+                for ( int i = 0; i < GetConfigPNodesCount(); i++ )
+                {
+                    peer = &p[indexToPnid_[i]];
+                    trace_printf( "%s@%d doReconnect=%d, %s(%d):socks_[%d]=%d, "
+                                  "peer->p_sending=%d, "
+                                  "peer->p_receiving=%d\n"
+                                , method_name, __LINE__
+                                , doReconnect
+                                , Node[indexToPnid_[i]]->GetName(), indexToPnid_[i]
+                                , indexToPnid_[i]
+                                , socks_[indexToPnid_[i]]
+                                , peer->p_sending
+                                , peer->p_receiving );
+                }
             }
-        }
- 
+
+            if (doReconnect)
+            {
+                reconnectSeqNum_ = seqNum_;
+                reconnecting = true;
+                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                {
+                    trace_printf( "%s@%d" " - Reconnecting! (reconnectSeqNum_=%lld)\n"
+                                , method_name, __LINE__, reconnectSeqNum_ );
+                }
+                goto reconnected;
+            }
+        }  // ( nw == 0 )
+
         if ( nw < 0 )
         { // Got an error
             char ebuff[256];
@@ -5393,7 +5285,7 @@
     return err;
 }
 
-int CCluster::AllgatherSockReconnect( MPI_Status *stats )
+int CCluster::AllgatherSockReconnect( MPI_Status *stats, bool reestablishConnections )
 {
     const char method_name[] = "CCluster::AllgatherSockReconnect";
     TRACE_ENTRY;
@@ -5428,6 +5320,18 @@
                                         , node->GetName(), node->GetPNid()
                                         , idst, socks_[idst] );
                         }
+                        stats[idst].MPI_ERROR = MPI_ERR_EXITED;
+                        stats[idst].count = 0;
+                        err = MPI_ERR_IN_STATUS;
+                        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                        {
+                            trace_printf( "%s@%d - Setting Node %s (%d) status to "
+                                          "stats[%d].MPI_ERROR=%s\n"
+                                        , method_name, __LINE__
+                                        , node->GetName(), node->GetPNid()
+                                        , idst
+                                        , ErrorMsg(stats[idst].MPI_ERROR) );
+                        }
                         // Remove old socket from epoll set, it may not be there
                         struct epoll_event event;
                         event.data.fd = socks_[idst];
@@ -5437,22 +5341,28 @@
                     }
                     continue;
                 }
-                reconnectSock = ConnectSockPeer( node, idst );
-                if (reconnectSock == -1)
+                if (PingSockPeer(node))
                 {
-                    stats[idst].MPI_ERROR = MPI_ERR_EXITED;
-                    stats[idst].count = 0;
-                    err = MPI_ERR_IN_STATUS;
-                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                    reconnectSock = ConnectSockPeer( node, idst, reestablishConnections );
+                    if (reconnectSock == -1)
                     {
-                        trace_printf( "%s@%d - Setting Node %s (%d) status to "
-                                      "stats[%d].MPI_ERROR=%s\n"
-                                    , method_name, __LINE__
-                                    , node->GetName(), node->GetPNid()
-                                    , idst
-                                    , ErrorMsg(stats[idst].MPI_ERROR) );
+                        stats[idst].MPI_ERROR = MPI_ERR_EXITED;
+                        stats[idst].count = 0;
+                        err = MPI_ERR_IN_STATUS;
+                        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                        {
+                            trace_printf( "%s@%d - Setting Node %s (%d) status to "
+                                          "stats[%d].MPI_ERROR=%s\n"
+                                        , method_name, __LINE__
+                                        , node->GetName(), node->GetPNid()
+                                        , idst
+                                        , ErrorMsg(stats[idst].MPI_ERROR) );
+                        }
                     }
-                    if (node->GetState() != State_Up)
+                }
+                else
+                {
+                    if (socks_[idst] != -1)
                     {
                         if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
                         {
@@ -5470,6 +5380,19 @@
                         EpollCtlDelete( epollFD_, socks_[idst], &event );
                         socks_[idst] = -1;
                     }
+                    reconnectSock = -1;
+                    stats[idst].MPI_ERROR = MPI_ERR_EXITED;
+                    stats[idst].count = 0;
+                    err = MPI_ERR_IN_STATUS;
+                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                    {
+                        trace_printf( "%s@%d - Setting Node %s (%d) status to "
+                                      "stats[%d].MPI_ERROR=%s\n"
+                                    , method_name, __LINE__
+                                    , node->GetName(), node->GetPNid()
+                                    , idst
+                                    , ErrorMsg(stats[idst].MPI_ERROR) );
+                    }
                 }
             }
             else if ( j == MyPNID )
@@ -5491,6 +5414,18 @@
                                         , node->GetName(), node->GetPNid()
                                         , idst, socks_[idst] );
                         }
+                        stats[idst].MPI_ERROR = MPI_ERR_EXITED;
+                        stats[idst].count = 0;
+                        err = MPI_ERR_IN_STATUS;
+                        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                        {
+                            trace_printf( "%s@%d - Setting Node %s (%d) status to "
+                                          "stats[%d].MPI_ERROR=%s\n"
+                                        , method_name, __LINE__
+                                        , node->GetName(), node->GetPNid()
+                                        , idst
+                                        , ErrorMsg(stats[idst].MPI_ERROR) );
+                        }
                         // Remove old socket from epoll set, it may not be there
                         struct epoll_event event;
                         event.data.fd = socks_[idst];
@@ -5508,7 +5443,7 @@
                 }
                 if (PingSockPeer(node))
                 {
-                    reconnectSock = AcceptSockPeer( node, idst );
+                    reconnectSock = AcceptSockPeer( node, idst, reestablishConnections );
                     if (reconnectSock == -1)
                     {
                         stats[idst].MPI_ERROR = MPI_ERR_EXITED;
@@ -5523,24 +5458,6 @@
                                         , idst
                                         , ErrorMsg(stats[idst].MPI_ERROR) );
                         }
-                        if (node->GetState() != State_Up)
-                        {
-                            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-                            {
-                                trace_printf( "%s@%d - Node %s (%d) is not up, "
-                                              "removing old socket from epoll set, "
-                                              "socks_[%d]=%d\n"
-                                            , method_name, __LINE__
-                                            , node->GetName(), node->GetPNid()
-                                            , idst, socks_[idst] );
-                            }
-                            // Remove old socket from epoll set, it may not be there
-                            struct epoll_event event;
-                            event.data.fd = socks_[idst];
-                            event.events = 0;
-                            EpollCtlDelete( epollFD_, socks_[idst], &event );
-                            socks_[idst] = -1;
-                        }
                     }
                 }
                 else
@@ -5561,6 +5478,7 @@
                         event.data.fd = socks_[idst];
                         event.events = 0;
                         EpollCtlDelete( epollFD_, socks_[idst], &event );
+                        socks_[idst] = -1;
                     }
                     reconnectSock = -1;
                     stats[idst].MPI_ERROR = MPI_ERR_EXITED;
@@ -5618,14 +5536,13 @@
     return( err );
 }
 
-int CCluster::AcceptSockPeer( CNode *node, int peer )
+int CCluster::AcceptSockPeer( CNode *node, int peer, bool reestablishConnections )
 {
     const char method_name[] = "CCluster::AcceptSockPeer";
     TRACE_ENTRY;
 
     int rc = MPI_SUCCESS;
     int reconnectSock = -1;
-    unsigned char srcaddr[4];
     struct hostent *he;
 
     // Get my host structure via my node name
@@ -5646,13 +5563,9 @@
     {
         if (trace_settings & TRACE_RECOVERY)
         {
-            trace_printf( "%s@%d Accepting server socket: from %s(%d), src=%d.%d.%d.%d, port=%d\n"
+            trace_printf( "%s@%d Accepting server socket: from %s(%d), port=%d\n"
                         , method_name, __LINE__
                         , node->GetName(), node->GetPNid()
-                        , (int)((unsigned char *)srcaddr)[0]
-                        , (int)((unsigned char *)srcaddr)[1]
-                        , (int)((unsigned char *)srcaddr)[2]
-                        , (int)((unsigned char *)srcaddr)[3]
                         , MyNode->GetSyncSocketPort() );
         }
 
@@ -5679,17 +5592,40 @@
             rc = -1;
         }
 
-        if (socks_[peer] != -1)
+        if (reestablishConnections)
         {
-            // Remove old socket from epoll set, it may not be there
-            struct epoll_event event;
-            event.data.fd = socks_[peer];
-            event.events = 0;
-            EpollCtlDelete( epollFD_, socks_[peer], &event );
+            if (socks_[peer] != -1)
+            {
+                // Remove old socket from epoll set, it may not be there
+                struct epoll_event event;
+                event.data.fd = socks_[peer];
+                event.events = 0;
+                EpollCtlDelete( epollFD_, socks_[peer], &event );
+                if (node->GetState() != State_Up)
+                {
+                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                    {
+                        trace_printf( "%s@%d - Node %s (%d) is not up, "
+                                      "removing old socket from epoll set, "
+                                      "socks_[%d]=%d\n"
+                                    , method_name, __LINE__
+                                    , node->GetName(), node->GetPNid()
+                                    , peer, socks_[peer] );
+                    }
+                    socks_[peer] = -1;
+                }
+            }
+            if (reconnectSock != -1)
+            {
+                socks_[peer] = reconnectSock;
+            }
         }
-        if (reconnectSock != -1)
+        else
         {
-            socks_[peer] = reconnectSock;
+            if (reconnectSock != -1)
+            {
+                close( (int)reconnectSock );
+            }
         }
     }
 
@@ -5697,7 +5633,7 @@
     return rc;
 }
 
-int CCluster::ConnectSockPeer( CNode *node, int peer )
+int CCluster::ConnectSockPeer( CNode *node, int peer, bool reestablishConnections )
 {
     const char method_name[] = "CCluster::ConnectSockPeer";
     TRACE_ENTRY;
@@ -5791,17 +5727,40 @@
             rc = -1;
         }
 
-        if (socks_[peer] != -1)
+        if (reestablishConnections)
         {
-            // Remove old socket from epoll set, it may not be there
-            struct epoll_event event;
-            event.data.fd = socks_[peer];
-            event.events = 0;
-            EpollCtlDelete( epollFD_, socks_[peer], &event );
+            if (socks_[peer] != -1)
+            {
+                // Remove old socket from epoll set, it may not be there
+                struct epoll_event event;
+                event.data.fd = socks_[peer];
+                event.events = 0;
+                EpollCtlDelete( epollFD_, socks_[peer], &event );
+                if (node->GetState() != State_Up)
+                {
+                    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                    {
+                        trace_printf( "%s@%d - Node %s (%d) is not up, "
+                                      "removing old socket from epoll set, "
+                                      "socks_[%d]=%d\n"
+                                    , method_name, __LINE__
+                                    , node->GetName(), node->GetPNid()
+                                    , peer, socks_[peer] );
+                    }
+                    socks_[peer] = -1;
+                }
+            }
+            if (reconnectSock != -1)
+            {
+                socks_[peer] = reconnectSock;
+            }
         }
-        if (reconnectSock != -1)
+        else
         {
-            socks_[peer] = reconnectSock;
+            if (reconnectSock != -1)
+            {
+                close( (int)reconnectSock );
+            }
         }
     }
 
@@ -6118,14 +6077,21 @@
 
     if ( GetConfigPNodesCount() ==  1 ) return true;
 
-    // Count occurrences of sequence numbers from other nodes
+    // Count occurrences of sequence numbers
     for (int pnid = 0; pnid < GetConfigPNodesMax(); pnid++)
     {
         CNode *node= Nodes->GetNode( pnid );
         if (!node) continue;
         if (node->GetState() != State_Up) continue;
         
-        seqNum = nodestate[pnid].seq_num;
+        if ( pnid == MyPNID )
+        {
+            seqNum = nodestate[pnid].seq_num = seqNum_;
+        }
+        else
+        {
+            seqNum = nodestate[pnid].seq_num;
+        }
 
         if (trace_settings & TRACE_SYNC)
         {
@@ -6183,7 +6149,7 @@
     
     if (trace_settings & TRACE_SYNC)
     {
-        if ( seqNum_ != seqNumBucket[mostCountsIndex] )
+        if ( lowSeqNum_ != highSeqNum_ )
         {
             trace_printf( "%s@%d Most common seq num=%lld (%d nodes), "
                           "%d buckets, low=%lld, high=%lld, local seq num (%lld) did not match.\n"
@@ -6197,8 +6163,8 @@
         }
     }
 
-    // Fail if my seqnum does not match majority
-    return seqNum_ == seqNumBucket[mostCountsIndex];
+    // Fail if any sequence number does not match
+    return( lowSeqNum_ == highSeqNum_ );
 }
 
 void CCluster::HandleDownNode( int pnid )
@@ -6353,13 +6319,9 @@
             {
                 if (!noComm)
                 {
-                    trace_printf( "%s@%d - Communication error from node %d:\n"
-                                  "                node_state=%d\n"
-                                  "                change_nid=%d\n"
-                                  "                seq_num=#%lld\n"
+                    trace_printf( "%s@%d - Communication error from node %d, "
+                                  " seq_num=#%lld\n"
                                 , method_name, __LINE__, index
-                                , recvBuf->nodeInfo.node_state
-                                , recvBuf->nodeInfo.change_nid
                                 , seqNum_ );
                 }
             }
@@ -6803,7 +6765,18 @@
         }
 
         // if we have already processed buffer, skip it
-        if (lastSeqNum_ == msgBuf->nodeInfo.seq_num) continue;
+        if (lastSeqNum_ >= msgBuf->nodeInfo.seq_num) continue;
+
+        if (trace_settings & TRACE_SYNC)
+        {
+            trace_printf("%s@%d - Processing buffer for node %d, swpRecCount_=%d, seq_num=%lld, "
+                         "lastSeqNum_=%lld, msg_count=%d, msg_offset=%d\n",
+                         method_name, __LINE__, i, swpRecCount_,
+                         msgBuf->nodeInfo.seq_num,
+                         lastSeqNum_,
+                         msgBuf->msgInfo.msg_count,
+                         msgBuf->msgInfo.msg_offset);
+        }
 
         // reset msg length to zero to initialize for PopMsg()
         msgBuf->msgInfo.msg_offset = 0;
@@ -6814,7 +6787,7 @@
             if ( deferredTmSync )
             {   // This node has sent a TmSync message.  Process it now.
                 if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
-                    trace_printf("%s@%d - Handling deferred TmSync message for "
+                    trace_printf("%s@%d - Handling deferred TmSync messages for "
                                  "node %d\n", method_name, __LINE__, i);
 
                 struct internal_msg_def *msg;
@@ -6840,16 +6813,9 @@
         }
         else if ( !deferredTmSync )
         {
-            // temp trace
-            if (trace_settings & TRACE_SYNC)
-            {
-                trace_printf("%s@%d - For node %d, swpRecCount_=%d, "
-                             "seq_num=%lld,msg_count=%d, msg_offset=%d\n",
-                             method_name, __LINE__, i, swpRecCount_,
-                             msgBuf->nodeInfo.seq_num,
-                             msgBuf->msgInfo.msg_count,
-                             msgBuf->msgInfo.msg_offset);
-            }
+            if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                trace_printf("%s@%d - Handling messages for "
+                             "node %d\n", method_name, __LINE__, i);
             do
             {
                 // Get the next sync msg for the node
@@ -7103,15 +7069,20 @@
 reconnected:
 
     if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
-        trace_printf( "%s@%d - doing Allgather size=%d, swpRecCount_=%d, message count=%d "
-                      "message seq_num=%lld, seqNum_=%lld, lastSeqNum_=%lld\n"
+        trace_printf( "%s@%d - doing Allgather size=%d, swpRecCount_=%d, "
+                      "message count=%d, message seq_num=%lld, "
+                      "seqNum_=%lld, lastSeqNum_=%lld, lowSeqNum_=%lld, "
+                      "highSeqNum_=%lld, reconnectSeqNum_=%lld\n"
                     , method_name, __LINE__
                     , Nodes->GetSyncSize()
                     , swpRecCount_
                     , send_buffer->msgInfo.msg_count
                     , send_buffer->nodeInfo.seq_num
                     , seqNum_
-                    , lastSeqNum_);
+                    , lastSeqNum_
+                    , lowSeqNum_
+                    , highSeqNum_
+                    , reconnectSeqNum_);
 
     struct timespec ts_ag_begin;
     clock_gettime(CLOCK_REALTIME, &ts_ag_begin);
@@ -7191,24 +7162,36 @@
                           , recv_buffer
                           , status
                           , send_buffer->nodeInfo.change_nid);
-    }
 
-    if ( ProcessClusterData( recv_buffer, send_buffer, false ) )
-    {   // There is a TmSync message remaining to be handled
-        ProcessClusterData( recv_buffer, send_buffer, true );
-    }
-    else
-    {
         if ( lastAllgatherWithLastSyncBuffer )
         {
             seqNum_ = savedSeqNum;
             lastAllgatherWithLastSyncBuffer = false;
             send_buffer = Nodes->GetSyncBuffer();
+
+            if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                trace_printf( "%s@%d - Resetting lastAllgatherWithLastSyncBuffer=%d\n"
+                            , method_name, __LINE__
+                            , lastAllgatherWithLastSyncBuffer);
+
             goto reconnected;
         }
     
         if ( reconnectSeqNum_ != 0 )
         {
+
+            if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                trace_printf( "%s@%d - Allgather IO retry, swpRecCount_=%d, "
+                              "seqNum_=%lld, lastSeqNum_=%lld, lowSeqNum_=%lld, "
+                              "highSeqNum_=%lld, reconnectSeqNum_=%lld\n"
+                            , method_name, __LINE__
+                            , swpRecCount_
+                            , seqNum_
+                            , lastSeqNum_
+                            , lowSeqNum_
+                            , highSeqNum_
+                            , reconnectSeqNum_);
+
             // The Allgather() has executed a reconnect at reconnectSeqNum_.
             // The UpdateClusterState has set the lowSeqNum_and highSeqNum_
             // in the current IO exchange which will indicate whether there is
@@ -7224,6 +7207,12 @@
                 // Indicate to follow up the next exchange with current SyncBuffer
                 lastAllgatherWithLastSyncBuffer = true;
                 lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
+
+                if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                    trace_printf( "%s@%d - Setting lastAllgatherWithLastSyncBuffer=%d\n"
+                                , method_name, __LINE__
+                                , lastAllgatherWithLastSyncBuffer);
+
                 goto reconnected;
             }
             else if (seqNum_ < highSeqNum_)
@@ -7231,10 +7220,25 @@
                 // Redo exchange with the current SyncBuffer
                 send_buffer = Nodes->GetSyncBuffer();
                 lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
+
+                if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                    trace_printf( "%s@%d - lastAllgatherWithLastSyncBuffer=%d\n"
+                                , method_name, __LINE__
+                                , lastAllgatherWithLastSyncBuffer);
+
                 goto reconnected;
             }
+            lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
         }
+    }
+
+    if ( ProcessClusterData( recv_buffer, send_buffer, false ) )
+    {   // There is a TmSync message remaining to be handled
+        ProcessClusterData( recv_buffer, send_buffer, true );
+    }
     
+    if (swpRecCount_ == 1)
+    {
         // Save the sync buffer and corresponding sequence number we just processed
         // On reconnect we must resend the last buffer and the current buffer
         // to ensure dropped buffers are processed by all monitor processe in the
@@ -7246,22 +7250,21 @@
         if ( ++seqNum_ == 0) seqNum_ = 1;
     }
 
-    // ?? Need the following?  Possibly not since maybe all sync cycle
-    // dependent code was removed -- need to check.
     // Wake up any threads waiting on the completion of a sync cycle
     syncCycle_.wakeAll();
 
     if (doShutdown) result = checkIfDone( );
 
-    --swpRecCount_;
-
     if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
         trace_printf( "%s@%d - node data exchange completed, swpRecCount_=%d, "
-                      "seqNum_=%lld, lastSeqNum_=%lld\n"
+                      "seqNum_=%lld, lastSeqNum_=%lld, reconnectSeqNum_=%lld\n"
                     , method_name, __LINE__
                     , swpRecCount_
                     , seqNum_
-                    , lastSeqNum_);
+                    , lastSeqNum_
+                    , reconnectSeqNum_);
+
+    --swpRecCount_;
 
     TRACE_EXIT;
 
@@ -7329,16 +7332,21 @@
 
 reconnected:
 
-    if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_TMSYNC))
-        trace_printf( "%s@%d - doing Allgather size=%d, swpRecCount_=%d, message count=%d "
-                      "message seq_num=%lld, seqNum_=%lld, lastSeqNum_=%lld\n"
+    if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+        trace_printf( "%s@%d - doing Allgather size=%d, swpRecCount_=%d, "
+                      "message count=%d, message seq_num=%lld, "
+                      "seqNum_=%lld, lastSeqNum_=%lld, lowSeqNum_=%lld, "
+                      "highSeqNum_=%lld, reconnectSeqNum_=%lld\n"
                     , method_name, __LINE__
                     , Nodes->GetSyncSize()
                     , swpRecCount_
                     , send_buffer->msgInfo.msg_count
                     , send_buffer->nodeInfo.seq_num
                     , seqNum_
-                    , lastSeqNum_);
+                    , lastSeqNum_
+                    , lowSeqNum_
+                    , highSeqNum_
+                    , reconnectSeqNum_);
 
     struct timespec ts_ag_begin;
     clock_gettime(CLOCK_REALTIME, &ts_ag_begin);
@@ -7418,6 +7426,73 @@
                           , recv_buffer
                           , status
                           , send_buffer->nodeInfo.change_nid);
+
+        if ( lastAllgatherWithLastSyncBuffer )
+        {
+            seqNum_ = savedSeqNum;
+            lastAllgatherWithLastSyncBuffer = false;
+            send_buffer = Nodes->GetSyncBuffer();
+
+            if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                trace_printf( "%s@%d - Resetting lastAllgatherWithLastSyncBuffer=%d\n"
+                            , method_name, __LINE__
+                            , lastAllgatherWithLastSyncBuffer);
+
+            goto reconnected;
+        }
+
+        if ( reconnectSeqNum_ != 0 )
+        {
+            if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                trace_printf( "%s@%d - Allgather IO retry, swpRecCount_=%d, "
+                              "seqNum_=%lld, lastSeqNum_=%lld, lowSeqNum_=%lld, "
+                              "highSeqNum_=%lld, reconnectSeqNum_=%lld\n"
+                            , method_name, __LINE__
+                            , swpRecCount_
+                            , seqNum_
+                            , lastSeqNum_
+                            , lowSeqNum_
+                            , highSeqNum_
+                            , reconnectSeqNum_);
+
+            // The Allgather() has executed a reconnect at reconnectSeqNum_.
+            // The UpdateClusterState has set the lowSeqNum_ and highSeqNum_
+            // in the current IO exchange which will indicate whether there is
+            // a mismatch in IOs between monitor processes. If there is a mismatch,
+            // the lowSeqNum_ and highSeqNum_ relative to our current seqNum_
+            // will determine how to redrive the exchange of node data.
+            if (seqNum_ > lowSeqNum_)
+            { // A remote monitor did not receive our last SyncBuffer
+                // Redo exchange with the previous SyncBuffer
+                send_buffer = Nodes->GetLastSyncBuffer();
+                savedSeqNum = seqNum_;
+                seqNum_ = lastSeqNum_;
+                // Indicate to follow up the next exchange with current SyncBuffer
+                lastAllgatherWithLastSyncBuffer = true;
+                lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
+
+                if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                    trace_printf( "%s@%d - Setting lastAllgatherWithLastSyncBuffer=%d\n"
+                                , method_name, __LINE__
+                                , lastAllgatherWithLastSyncBuffer);
+
+                goto reconnected;
+            }
+            else if (seqNum_ < highSeqNum_)
+            { // The local monitor did not receive the last remote SyncBuffer
+                // Redo exchange with the current SyncBuffer
+                send_buffer = Nodes->GetSyncBuffer();
+                lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
+
+                if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
+                    trace_printf( "%s@%d - lastAllgatherWithLastSyncBuffer=%d\n"
+                                , method_name, __LINE__
+                                , lastAllgatherWithLastSyncBuffer);
+
+                goto reconnected;
+            }
+            lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
+        }
     }
 
     if ( ProcessClusterData( recv_buffer, send_buffer, false ) )
@@ -7425,61 +7500,29 @@
         ProcessClusterData( recv_buffer, send_buffer, true );
     }
 
-    if ( lastAllgatherWithLastSyncBuffer )
+    if (swpRecCount_ == 1)
     {
-        seqNum_ = savedSeqNum;
-        lastAllgatherWithLastSyncBuffer = false;
-        send_buffer = Nodes->GetSyncBuffer();
-        goto reconnected;
+        // Save the sync buffer and corresponding sequence number we just processed
+        // On reconnect we must resend the last buffer and the current buffer
+        // to ensure dropped buffers are processed by all monitor processes in the
+        // correct order
+        Nodes->SaveMyLastSyncBuffer();
+        lastSeqNum_ = seqNum_;
+    
+        // Increment count of "Allgather" calls.  If wrap-around, start again at 1.
+        if ( ++seqNum_ == 0) seqNum_ = 1;
     }
 
-    if ( reconnectSeqNum_ != 0 )
-    {
-        // The Allgather() has executed a reconnect at reconnectSeqNum_.
-        // The UpdateClusterState has set the lowSeqNum_and highSeqNum_
-        // in the current IO exchange which will indicate whether there is
-        // a mismatch in IOs between monitor processes. If there is a mismatch,
-        // the lowSeqNum_and highSeqNum_ relative to our current seqNum_
-        // will determine how to redrive the exchange of node data.
-        if (seqNum_ > lowSeqNum_)
-        { // A remote monitor did not receive our last SyncBuffer
-            // Redo exchange with the previous SyncBuffer
-            send_buffer = Nodes->GetLastSyncBuffer();
-            savedSeqNum = seqNum_;
-            seqNum_ = lastSeqNum_;
-            // Indicate to follow up the next exchange with current SyncBuffer
-            lastAllgatherWithLastSyncBuffer = true;
-            lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
-            goto reconnected;
-        }
-        else if (seqNum_ < highSeqNum_)
-        { // The local monitor did not receive the last remote SyncBuffer
-            // Redo exchange with the current SyncBuffer
-            send_buffer = Nodes->GetSyncBuffer();
-            lowSeqNum_ = highSeqNum_ = reconnectSeqNum_ = 0;
-            goto reconnected;
-        }
-    }
-
-    // Save the sync buffer and corresponding sequence number we just processed
-    // On reconnect we must resend the last buffer and the current buffer
-    // to ensure dropped buffers are processed by all monitor processe in the
-    // correct order
-    Nodes->SaveMyLastSyncBuffer();
-    lastSeqNum_ = seqNum_;
-
-    // Increment count of "Allgather" calls.  If wrap-around, start again at 1.
-    if ( ++seqNum_ == 0) seqNum_ = 1;
-
-    --swpRecCount_;
-
     if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
         trace_printf( "%s@%d - node data exchange completed, swpRecCount_=%d, "
-                      "seqNum_=%lld, lastSeqNum_=%lld\n"
+                      "seqNum_=%lld, lastSeqNum_=%lld, reconnectSeqNum_=%lld\n"
                     , method_name, __LINE__
                     , swpRecCount_
                     , seqNum_
-                    , lastSeqNum_);
+                    , lastSeqNum_
+                    , reconnectSeqNum_);
+
+    --swpRecCount_;
 
     TRACE_EXIT;
 }
diff --git a/core/sqf/monitor/linux/cluster.h b/core/sqf/monitor/linux/cluster.h
index e743341..58d3540 100644
--- a/core/sqf/monitor/linux/cluster.h
+++ b/core/sqf/monitor/linux/cluster.h
@@ -336,9 +336,9 @@
     int Allgather(int nbytes, void *sbuf, char *rbuf, int tag, MPI_Status *stats);
     int AllgatherIB(int nbytes, void *sbuf, char *rbuf, int tag, MPI_Status *stats);
     int AllgatherSock(int nbytes, void *sbuf, char *rbuf, int tag, MPI_Status *stats);
-    int AllgatherSockReconnect( MPI_Status *stats );
-    int AcceptSockPeer( CNode *node, int peer );
-    int ConnectSockPeer( CNode *node, int peer );
+    int AllgatherSockReconnect( MPI_Status *stats, bool reestablishConnections = false );
+    int AcceptSockPeer( CNode *node, int peer, bool reestablishConnections = false );
+    int ConnectSockPeer( CNode *node, int peer, bool reestablishConnections = false );
 
     void ValidateClusterState( cluster_state_def_t nodestate[],
                                bool haveDivergence );
diff --git a/core/sqf/monitor/linux/cmsh.cxx b/core/sqf/monitor/linux/cmsh.cxx
index c8c4975..709b293 100644
--- a/core/sqf/monitor/linux/cmsh.cxx
+++ b/core/sqf/monitor/linux/cmsh.cxx
@@ -91,6 +91,46 @@
 
 ///////////////////////////////////////////////////////////////////////////////
 //
+// Function/Method: CCmsh::PopulateNodeState
+//
+// Description: Executes the command string passed in the constructor against
+//              the given node and populates the internal node state list with
+//              that node's state. Clients can then inquire about the node's state.
+//              
+// Return:
+//        0 - success
+//       -1 - failure
+//
+///////////////////////////////////////////////////////////////////////////////
+int CCmsh::PopulateNodeState( const char *nodeName )
+{
+    const char method_name[] = "CCmsh::PopulateNodeState";
+    TRACE_ENTRY;
+
+    int rc;
+
+    // The caller should save and close stdin before calling this proc
+    // and restore it when done. This is to prevent ssh from consuming
+    // caller's stdin contents when executing the command. 
+    string commandArgs;
+    {
+        commandArgs = "-n ";
+        commandArgs += nodeName;
+    }
+    rc = ExecuteCommand( commandArgs.c_str(), nodeStateList_ );
+    if ( rc == -1 )
+    {
+        char la_buf[MON_STRING_BUF_SIZE];
+        sprintf(la_buf, "[%s] Error: While executing '%s' command\n", method_name, command_.data());
+        mon_log_write(MON_CMSH_GET_CLUSTER_STATE_1, SQ_LOG_ERR, la_buf);
+    }
+
+    TRACE_EXIT;
+    return( rc );
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
 // Function/Method: CCmsh::GetClusterState
 //
 // Description: Updates the state of the nodes in the physicalNodeMap passed in
@@ -128,31 +168,97 @@
             if (it != physicalNodeMap.end())
             {
                // TEST_POINT and Exclude List : to force state down on node name 
-               const char *downNodeName = getenv( TP001_NODE_DOWN );
-               const char *downNodeList = getenv( TRAF_EXCLUDE_LIST );
-	       string downNodeString = " ";
-	       if (downNodeList)
-	       {
-		 downNodeString += downNodeList;
-	         downNodeString += " ";
-	       }
-	       string downNodeToFind = " ";
-	       downNodeToFind += nodeName.c_str();
-	       downNodeToFind += " ";
-               if (((downNodeList != NULL) && 
-		      strstr(downNodeString.c_str(),downNodeToFind.c_str())) ||
-                   ( (downNodeName != NULL) && 
-                     !strcmp( downNodeName, nodeName.c_str()) ))
-              {
-                   nodeState = StateDown;
-              }
-	   
+                const char *downNodeName = getenv( TP001_NODE_DOWN );
+                const char *downNodeList = getenv( TRAF_EXCLUDE_LIST );
+                string downNodeString = " ";
+                if (downNodeList)
+                {
+                    downNodeString += downNodeList;
+                    downNodeString += " ";
+                }
+                string downNodeToFind = " ";
+                downNodeToFind += nodeName.c_str();
+                downNodeToFind += " ";
+                if (((downNodeList != NULL) && 
+                      strstr(downNodeString.c_str(),downNodeToFind.c_str())) ||
+                    ((downNodeName != NULL) && 
+                     !strcmp(downNodeName, nodeName.c_str())))
+                {
+                    nodeState = StateDown;
+                }
+          
                 // Set physical node state
                 physicalNode = it->second;
                 physicalNode->SetState( nodeState );
             }
         }
-    }	
+    }  
+
+    TRACE_EXIT;
+    return( rc );
+}
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// Function/Method: CCmsh::GetNodeState
+//
+// Description: Updates the state of the named node in the physicalNode object
+//              passed in as a parameter. Caller should ensure that physicalNode
+//              corresponds to the given node name.
+//
+// Return:
+//        0 - success
+//       -1 - failure
+//
+///////////////////////////////////////////////////////////////////////////////
+int CCmsh::GetNodeState( char *name ,CPhysicalNode  *physicalNode )
+{
+    const char method_name[] = "CCmsh::GetNodeState";
+    TRACE_ENTRY;
+
+    int rc;
+
+    rc = PopulateNodeState( name );
+
+    if ( rc != -1 )
+    {
+        // Parse each line extracting name and state
+        string nodeName;
+        NodeState_t nodeState;
+        PhysicalNodeNameMap_t::iterator it;
+        
+        StringList_t::iterator    alit;
+        for ( alit = nodeStateList_.begin(); alit != nodeStateList_.end() ; alit++ )
+        {
+            ParseNodeStatus( *alit, nodeName, nodeState );
+
+            // TEST_POINT and Exclude List : to force state down on node name 
+            const char *downNodeName = getenv( TP001_NODE_DOWN );
+            const char *downNodeList = getenv( TRAF_EXCLUDE_LIST );
+            string downNodeString = " ";
+            if (downNodeList)
+            {
+                downNodeString += downNodeList;
+                downNodeString += " ";
+            }
+            string downNodeToFind = " ";
+            downNodeToFind += nodeName.c_str();
+            downNodeToFind += " ";
+            if (((downNodeList != NULL) && 
+                  strstr(downNodeString.c_str(),downNodeToFind.c_str())) ||
+                ((downNodeName != NULL) && 
+                 !strcmp(downNodeName, nodeName.c_str())))
+            {
+                nodeState = StateDown;
+            }
+
+            if (!strcmp(name, nodeName.c_str()))
+            {
+                // Set physical node state
+                physicalNode->SetState( nodeState );
+            }
+        }
+    }  
 
     TRACE_EXIT;
     return( rc );
diff --git a/core/sqf/monitor/linux/cmsh.h b/core/sqf/monitor/linux/cmsh.h
index dce2e79..f1226bd 100644
--- a/core/sqf/monitor/linux/cmsh.h
+++ b/core/sqf/monitor/linux/cmsh.h
@@ -44,6 +44,7 @@
 
     int  PopulateClusterState( void );
     int  GetClusterState( PhysicalNodeNameMap_t &physicalNodeMap );
+    int  GetNodeState( char *name ,CPhysicalNode  *physicalNode );
     bool  IsInitialized( void ); 
     void ClearClusterState( void ) { nodeStateList_.clear(); }
     NodeState_t GetNodeState( char nodeName[] );
@@ -52,6 +53,7 @@
     NodeStateList_t    nodeStateList_;
     
     void ParseNodeStatus( string &nodeStatus, string &nodeName, NodeState_t &state );
+    int  PopulateNodeState( const char *nodeName );
 };
 
 #endif /*CMSH_H_*/
diff --git a/core/sqf/monitor/linux/commaccept.cxx b/core/sqf/monitor/linux/commaccept.cxx
index a9045af..21b30a6 100644
--- a/core/sqf/monitor/linux/commaccept.cxx
+++ b/core/sqf/monitor/linux/commaccept.cxx
@@ -942,7 +942,7 @@
         {
             char buf[MON_STRING_BUF_SIZE];
             MPI_Error_class( rc, &errClass );
-            snprintf(buf, sizeof(buf), "[%s], cannot accept new monitor: %s.\n",
+            snprintf(buf, sizeof(buf), "[%s], cannot accept remote monitor: %s.\n",
                      method_name, ErrorMsg(rc));
             mon_log_write(MON_COMMACCEPT_15, SQ_LOG_ERR, buf);
 
@@ -1101,13 +1101,44 @@
     TRACE_EXIT;
 }
 
-void CCommAccept::setAccepting( bool accepting ) 
+void CCommAccept::startAccepting( void ) 
 {
+    const char method_name[] = "CCommAccept::startAccepting";
+    TRACE_ENTRY;
+
     CAutoLock lock( getLocker( ) );
-    accepting_ = accepting;
+    
+    if ( !accepting_ )
+    {
+        accepting_ = true;
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d - Enabling accepting_=%d\n"
+                        , method_name, __LINE__, accepting_ );
+        }
+        CLock::wakeOne();
+    }
+
+    TRACE_EXIT;
+}
+
+void CCommAccept::stopAccepting( void ) 
+{
+    const char method_name[] = "CCommAccept::stopAccepting";
+    TRACE_ENTRY;
+
+    CAutoLock lock( getLocker( ) );
     
     if ( accepting_ )
     {
+        accepting_ = false;
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d - Disabling accepting_=%d\n"
+                        , method_name, __LINE__, accepting_ );
+        }
         CLock::wakeOne();
     }
+
+    TRACE_EXIT;
 }
diff --git a/core/sqf/monitor/linux/commaccept.h b/core/sqf/monitor/linux/commaccept.h
index ac85efb..c32d975 100644
--- a/core/sqf/monitor/linux/commaccept.h
+++ b/core/sqf/monitor/linux/commaccept.h
@@ -41,7 +41,8 @@
     bool isAccepting( void ) { CAutoLock lock(getLocker()); return( accepting_ ); }
     void processNewComm( MPI_Comm interComm );
     void processNewSock( int sockFd );
-    void setAccepting( bool accepting );
+    void startAccepting( void );
+    void stopAccepting( void );
     void start( void );
     void shutdownWork( void );
 
diff --git a/core/sqf/monitor/linux/internal.h b/core/sqf/monitor/linux/internal.h
index f69f424..b0f118c 100644
--- a/core/sqf/monitor/linux/internal.h
+++ b/core/sqf/monitor/linux/internal.h
@@ -442,7 +442,7 @@
     int  creatorPNid;
     int  creatorShellPid;
     Verifier_t creatorShellVerifier;
-    bool creator;  // NEW monitor set to true to tell creator it is the CREATOR
+    bool creator;  // NEW monitor sets to true to tell creator it is the CREATOR
     bool ping;     // Monitor sets to true to tell remote monitor
                    // it is just checking that it can communicate with it.
                    // Used during allgather reconnect 
diff --git a/core/sqf/monitor/linux/macros.gmk b/core/sqf/monitor/linux/macros.gmk
index e5324f1..5bb24ec 100644
--- a/core/sqf/monitor/linux/macros.gmk
+++ b/core/sqf/monitor/linux/macros.gmk
@@ -53,8 +53,8 @@
 MPI_CC		:= $(CC)
 MPI_CXX		:= $(CXX)
 
-CC		= mpicc
-CXX		= $(MPICH_ROOT)/bin/mpicxx
+CC		= mpicc $(ARCH_SPECIFIC_OPTION)
+CXX		= $(MPICH_ROOT)/bin/mpicxx $(ARCH_SPECIFIC_OPTION)
 
 ifeq ($(SQ_MTYPE),32)
    CC		+= -mpi32
diff --git a/core/sqf/monitor/linux/montest_run.virtual b/core/sqf/monitor/linux/montest_run.virtual
index 424d38d..e6a7463 100755
--- a/core/sqf/monitor/linux/montest_run.virtual
+++ b/core/sqf/monitor/linux/montest_run.virtual
@@ -23,24 +23,26 @@
 
 #!/bin/sh
 
+ARCH=`arch`
 # Cleanup 
-cd $TRAF_HOME/monitor/linux/Linux-x86_64/dbg
+cd ${TRAF_HOME}/monitor/linux/Linux-${ARCH}/dbg
 echo $PWD
 rm -f core* *.log *.lst test*sub*
 rm -f $MPI_TMPDIR/monitor.port.*
+exit 0
 
 # Setup monitor test files
 cd $TRAF_HOME/monitor/linux
 echo $PWD
 echo Copying monitor test files to execution directory
-echo cp -p ./test*sub* ./Linux-x86_64/dbg
-cp -p ./test*sub* ./Linux-x86_64/dbg
+echo cp -p ./test*sub* ./Linux-${ARCH}/dbg
+cp -p ./test*sub* ./Linux-${ARCH}/dbg
 
 # Establish SQ virtual cluster parameters
 export SQ_VIRTUAL_NODES=6
 export SQ_VIRTUAL_NID=0
 
-cd $TRAF_HOME/monitor/linux/Linux-x86_64/dbg
+cd ${TRAF_HOME}/monitor/linux/Linux-${ARCH}/dbg
 echo $PWD
 
 shell <<eof 
diff --git a/core/sqf/monitor/linux/pnode.cxx b/core/sqf/monitor/linux/pnode.cxx
index 57ce0a6..00f1271 100644
--- a/core/sqf/monitor/linux/pnode.cxx
+++ b/core/sqf/monitor/linux/pnode.cxx
@@ -1133,10 +1133,10 @@
     }
 
     //Displays the startup and keep alive timer values in use for a given run.
-    if (trace_settings & TRACE_INIT)
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
        trace_printf("%s@%d" " - KeepAlive Timer in seconds =%d\n", method_name, __LINE__, (wdtKeepAliveTimerValue_));
 
-    if (trace_settings & TRACE_INIT)
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
        trace_printf("%s@%d" " - Creating Watchdog Process\n", method_name, __LINE__);
 
     strcpy(path,getenv("PATH"));
@@ -1959,7 +1959,7 @@
 
             ++count;
 
-            if (trace_settings & ( TRACE_INIT || TRACE_RECOVERY || TRACE_REQUEST_DETAIL) )
+            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
                 trace_printf("%s@%d - Packing node mapping, pnidConfig=%d, pnid=%d \n",
                             method_name, __LINE__, pnidConfig, pnid);
         }
@@ -1982,7 +1982,7 @@
         pnidConfig = *buffer++;
         pnid = *buffer++;
 
-        if (trace_settings & ( TRACE_INIT || TRACE_RECOVERY || TRACE_REQUEST_DETAIL) )
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
             trace_printf("%s@%d - Unpacking node mapping, pnidConfig=%d, pnid=%d \n",
                         method_name, __LINE__, pnidConfig, pnid);
 
diff --git a/core/sqf/monitor/linux/pnodeconfig.cxx b/core/sqf/monitor/linux/pnodeconfig.cxx
index 4d5ee99..03f52c9 100644
--- a/core/sqf/monitor/linux/pnodeconfig.cxx
+++ b/core/sqf/monitor/linux/pnodeconfig.cxx
@@ -569,9 +569,10 @@
     if ( !p_str1 ) return 1;
     if ( !p_str2 ) return 1;
 
+    // Compare the string passed in
     int lv_ret = strcmp( p_str1, p_str2 );
     if ( lv_ret == 0 )
-    {
+    { // Got a match!
         return lv_ret;
     }
     if ( sb_strict_hostname_check )
@@ -586,23 +587,45 @@
 
     char *lp_str1_dot = strchr( (char *) p_str1, '.' );
     if ( lp_str1_dot )
-    {
+    { // Found '.', copy up to one char before '.'
         memcpy( lv_str1_to_cmp, p_str1, lp_str1_dot - p_str1 );
     }
     else
-    {
+    { // Copy entire string
         strcpy( lv_str1_to_cmp, p_str1 );
     }
 
     char *lp_str2_dot = strchr( (char *) p_str2, '.' );
     if ( lp_str2_dot )
-    {
+    { // Found '.', copy up to one char before '.'
         memcpy( lv_str2_to_cmp, p_str2, lp_str2_dot - p_str2 );
     }
     else
-    {
+    { // Copy entire string
         strcpy( lv_str2_to_cmp, p_str2 );
     }
 
+    // Ignore case
+    NormalizeCase( lv_str1_to_cmp );
+    NormalizeCase( lv_str2_to_cmp );
     return strcmp( lv_str1_to_cmp, lv_str2_to_cmp );
 }
+
+char *CPNodeConfigContainer::NormalizeCase( char *token )
+{
+    char *ptr = token;
+
+    const char method_name[] = "CPNodeConfigContainer::NormalizeCase";
+    TRACE_ENTRY;
+
+    while ( *ptr )
+    {
+        *ptr = tolower( *ptr );
+        if ( *ptr == '\n' ) *ptr = '\0';
+        ptr++;
+    }
+
+    TRACE_EXIT;
+    return token;
+}
+
diff --git a/core/sqf/monitor/linux/pnodeconfig.h b/core/sqf/monitor/linux/pnodeconfig.h
index 1916797..5deccc8 100644
--- a/core/sqf/monitor/linux/pnodeconfig.h
+++ b/core/sqf/monitor/linux/pnodeconfig.h
@@ -74,6 +74,8 @@
     int             nextPNid_;    // next physical node id available
 
 private:
+    static char  *NormalizeCase( char *token );
+
     int             pnodesConfigMax_; // maximum number of physical nodes
     PNodesConfigList_t  spareNodesConfigList_; // configured spare nodes list
     CPNodeConfig  *head_; // head of physical nodes linked list
diff --git a/core/sqf/monitor/linux/process.cxx b/core/sqf/monitor/linux/process.cxx
index 8a35c4d..6a8e08b 100644
--- a/core/sqf/monitor/linux/process.cxx
+++ b/core/sqf/monitor/linux/process.cxx
@@ -327,13 +327,19 @@
     delete [] userArgv_;
 
     if (fd_stdin_ != -1 && !Clone)
-        Redirector.tryShutdownPipeFd(Pid, fd_stdin_);
+    {
+        Redirector.tryShutdownPipeFd(Pid, fd_stdin_, false);
+    }
 
     if (fd_stdout_ != -1)
-        Redirector.tryShutdownPipeFd(Pid, fd_stdout_);
+    {
+        Redirector.tryShutdownPipeFd(Pid, fd_stdout_, true);
+    }
 
     if (fd_stderr_ != -1)
-        Redirector.tryShutdownPipeFd(Pid, fd_stderr_);
+    {
+        Redirector.tryShutdownPipeFd(Pid, fd_stderr_, false);
+    }
 
     // Remove the fifos associated with this process (if any)
     if (fifo_stdin_.size() != 0)
@@ -911,9 +917,10 @@
         {
             ancestor = node->GetProcessL(nextPid);
             if ( ancestor  &&  
-                 (ancestor->CreationTime.tv_sec  < earlyCreationTime.tv_sec ||
-                 (ancestor->CreationTime.tv_sec == earlyCreationTime.tv_sec  &&
-                  ancestor->CreationTime.tv_nsec < earlyCreationTime.tv_nsec)) )
+                 (( ! MyNode->IsMyNode(ancestor->GetNid())) ||
+                  (ancestor->CreationTime.tv_sec  < earlyCreationTime.tv_sec ||
+                   (ancestor->CreationTime.tv_sec == earlyCreationTime.tv_sec  &&
+                    ancestor->CreationTime.tv_nsec < earlyCreationTime.tv_nsec))) )
             {
                 earlyCreationTime.tv_sec  = ancestor->CreationTime.tv_sec;
                 earlyCreationTime.tv_nsec = ancestor->CreationTime.tv_nsec;
@@ -3853,71 +3860,91 @@
             if ( ! MyNode->IsSpareNode() )
             {
                 int nid = MyNode->AssignNid();
-                strId_t progStrId = MyNode->GetStringId( msg->u.request.u.startup.program );
-                strId_t nullStrId = { -1, -1 };
-                process =
-                    new CProcess (NULL, nid, msg->u.request.u.startup.os_pid, ProcessType_Generic, 0, 0, false, true, (char *) "", 
-                                  nullStrId, nullStrId, progStrId, (char *) "", (char *) "");
-                if (process == NULL)
+                if ( (nid == -1) && (MyNode->GetState() != State_Up) )
                 {
-                    //TODO: Log event
-                    abort();
-                }
-                if ( process )
-                {
-                    char user_argv[MAX_ARGS][MAX_ARG_SIZE];
-                    process->userArgs ( 0, user_argv );
-                }
-                if ( msg->u.request.u.startup.process_name[0] == '\0')
-                {   // Create a name for the process and place it in the
-                    // Name member of the process object);
-                    char pname[MAX_KEY_NAME];
-                    MyNode->BuildOurName(nid, process->GetPid(), pname );
-                    process->SetName( pname );
+                    snprintf( la_buf, sizeof(la_buf),
+                            "[%s], Can't attach the pid %d (program: %s) - the monitor is not up yet (curr state: %d).\n",
+                            method_name,
+                            msg->u.request.u.startup.os_pid,
+                            msg->u.request.u.startup.program,
+                            MyNode->GetState() );
+                    mon_log_write( MON_PROCESSCONT_ATTACHPCHECK_4, SQ_LOG_ERR, la_buf );
+
+                    msg->u.reply.type = ReplyType_Generic;
+                    msg->u.reply.u.generic.nid = -1;
+                    msg->u.reply.u.generic.pid = -1;
+                    msg->u.reply.u.generic.verifier = -1;
+                    msg->u.reply.u.generic.process_name[0] = '\0';
+                    msg->u.reply.u.generic.return_code = MPI_ERR_NAME;
                 }
                 else
                 {
-                    process->SetName ( 
-                    MyNode->NormalizeName(msg->u.request.u.startup.process_name) );
+                    strId_t progStrId = MyNode->GetStringId( msg->u.request.u.startup.program );
+                    strId_t nullStrId = { -1, -1 };
+                    process =
+                        new CProcess( NULL, nid, msg->u.request.u.startup.os_pid, ProcessType_Generic, 0, 0, false, true, (char *) "", 
+                        nullStrId, nullStrId, progStrId, (char *) "", (char *) "" );
+                    if ( process == NULL )
+                    {
+                        //TODO: Log event
+                        abort();
+                    }
+                    if ( process )
+                    {
+                        char user_argv[MAX_ARGS][MAX_ARG_SIZE];
+                        process->userArgs( 0, user_argv );
+                    }
+                    if ( msg->u.request.u.startup.process_name[0] == '\0' )
+                    {   // Create a name for the process and place it in the
+                        // Name member of the process object);
+                        char pname[MAX_KEY_NAME];
+                        MyNode->BuildOurName( nid, process->GetPid( ), pname );
+                        process->SetName( pname );
+                    }
+                    else
+                    {
+                        process->SetName( 
+                            MyNode->NormalizeName( msg->u.request.u.startup.process_name ) );
+                    }
+                    process->SetAttached( true );
+                    process->SetupFifo( process->GetNid( ), msg->u.request.u.startup.os_pid );
+                    process->SetCreationTime( msg->u.request.u.startup.os_pid );
+                    process->SetVerifier( );
+                    AddToList( process );
+                    process->CompleteProcessStartup( msg->u.request.u.startup.port_name,
+                                                     msg->u.request.u.startup.os_pid,
+                                                     msg->u.request.u.startup.event_messages,
+                                                     msg->u.request.u.startup.system_messages,
+                                                     false,
+                                                     NULL );
+
+                    msg->u.reply.type = ReplyType_Startup;
+                    msg->u.reply.u.startup_info.nid = process->GetNid( );
+                    msg->u.reply.u.startup_info.pid = process->GetPid( );
+                    msg->u.reply.u.startup_info.verifier = process->GetVerifier( );
+                    strcpy( msg->u.reply.u.startup_info.process_name, process->GetName( ) );
+                    msg->u.reply.u.startup_info.return_code = MPI_SUCCESS;
+                    STRCPY( msg->u.reply.u.startup_info.fifo_stdin,
+                            process->fifo_stdin() );
+                    STRCPY( msg->u.reply.u.startup_info.fifo_stdout,
+                            process->fifo_stdout() );
+                    STRCPY( msg->u.reply.u.startup_info.fifo_stderr,
+                            process->fifo_stderr() );
+
+                    Monitor->writeProcessMapBegin( process->GetName( )
+                                                 , process->GetNid( )
+                                                 , process->GetPid( )
+                                                 , process->GetVerifier( )
+                                                 , -1, -1, -1
+                                                 , msg->u.request.u.startup.program );
                 }
-                process->SetAttached ( true );
-                process->SetupFifo(process->GetNid(), msg->u.request.u.startup.os_pid);
-                process->SetCreationTime(msg->u.request.u.startup.os_pid);
-                process->SetVerifier();
-                AddToList( process );
-                process->CompleteProcessStartup ( msg->u.request.u.startup.port_name,
-                                                  msg->u.request.u.startup.os_pid,
-                                                  msg->u.request.u.startup.event_messages,
-                                                  msg->u.request.u.startup.system_messages,
-                                                  false,
-                                                  NULL );
-
-                msg->u.reply.type = ReplyType_Startup;
-                msg->u.reply.u.startup_info.nid = process->GetNid();
-                msg->u.reply.u.startup_info.pid = process->GetPid();
-                msg->u.reply.u.startup_info.verifier = process->GetVerifier();
-                strcpy (msg->u.reply.u.startup_info.process_name, process->GetName());
-                msg->u.reply.u.startup_info.return_code = MPI_SUCCESS;
-                STRCPY(msg->u.reply.u.startup_info.fifo_stdin,
-                       process->fifo_stdin());
-                STRCPY(msg->u.reply.u.startup_info.fifo_stdout,
-                       process->fifo_stdout());
-                STRCPY(msg->u.reply.u.startup_info.fifo_stderr,
-                       process->fifo_stderr());
-
-                Monitor->writeProcessMapBegin( process->GetName()
-                                             , process->GetNid()
-                                             , process->GetPid()
-                                             , process->GetVerifier()
-                                             , -1, -1, -1
-                                             , msg->u.request.u.startup.program );
             }
             else
             {
-                snprintf(la_buf, sizeof(la_buf),
-                         "[%s], Can't attach, node is a spare node!\n",
-                         method_name);
-                mon_log_write(MON_PROCESSCONT_ATTACHPCHECK_3, SQ_LOG_ERR, la_buf);
+                snprintf( la_buf, sizeof(la_buf),
+                        "[%s], Can't attach, node is a spare node!\n",
+                        method_name );
+                mon_log_write( MON_PROCESSCONT_ATTACHPCHECK_3, SQ_LOG_ERR, la_buf );
 
                 msg->u.reply.type = ReplyType_Startup;
                 msg->u.reply.u.startup_info.nid = -1;
@@ -3930,10 +3957,10 @@
         else
         {
             // Find the duplicate process
-            snprintf(la_buf, sizeof(la_buf),
+            snprintf( la_buf, sizeof(la_buf),
                      "[%s], Can't attach duplicate process %s!\n",
-                     method_name, msg->u.request.u.startup.process_name);
-            mon_log_write(MON_PROCESSCONT_ATTACHPCHECK_4, SQ_LOG_ERR, la_buf);
+                     method_name, msg->u.request.u.startup.process_name );
+            mon_log_write( MON_PROCESSCONT_ATTACHPCHECK_4, SQ_LOG_ERR, la_buf );
 
             msg->u.reply.type = ReplyType_Generic;
             msg->u.reply.u.generic.nid = -1;
@@ -3941,7 +3968,7 @@
             msg->u.reply.u.generic.verifier = -1;
             msg->u.reply.u.generic.process_name[0] = '\0';
             msg->u.reply.u.generic.return_code = MPI_ERR_NAME;
-        } 
+        }
     }  
     // complete a monitor child process startup
     else
diff --git a/core/sqf/monitor/linux/redirector.cxx b/core/sqf/monitor/linux/redirector.cxx
index b3780cc..de27211 100644
--- a/core/sqf/monitor/linux/redirector.cxx
+++ b/core/sqf/monitor/linux/redirector.cxx
@@ -1288,7 +1288,7 @@
                 buf[size-1] = '\n';
             }
         }
-        mon_log_write(MON_REDIR_STDERR, SQ_LOG_INFO, buf);
+        mon_log_write(MON_REDIR_STDERR, SQ_LOG_DEBUG, buf);
 
         delete [] buf;
     }
@@ -1764,7 +1764,7 @@
     TRACE_EXIT;
 }
 
-void CRedirector::tryShutdownPipeFd(int pid, int fd)
+void CRedirector::tryShutdownPipeFd(int pid, int fd, bool pv_delete_redirect)
 {
     const char method_name[] = "CRedirector::tryShutdownPipeFd";
     TRACE_ENTRY;
@@ -1784,9 +1784,12 @@
         redirect = iter->second;
 
         // bugcatcher, temp call
-        redirect->validateObj();
+        if (redirect->pid() != 0)
+            redirect->validateObj();
 
-        if (!redirect->active() && (pid == redirect->pid()))
+        if (((pv_delete_redirect) ||
+             (!redirect->active())) &&
+            (pid == redirect->pid()))
         {
             if (trace_settings & TRACE_REDIRECTION)
                 trace_printf("%s@%d invoking shutdownPipeFd for fd=%d\n",
diff --git a/core/sqf/monitor/linux/redirector.h b/core/sqf/monitor/linux/redirector.h
index 1babca6..2bea30a 100644
--- a/core/sqf/monitor/linux/redirector.h
+++ b/core/sqf/monitor/linux/redirector.h
@@ -254,7 +254,7 @@
     void stdinOff(int fd);
     void stdinOn(int fd);
 
-    void tryShutdownPipeFd(int pid, int fd);
+    void tryShutdownPipeFd(int pid, int fd, bool pv_delete_redirect);
 
     void disposeIoData(int fd, int count, char *buffer);
 
diff --git a/core/sqf/monitor/linux/reqexit.cxx b/core/sqf/monitor/linux/reqexit.cxx
index 4cfdec5..8a487e8 100644
--- a/core/sqf/monitor/linux/reqexit.cxx
+++ b/core/sqf/monitor/linux/reqexit.cxx
@@ -96,6 +96,8 @@
 void CExtExitReq::performRequest()
 {
     bool status = FAILURE;
+    int target_nid = -1;
+    CLNode *target_lnode = NULL;
 
     const char method_name[] = "CExtExitReq::performRequest";
     TRACE_ENTRY;
@@ -115,8 +117,9 @@
                     , msg_->u.request.u.exit.verifier );
     }
 
-    if ((msg_->u.request.u.exit.nid < 0) ||
-        (msg_->u.request.u.exit.nid >= Nodes->GetLNodesConfigMax()))
+    target_nid = msg_->u.request.u.exit.nid;
+    target_lnode = Nodes->GetLNode( target_nid );
+    if ( target_lnode == NULL )
     {
         char buf[MON_STRING_BUF_SIZE];
         sprintf(buf, "[CMonitor::ExitProcess], Invalid Node ID!\n");
diff --git a/core/sqf/monitor/linux/reqnewproc.cxx b/core/sqf/monitor/linux/reqnewproc.cxx
index 7cd35ea..afe1f38 100644
--- a/core/sqf/monitor/linux/reqnewproc.cxx
+++ b/core/sqf/monitor/linux/reqnewproc.cxx
@@ -86,6 +86,7 @@
     CProcess *process = NULL;
     CNode *node = NULL;
     CLNode *lnode = NULL;
+    CLNode *target_lnode = NULL;
     CLNode *zone_lnode = NULL;
     char la_buf[MON_STRING_BUF_SIZE];
     int result;
@@ -116,10 +117,10 @@
     if ( requester )
     {
         target_nid = msg_->u.request.u.new_process.nid;
+        target_lnode = Nodes->GetLNode( target_nid );
         if ( msg_->u.request.u.new_process.type == ProcessType_SSMP ) 
         {
-            if (( msg_->u.request.u.new_process.nid < 0  ||
-                  msg_->u.request.u.new_process.nid >= Nodes->GetLNodesConfigMax() )   )
+            if ( target_lnode == NULL )
             {
                 // Nid must be specified
                 msg_->u.reply.type = ReplyType_NewProcess;
@@ -150,8 +151,7 @@
         }
         if ( msg_->u.request.u.new_process.type == ProcessType_DTM )
         {
-            if (( msg_->u.request.u.new_process.nid < 0  ||
-                  msg_->u.request.u.new_process.nid >= Nodes->GetLNodesConfigMax() )   )
+            if ( target_lnode == NULL )
             {
                 // Nid must be specified
                 msg_->u.reply.type = ReplyType_NewProcess;
@@ -189,8 +189,7 @@
         }
         if ( msg_->u.request.u.new_process.type == ProcessType_SPX ) 
         {
-            if (( msg_->u.request.u.new_process.nid < 0  ||
-                  msg_->u.request.u.new_process.nid >= Nodes->GetLNodesConfigMax() )   )
+            if ( target_lnode == NULL )
             {
                 // Nid must be specified
                 msg_->u.reply.type = ReplyType_NewProcess;
@@ -350,9 +349,7 @@
                 }
             }
         }
-        else if (( msg_->u.request.u.new_process.type == ProcessType_DTM         ) &&
-                 (( msg_->u.request.u.new_process.nid < 0                    ) ||
-                  ( msg_->u.request.u.new_process.nid >= Nodes->GetLNodesConfigMax() )   )   )
+        else if ( target_lnode == NULL )
         {
             msg_->u.reply.type = ReplyType_NewProcess;
             msg_->u.reply.u.new_process.return_code = MPI_ERR_SPAWN;
@@ -365,21 +362,6 @@
     
             return;
         }
-        else if (( msg_->u.request.u.new_process.type != ProcessType_DTM         ) &&
-                 (( msg_->u.request.u.new_process.nid < 0                    ) ||
-                  ( msg_->u.request.u.new_process.nid >= Nodes->GetLNodesConfigMax() )   )   )
-        {
-            msg_->u.reply.type = ReplyType_NewProcess;
-            msg_->u.reply.u.new_process.return_code = MPI_ERR_SPAWN;
-            // Send reply to requester
-            lioreply(msg_, pid_);
-
-            sprintf(la_buf, "[%s], Invalid Node ID (%d).\n", method_name,
-                    target_nid);
-            mon_log_write(MON_MONITOR_STARTPROCESS_7, SQ_LOG_ERR, la_buf);
-
-            return;
-        }
         else
         {
             if( msg_->u.request.u.new_process.backup )
diff --git a/core/sqf/monitor/linux/reqopen.cxx b/core/sqf/monitor/linux/reqopen.cxx
index 56b13b2..494a0e3 100644
--- a/core/sqf/monitor/linux/reqopen.cxx
+++ b/core/sqf/monitor/linux/reqopen.cxx
@@ -189,13 +189,17 @@
     const char method_name[] = "CExtOpenReq::prepare";
     TRACE_ENTRY;
 
+    int target_nid = -1;
+    CLNode *target_lnode = NULL;
+
     if ( prepared_ == true )
     {   // Already did the prepare work earlier.
         return true;
     }
 
-    if ((msg_->u.request.u.open.nid < 0) ||
-        (msg_->u.request.u.open.nid >= Nodes->GetLNodesConfigMax()))
+    target_nid = msg_->u.request.u.open.nid;
+    target_lnode = Nodes->GetLNode( target_nid );
+    if ( target_lnode == NULL )
     {
         char buf[MON_STRING_BUF_SIZE];
         sprintf(buf, "%s, Invalid Node ID (%d)\n", method_name,
diff --git a/core/sqf/monitor/linux/reqqueue.cxx b/core/sqf/monitor/linux/reqqueue.cxx
index 764966f..becb0cd 100644
--- a/core/sqf/monitor/linux/reqqueue.cxx
+++ b/core/sqf/monitor/linux/reqqueue.cxx
@@ -1971,6 +1971,11 @@
 {
     // Add eyecatcher sequence as a debugging aid
     memcpy(&eyecatcher_, "RQIP", 4);
+
+    if ( pnid == MyPNID )
+    {
+        SetReviveFlag(1); // allow this request to be processed during revive
+    }
 }
 
 CIntDownReq::~CIntDownReq()
diff --git a/core/sqf/monitor/linux/shell.cxx b/core/sqf/monitor/linux/shell.cxx
index 7bbc6e3..5037dd3 100644
--- a/core/sqf/monitor/linux/shell.cxx
+++ b/core/sqf/monitor/linux/shell.cxx
@@ -44,6 +44,7 @@
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/wait.h>
+#include <string> 
 
 #include "msgdef.h"
 #include "props.h"
@@ -541,6 +542,102 @@
     return( true );
 }
 
+bool update_node_state( char *nodeName, bool checkSpareColdStandby = true )
+{
+    if ( strlen(nodeName) == 0 )
+    {
+        return( false );
+    }
+
+    int rc, rc2;
+    char pnodename[MPI_MAX_PROCESSOR_NAME];
+    CPhysicalNode  *physicalNode;
+    PhysicalNodeNameMap_t::iterator it;
+    CCmsh cmshcmd( "sqnodestatus" );
+
+    strncpy(pnodename, nodeName, MPI_MAX_PROCESSOR_NAME);
+    pnodename[MPI_MAX_PROCESSOR_NAME-1] = '\0';
+
+    // Look up name
+    it = PhysicalNodeMap.find( pnodename );
+
+    if (it != PhysicalNodeMap.end())
+    {
+        physicalNode = it->second;
+    }
+    else
+    {
+        printf( "[%s] Error: Internal error while looking up physical node map, node name does not exist, node name=%s\n", MyName, pnodename );
+        return( false );
+    }
+
+    // save, close and restore stdin when executing ssh command 
+    // because ssh, by design, would consume contents of stdin.
+    int savedStdIn = dup(STDIN_FILENO);
+    if ( savedStdIn == -1 )
+    {
+        fprintf(stderr, "[%s] Error: dup() failed for STDIN_FILENO: %s (%d)\n", MyName, strerror(errno), errno );
+        exit(1);
+    }
+    close(STDIN_FILENO);
+
+    rc = cmshcmd.GetNodeState( nodeName, physicalNode );
+    rc2 = dup2(savedStdIn, STDIN_FILENO);
+    if ( rc2 == -1 )
+    {
+        fprintf(stderr, "[%s] Error: dup2() failed for STDIN_FILENO: %s (%d)\n", MyName, strerror(errno), errno );
+        exit(1);
+    }
+    close(savedStdIn);
+
+    if ( rc == -1 )
+    {
+        return( false );
+    }
+
+    NodeState_t nodeState;
+    CPNodeConfig *pnodeConfig = ClusterConfig.GetPNodeConfig( nodeName );
+    if ( pnodeConfig )
+    {
+        if ( get_pnode_state( PNode[pnodeConfig->GetPNid()], nodeState ) )
+        {
+            if ( nodeState == StateUp )
+            {
+                if ( checkSpareColdStandby && SpareNodeColdStandby )
+                {
+                    if ( pnodeConfig  && pnodeConfig->IsSpareNode() )
+                    {
+                        ++NumDown;
+                        NodeState[pnodeConfig->GetPNid()] = false;
+                        nodeState = StateDown;
+                        set_pnode_state( PNode[pnodeConfig->GetPNid()], nodeState );
+                    }
+                    else
+                    {
+                        NodeState[pnodeConfig->GetPNid()] = true;
+                    }
+                }
+                else
+                {
+                    NodeState[pnodeConfig->GetPNid()] = true;
+                }
+            }
+            else
+            {
+                NodeState[pnodeConfig->GetPNid()] = false;
+                ++NumDown;
+            }
+        }
+    }
+    else
+    {
+        printf( "[%s] Physical node configuration does not exist, node name=%s\n", MyName, nodeName );
+        return( false );
+    }
+    
+    return( true );
+}
+
 int mon_log_write(int pv_event_type, posix_sqlog_severity_t pv_severity, char *pp_string)
 {
     pv_event_type = pv_event_type;
@@ -3770,8 +3867,8 @@
     // If this is a real cluster
     if ( nid == -1 )
     {
-        // Get current physical state of all nodes
-        if ( !update_cluster_state( true, false ) )
+        // Get current physical state of target nodes
+        if ( !update_node_state( node_name, false ) )
         {
             return( rc ) ;
         }
diff --git a/core/sqf/monitor/linux/tcdbsqlite.cxx b/core/sqf/monitor/linux/tcdbsqlite.cxx
index d53f602..ff18cdd 100644
--- a/core/sqf/monitor/linux/tcdbsqlite.cxx
+++ b/core/sqf/monitor/linux/tcdbsqlite.cxx
@@ -2507,10 +2507,6 @@
         }
         else
         {
-            if ( prepStmt != NULL )
-            {
-                sqlite3_finalize( prepStmt );
-            }
             char buf[TC_LOG_BUF_SIZE];
             snprintf( buf, sizeof(buf)
                     , "[%s] (%s) failed, nid=%d, id=%d, error: %s\n"
@@ -2631,10 +2627,6 @@
         }
         else
         {
-            if ( prepStmt != NULL )
-            {
-                sqlite3_finalize( prepStmt );
-            }
             char buf[TC_LOG_BUF_SIZE];
             snprintf( buf, sizeof(buf)
                     , "[%s] (%s) failed, nid=%d, id=%d, error: %s\n"
diff --git a/core/sqf/monitor/linux/tmsync.cxx b/core/sqf/monitor/linux/tmsync.cxx
index 3e72241..60d9f40 100644
--- a/core/sqf/monitor/linux/tmsync.cxx
+++ b/core/sqf/monitor/linux/tmsync.cxx
@@ -661,30 +661,40 @@
         if (trace_settings & (TRACE_REQUEST | TRACE_TMSYNC))
             trace_printf("%s@%d - Unsolicited TmSync reply, handle=%d\n",
                          method_name, __LINE__, tmsync_req->Handle);
-        tmsync_req->Completed = true;
-        UnsolicitedComplete( msg );
-        if ( msg->u.reply.u.unsolicited_tm_sync.return_code != MPI_SUCCESS )
+        if (msg->u.reply.u.unsolicited_tm_sync.return_code == MPI_SUCCESS)
         {
             TmSyncReplyCode |= msg->u.reply.u.unsolicited_tm_sync.return_code;
-        }
-        if ( TmSyncPNid == MyPNID )
-        {
-            if (trace_settings & (TRACE_REQUEST | TRACE_TMSYNC))
-                trace_printf("%s@%d - Local Unsolicited TmSync reply, handle="
-                             "%d\n", method_name, __LINE__,
-                             tmsync_req->Handle);
-            if ( GetTmSyncReplies() == GetTotalSlaveTmSyncCount() )
+            tmsync_req->Completed = true;
+            UnsolicitedComplete( msg );
+            if ( TmSyncPNid == MyPNID )
             {
-                UpdateTmSyncState( TmSyncReplyCode );
-                UnsolicitedCompleteDone();
+                if (trace_settings & (TRACE_REQUEST | TRACE_TMSYNC))
+                    trace_printf("%s@%d - Local Unsolicited TmSync reply, handle="
+                                 "%d\n", method_name, __LINE__,
+                                 tmsync_req->Handle);
+                if ( GetTmSyncReplies() == GetTotalSlaveTmSyncCount() )
+                {
+                    UpdateTmSyncState( TmSyncReplyCode );
+                    UnsolicitedCompleteDone();
+                }
+            }
+            else
+            {
+                if ( GetTmSyncReplies() == GetTotalSlaveTmSyncCount() )
+                {
+                    CommitTmDataBlock(TmSyncReplyCode);
+                }
             }
         }
         else
-        {
-            if ( GetTmSyncReplies() == GetTotalSlaveTmSyncCount() )
-            {
-                CommitTmDataBlock(TmSyncReplyCode);
-            }
+        { // The Seabed callback has not been registered, try again
+            if (trace_settings & (TRACE_REQUEST | TRACE_TMSYNC))
+                trace_printf("%s@%d - Retrying Local Unsolicited TmSync, handle="
+                             "%d\n", method_name, __LINE__,
+                             tmsync_req->Handle);
+            PendingSlaveTmSyncCount--;
+            tmsync_req->Completed = false;
+            SendUnsolicitedMessages();
         }
     }
     else
diff --git a/core/sqf/monitor/linux/zclient.cxx b/core/sqf/monitor/linux/zclient.cxx
index 23dca8a..36a0600 100644
--- a/core/sqf/monitor/linux/zclient.cxx
+++ b/core/sqf/monitor/linux/zclient.cxx
@@ -650,7 +650,7 @@
     
         monZnode.assign( znodeQueue_.front() );
 
-        if (trace_settings)
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
         {
             trace_printf("%s@%d" " - znodePath=%s, znodeQueue_.size=%ld\n"
                         , method_name, __LINE__
@@ -659,10 +659,6 @@
 
         znodeQueue_.pop_front();
         
-        trace_printf( "%s@%d" " - Checking znode=%s\n"
-                    , method_name, __LINE__
-                    , monZnode.c_str() );
-
         strcpy( pathStr, monZnode.c_str() );
 
         tknStart++; // skip the first '/'
diff --git a/core/sqf/sql/scripts/analyzeMessageGuide.py b/core/sqf/sql/scripts/analyzeMessageGuide.py
index 56a6151..162d738 100644
--- a/core/sqf/sql/scripts/analyzeMessageGuide.py
+++ b/core/sqf/sql/scripts/analyzeMessageGuide.py
@@ -416,6 +416,7 @@
             elif state == 2:
                 if line[i] == '>':
                     state = 0
+                    result = result + '.elided.'
                 else:
                     throwAway = throwAway + '>' + line[i]
             i = i + 1
@@ -426,7 +427,7 @@
             result = result + ' <' + throwAway
         #print "Before<: " + line
         #print "After<: " + result
-        return result 
+        return result.rstrip() # ignore trailing spaces
 
     def removeDollarTerms(self,line):
         # removes text of the form $0~Datatype0 (where Datatype might
@@ -470,10 +471,11 @@
                 if line[i].isalpha():
                     throwAway = throwAway + line[i]
                 elif line[i].isdigit():
-                    state = 0  # we reached the end of the dollar text                  
-                else:
-                    result = result + line[i]
                     state = 0  # we reached the end of the dollar text
+                    result = result + '.elided.'                  
+                else: 
+                    state = 0  # we reached the end of the dollar text
+                    result = result + '.elided.' + line[i]
             i = i + 1
 
         # if we reached the end of the line then put the throwaway text
@@ -482,7 +484,7 @@
             result = result + throwAway
         #print "Before$: " + line
         #print "After$: " + result
-        return result           
+        return result.rstrip() # ignore trailing spaces          
 
 
     def compareText(self):
@@ -621,6 +623,7 @@
 enumFileList = ( [ ['ustat/hs_const.h','USTAT_ERROR_CODES'],
     ['sqlcomp/CmpDDLCatErrorCodes.h','CatErrorCode'],
     ['optimizer/opt_error.h','OptimizerSQLErrorCode'],
+    ['optimizer/UdrErrors.h','UDRErrors'],
     ['exp/ExpErrorEnums.h','ExeErrorCode'] ] )
 for entry in enumFileList:
     fileName = mySQroot + '/../sql/' + entry[0]
diff --git a/core/sqf/sql/scripts/install_apache_hadoop b/core/sqf/sql/scripts/install_apache_hadoop
index 9316b56..dbbbd4e 100755
--- a/core/sqf/sql/scripts/install_apache_hadoop
+++ b/core/sqf/sql/scripts/install_apache_hadoop
@@ -1528,7 +1528,7 @@
 echo "Setting up DCS, REST and Phoenix tests..."
 
 #Default GIT location
-GIT_DIR="git@github.com:apache/incubator-trafodion"
+GIT_DIR="git@github.com:apache/trafodion"
 DCS_SRC=$TRAF_HOME/../../dcs
 
 if [ -d $DCS_SRC ]; then
@@ -1540,7 +1540,7 @@
    # Default location for phoenix_test
    PHX_SRC=$TRAF_HOME/../../tests/phx
 else
-   TRAF_SRC=$MY_SW_ROOT/src/incubator-trafodion
+   TRAF_SRC=$MY_SW_ROOT/src/trafodion
    if [ ! -d $TRAF_SRC ]; then
      mkdir -p $MY_SW_ROOT/src
      cd $MY_SW_ROOT/src
diff --git a/core/sqf/sql/scripts/install_local_hadoop b/core/sqf/sql/scripts/install_local_hadoop
index fc3eed1..7c2b062 100755
--- a/core/sqf/sql/scripts/install_local_hadoop
+++ b/core/sqf/sql/scripts/install_local_hadoop
@@ -594,7 +594,7 @@
     HADOOP_TAR=hadoop-2.7.1.2.3.2.0-2950.tar.gz
 fi
 if [[ "$HBASE_DISTRO" =~ "APACHE" ]]; then
-    HADOOP_TAR=hadoop-2.5.2.tar.gz
+    HADOOP_TAR=hadoop-2.6.0.tar.gz
 fi
 
 # Alternative: Use MariaDB (not validated)
@@ -1318,7 +1318,7 @@
 
 # Enable logging by default to help find problems
 general-log=1
-general-log-file=${MY_SW_ROOT}/log/mysql-general.\${HOSTNAME}.log
+general-log-file=${MY_SW_ROOT}/log/mysql-general.${HOSTNAME}.log
 
 EOF
 
diff --git a/core/sqf/sql/scripts/sqnodestatus b/core/sqf/sql/scripts/sqnodestatus
index 44dc93f..56511fc 100755
--- a/core/sqf/sql/scripts/sqnodestatus
+++ b/core/sqf/sql/scripts/sqnodestatus
@@ -34,25 +34,34 @@
 my %node_hash=();
 my $sq_mon_ssh_options=readpipe("echo -n \$SQ_MON_SSH_OPTIONS");
 my $json=$ARGV[0];
+my $node_name=$ARGV[1];
 
 &main();
 
 sub main()
 {
-  #$node_context=~s/-w//ig;
-  #print "node_context=${node_context}";
-  chomp($node_context);
-  my @nodes=split(' ',$node_context);
-  foreach my $node(@nodes)
+  #print "json=${json}\n";
+  #print "node_name=${node_name}\n";
+  if ($ARGV[0] ne '-n')
   {
-     $check_flag=check_node_status($node);
+     #print "node_context=${node_context}";
+     chomp($node_context);
+     my @nodes=split(' ',$node_context);
+     foreach my $node(@nodes)
+     {
+        $check_flag=check_node_status($node);
+     }
+  }
+  else
+  {
+     $check_flag=check_node_status($node_name);
   }
   print_node_status();
 }
 
 sub print_node_status()
 {
-    if ($json) 
+    if ($json eq '-json')
     {
         $comma="";
         print "[";
diff --git a/core/sqf/src/seabed/src/msmon.cpp b/core/sqf/src/seabed/src/msmon.cpp
index 311e099..cad7146 100644
--- a/core/sqf/src/seabed/src/msmon.cpp
+++ b/core/sqf/src/seabed/src/msmon.cpp
@@ -6609,7 +6609,7 @@
     } else {
         if (gv_ms_trace_mon)
             trace_where_printf(WHERE, "no tmsync callback, replying with error\n");
-        lv_handle = -1;
+        lv_handle = pp_msg->u.request.u.unsolicited_tm_sync.handle;
         lv_cbret = 1; // set error
     }
     lv_err = gp_local_mon_io->acquire_msg(&lp_msg);
diff --git a/core/sqf/src/seabed/src/sqstatepi.cpp b/core/sqf/src/seabed/src/sqstatepi.cpp
index 383a13a..4f1a267 100644
--- a/core/sqf/src/seabed/src/sqstatepi.cpp
+++ b/core/sqf/src/seabed/src/sqstatepi.cpp
@@ -289,18 +289,23 @@
     lp_prog = basename(pp_proc->program);
 #ifdef SQ_PHANDLE_VERIFIER
     sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d" PFVY ", prog=%s",
-#else
-    sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
-#endif
             pp_proc->process_name,
             pp_proc->type,
             lp_proc_t,
             pp_proc->nid,
             pp_proc->pid,
-#ifdef SQ_PHANDLE_VERIFIER
             pp_proc->verifier,
-#endif
             lp_prog);
+#else
+    sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
+            pp_proc->process_name,
+            pp_proc->type,
+            lp_proc_t,
+            pp_proc->nid,
+            pp_proc->pid,
+            lp_prog);
+#endif
+
     if (pv_str)
         lp_op = "sb_ic_get_openers";
     else
@@ -339,18 +344,22 @@
     lp_prog = basename(pp_proc->program);
 #ifdef SQ_PHANDLE_VERIFIER
     sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d" PFVY ", prog=%s",
-#else
-    sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
-#endif
             pp_proc->process_name,
             pp_proc->type,
             lp_proc_t,
             pp_proc->nid,
             pp_proc->pid,
-#ifdef SQ_PHANDLE_VERIFIER
             pp_proc->verifier,
-#endif
             lp_prog);
+#else
+    sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
+            pp_proc->process_name,
+            pp_proc->type,
+            lp_proc_t,
+            pp_proc->nid,
+            pp_proc->pid,
+            lp_prog);
+#endif
     if (pv_str)
         lp_op = "sb_ic_get_opens";
     else
diff --git a/core/sql/arkcmp/CmpContext.cpp b/core/sql/arkcmp/CmpContext.cpp
index e5c179e..f824e1e 100644
--- a/core/sql/arkcmp/CmpContext.cpp
+++ b/core/sql/arkcmp/CmpContext.cpp
@@ -151,9 +151,6 @@
   cmpCurrentContext = this;
   CMPASSERT(heap_ != NULL);
 
-  // For embedded arkcmp, the CmpInternalErrorJmpBuf will be populated
-  // later but the buffer location and pointer are valid
-  heap_->setJmpBuf(CmpInternalErrorJmpBufPtr);
   heap_->setErrorCallback(&CmpErrLog::CmpErrLogCallback);
 
   // Reserve memory that can be used for out-of-memory reporting.
@@ -214,13 +211,11 @@
     memLimit = (size_t) 1024 * CmpCommon::getDefaultLong(MEMORY_LIMIT_HISTCACHE_UPPER_KB);
     const Lng32 initHeapSize = 16 * 1024;    // ## 16K
     NAHeap *histogramCacheHeap = new (heap_) 
-                                 NAHeap("HistogramCache Heap",
+                                 NAHeap((const char *)"HistogramCache Heap",
                                  heap_,
                                  initHeapSize,
                                  memLimit);
 
-    histogramCacheHeap->setJmpBuf(CmpInternalErrorJmpBufPtr);
-
     // Setting up the cache for histogram
     histogramCache_ = new(histogramCacheHeap) HistogramCache(histogramCacheHeap, 107);
 
@@ -304,9 +299,6 @@
   // create dynamic metadata descriptors
   CmpSeabaseDDL cmpSeabaseDDL(heap_);
   cmpSeabaseDDL.createMDdescs(trafMDDescsInfo_);
-
-  emptyInLogProp_ = NULL;
-
 }
 
 // MV
diff --git a/core/sql/arkcmp/CmpContext.h b/core/sql/arkcmp/CmpContext.h
index b875927..5df2eca 100644
--- a/core/sql/arkcmp/CmpContext.h
+++ b/core/sql/arkcmp/CmpContext.h
@@ -449,11 +449,6 @@
   // optimizer cached defaults
   OptDefaults* getOptDefaults() { return optDefaults_; }
 
-  // context global empty input logical property
-  EstLogPropSharedPtr* getGEILP() { return &emptyInLogProp_; }
-  void setGEILP(EstLogPropSharedPtr inLogProp)
-                             { emptyInLogProp_ = inLogProp; }
-    
   MDDescsInfo *getTrafMDDescsInfo() { return trafMDDescsInfo_; }
 
   void setCIClass(CmpContextInfo::CmpContextClassType x) { ciClass_ = x; }
@@ -615,9 +610,6 @@
   // query defaults using during a statement compilation
   OptDefaults* optDefaults_;
 
-  // context global empty input logical property
-  EstLogPropSharedPtr emptyInLogProp_;
-
   MDDescsInfo * trafMDDescsInfo_;
 
   CmpContextInfo::CmpContextClassType ciClass_;
diff --git a/core/sql/arkcmp/CmpStatement.cpp b/core/sql/arkcmp/CmpStatement.cpp
index e16ff5c..69e1f45 100644
--- a/core/sql/arkcmp/CmpStatement.cpp
+++ b/core/sql/arkcmp/CmpStatement.cpp
@@ -165,12 +165,10 @@
   {
     // set up statement heap with 32 KB allocation units
     size_t memLimit = (size_t) 1024 * CmpCommon::getDefaultLong(MEMORY_LIMIT_CMPSTMT_UPPER_KB);
-    heap_ = new (context_->heap()) NAHeap("Cmp Statement Heap",
+    heap_ = new (context_->heap()) NAHeap((const char *)"Cmp Statement Heap",
                        context_->heap(),
                        (Lng32)32768,
                        memLimit);
-
-    heap_->setJmpBuf(&ExportJmpBuf);
     heap_->setErrorCallback(&CmpErrLog::CmpErrLogCallback);
   }
 
@@ -1515,17 +1513,18 @@
   // do any necessary initialization work here (unless this
   // initialization work fits in the constructor)
 
-  // Initialize the global "empty input logprop".
-  context_->setGEILP(EstLogPropSharedPtr(new (STMTHEAP)
-                              EstLogProp(1,
-                                         NULL,
-                                         EstLogProp::NOT_SEMI_TSJ,
-                                         new (STMTHEAP) CANodeIdSet(),
-                                         TRUE)));
-
-    //++MV
-    // This input cardinality is not estimated , so we keep this knowledge
-    // in a special attribute.
+  // Initialize the global "empty input logprop"
+  if (emptyInLogProp_ == NULL)
+    emptyInLogProp_ = EstLogPropSharedPtr(
+         new (STMTHEAP) EstLogProp(1,
+                                   NULL,
+                                   EstLogProp::NOT_SEMI_TSJ,
+                                   new (STMTHEAP) CANodeIdSet(STMTHEAP),
+                                   TRUE));
+  
+  //++MV
+  // This input cardinality is not estimated , so we keep this knowledge
+  // in a special attribute.
   (*GLOBAL_EMPTY_INPUT_LOGPROP)->setCardinalityEqOne();
 
 #ifdef _DEBUG
diff --git a/core/sql/arkcmp/CmpStatement.h b/core/sql/arkcmp/CmpStatement.h
index 14b1781..dc0767a 100644
--- a/core/sql/arkcmp/CmpStatement.h
+++ b/core/sql/arkcmp/CmpStatement.h
@@ -230,6 +230,9 @@
   const LIST(CSEInfo *) *getCSEInfoList() const { return cses_; }
   void addCSEInfo(CSEInfo *info);
 
+  // context global empty input logical property
+  EstLogPropSharedPtr* getGEILP() { return &emptyInLogProp_; }
+    
 protected:
   // CmpStatement(const CmpStatement&); please remove this line
   CmpStatement& operator=(const CmpStatement&);
@@ -327,6 +330,9 @@
   // CmpMain::sqlcomp(QueryText, ...
   Int32 numOfCompilationRetries_;
 
+  // context global empty input logical property
+  EstLogPropSharedPtr emptyInLogProp_;
+
 }; // end of CmpStatement
 
 class CmpStatementISP: public CmpStatement
diff --git a/core/sql/bin/SqlciErrors.txt b/core/sql/bin/SqlciErrors.txt
index 4b0389a..0a82369 100644
--- a/core/sql/bin/SqlciErrors.txt
+++ b/core/sql/bin/SqlciErrors.txt
@@ -146,67 +146,67 @@
 1144 ZZZZZ 99999 BEGINNER MAJOR DBADMIN A quoted string was expected in first key clause for column $0~ColumnName on table $1~TableName, but the value detected is ($2~String0).
 1145 3D000 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1146 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object $0~String0 could not be altered because it either does not exist or is not a table.
-1147 ZZZZZ 99999 BEGINNER MINOR DBADMIN System-generated column $0~ColumnName of base table $1~TableName cannot appear in a unique or primary key constraint definition.
+1147 ZZZZZ 99999 BEGINNER MINOR DBADMIN System-generated column $0~ColumnName of base table $1~TableName cannot appear in a unique or primary key constraint.
 1148 23000 99999 BEGINNER MINOR DBADMIN System-generated column $0~ColumnName of base table $1~TableName cannot appear in a referential integrity constraint definition.
-1149 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Column $0~ColumnName does not exist in table $1~TableName.
-1150 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Table $0~TableName was not created because Partition Overlay Support could not generate volume names for the partitions to reside on.
-1151 01000 99999 BEGINNER MAJOR DBADMIN POS (Partition Overlay Support) was not applied because volume names could not be generated for the partitions.  So a simple table ~TableName was created without partitions.
-1152 ZZZZZ 99999 BEGINNER CRTCL DBADMIN Unable to access partition $0~string0. Partition is offline.
-1153 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Column $0~String0 whose size is not set cannot be part of the primary key.
-1154 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot create object $0~TableName because the table size $1~Int0 is too big to fit on the system($2~Int1 partitions).
+1149 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1150 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1151 01000 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1152 ZZZZZ 99999 BEGINNER CRTCL DBADMIN --- unused ---
+1153 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1154 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1155 ZZZZZ 99999 BEGINNER MINOR DBADMIN Operation cannot be performed because $0~String0 is not a synonym.
 1156 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object $0~TableName does not have columns.
 1157 ZZZZZ 99999 BEGINNER MINOR DBADMIN Synonym object $0~String0 is the same as previous mapping.
-1158 ZZZZZ 99999 BEGINNER MINOR LOGONLY Object $0~TableName already exists.
-1159 ZZZZZ 99999 BEGINNER MINOR DBADMIN $0~TableName does not exist.
-1160 ZZZZZ 99999 BEGINNER MAJOR DBADMIN A mismatch between the NOT DROPPABLE PRIMARY KEY constraint and the STORE BY clause was detected.  When both clauses are specified, the STORE BY key column list must be the same as, or a prefix of, the PRIMARY KEY column list.  This mismatch is caused by differences between the columns themselves, the order of columns, or the ASC/DESC attribute.
-1161 ZZZZZ 99999 BEGINNER MINOR DBADMIN System generated column $0~String0 cannot be specified as part of the PARTITION BY clause.
-1162 ZZZZZ 99999 BEGINNER INFRM LOGONLY User $0~String0 already owns $1~String1 $2~String2.  Operation ignored.
-1163 ZZZZZ 99999 BEGINNER MAJOR DBADMIN You are not authorized to change the owner of $0~String0 $1~String1 owned by user $2~String2.
-1164 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Internal error: unable to send controls while performing $0~String0. Error returned is $1~Int0.
-1165 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Parallel $0~string0 operation failed on $1~TableName, the serial operation for this object will be performed.
-1166 ZZZZZ 99999 BEGINNER MAJOR DBADMIN	Drop of partition $0~string0 for object $1~string1 failed with error $2~NSKCode, continuing.
-1167 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot change ownership of $0~String0 $1~String1 because $2~String2 is not the super ID or the services ID.
-1168 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Internal error: unable to find the object associated with UID $0~string0 that belongs to the schema associated with UID $1~string1.  This object is being accessed by $2~string2 using the lock mode of $3~string3.
-1169 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Schema privileges for schema $0~String0 have not been enabled.
-1170 ZZZZZ 99999 BEGINNER MINOR DBADMIN Invalid number of disk pools $0~Int0 specified either through the default or through the DDL statement.
-1171 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Error $0~int0 was returned by the file system while fetching the size of the disk.
-1172 ZZZZZ 99999 BEGINNER MAJOR DBADMIN DDL privileges cannot be specified on $0~string0.  They may only be specified at the schema level.
-1173 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Volatile table $0~TableName was not created as the free space threshold $0~string0 percent was reached on all available disks.
-1174 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An invalid data type was specified for column $0~String0.
-1175 ZZZZZ 99999 BEGINNER INFRM LOGONLY Since table $0~TableName does not contain an IDENTIFIER column, the sequence generator object was not recreated.
-1176 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot create object $0~TableName because max table size $1~Int0 is bigger than absolute max table size $2~Int1.
-1177 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Internal error: trying to create an object with a qualified name type of $0~string0. This qualified name type is not supported. 
-1178 ZZZZZ 99999 ADVANCED MAJOR DIALOUT Internal error: unable to find catalog associated with uid $0~string0.  This catalog is being accessed by role $1~string1.
-1179 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Internal error: inconsistent object definition for $0~TableName found for object in name space $0~string0. Object owned by $1~string1. Unable to find object details in $2~string2;
+1158 ZZZZZ 99999 BEGINNER MINOR LOGONLY --- unused ---
+1159 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1160 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1161 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1162 ZZZZZ 99999 BEGINNER INFRM LOGONLY --- unused ---
+1163 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1164 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1165 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1166 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1167 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1168 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1169 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1170 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1171 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1172 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1173 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1174 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An unsupported data type was encountered in this operation.
+1175 ZZZZZ 99999 BEGINNER INFRM LOGONLY --- unused ---
+1176 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1177 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1178 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused ---
+1179 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
 1180 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Trying to create an external $0~String0 table with a different schema or table name ($1~TableName) than the source table ($2~String1).  The external schema and table name must be the same as the source. 
 1181 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Trying to create a schema with name $0~TableName to store the definition of a native HIVE or HBASE table and the name is too long.  Maximum length supported is $0~Int0.
-1182 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Error $0~NSKCode was returned by the file system on resource fork $1~String0.
-1183 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Error $0~NSKCode was returned by the file system on metadata table $1~TableName (file name $2~String0).
-1184 ZZZZZ 99999 BEGINNER MAJOR DBADMIN You do not have the required privilege(s) on $0~ColumnName.
-1185 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The location name is either invalid or missing.
+1182 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1183 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1184 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1185 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1186 42000 99999 BEGINNER MAJOR DBADMIN Column $0~ColumnName is of type $1~String0 which is not compatible with the default value's type, $2~String1.
 1187 3F000 99999 BEGINNER MAJOR DBADMIN The schema name $0~SchemaName is reserved for SQL metadata.
 1188 23000 99999 BEGINNER MAJOR DBADMIN Referential integrity constraint $0~ConstraintName for table $1~TableName could not be created due to circular dependency: $2~String0.
-1189 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Request failed. View $0~string0 is already valid.
+1189 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1190 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Failed to initialize Hive metadata. Call to $0~string0 returned error $1~string1($0~int0). Cause: $2~string2.
 1191 ZZZZZ 99999 BEGINNER MAJOR DBADMIN SERIALIZE option is not yet supported for $0~string0 datatype.
 1192 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Failed to retrieve data from Hive metastore.  Call to $0~string0 returned error $1~string1($0~int0). Cause: $2~string2.
 1193 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The $0~string0 specified in the $1~string1 clause must be identical to the primary key for a Trafodion table.
-1194 ZZZZZ 99999 ADVANCED MAJOR DIALOUT Label $0~String0 could not be dropped (file error $1~NSKCode). 
+1194 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused ---
 1195 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Column $0~ColumnName is not allowed as a salt column. Only primary key columns or STORE BY columns are allowed.
-1196 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The number of salt partitions must be between $0~int0 and $1~int1
+1196 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The number of salt partitions must be between $0~int0 and $1~int1 inclusive.
 1197 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The location $0~string0 for $1~string1 does not match with another location $2~string2 specified. All location specifications must be identical.
-1198 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU View text is too long to fit into the metadata.
+1198 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
 1199 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The PARTITION BY clause is not allowed for a Trafodion table.
 1200 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU An error occurred while reading HDFS file statistics for Hive table $1~TableName. Cause: $0~string0.
 1201 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Salted index $0~string0 cannot be unique.
 1202 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Index $0~string1 cannot be salted like a table since table $0~string0 is not salted.
 1203 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU HBASE_OPTIONS clause in CREATE statement is longer than 6000 characters. Object $0~string0 was not created.
 1204 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Unsupported Hive datatype $0~string0.
-1205 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Hive feature $0~string0 is not supported.
+1205 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
 1206 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU SALT AND SPLIT BY clauses are not allowed together.
-1207 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Text to be inserted into the TEXT metadata table contains too many quotes.
+1207 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
 1208 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The SPLIT BY clause is not supported for indexes.
 1209 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU The SPLIT BY clause has more columns than the table has key columns ($0~Int0).
 1210 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Column $0~String0 is not allowed at position $1~Int0 in the SPLIT BY clause, because SPLIT BY must specify a prefix of the clustering key columns and the next clustering key column is $2~String1.
@@ -219,35 +219,35 @@
 1221 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Only system components may contain system operations.
 1222 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Command not supported when authorization is not enabled.
 1223 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Grant to self or DB__ROOT is not allowed.
-1224 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An invalid data type was specified for routine parameter $0~String0.
-1225 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Mixing EXECUTE privilege with other privileges is not allowed.
-1226 ZZZZZ 99999 BEGINNER MAJOR DBADMIN No valid combination of privileges was specified.
-1227 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot unregister user.  User $0~String0 has been granted privileges on $1~String1.
-1228 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot drop role.  Role $0~String0 has been granted privileges on $1~String1.
+1224 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1225 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1226 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1227 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot unregister user. User $0~String0 has been granted privileges on $1~String1.
+1228 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot drop role. Role $0~String0 has been granted privileges on $1~String1.
 1229 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The $0~string0 option is not supported.
 1230 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Object owner must be the schema owner in private schemas.
 1231 ZZZZZ 99999 BEGINNER MAJOR DBADMIN User-defined routine $0~String0 could not be created.
-1232 ZZZZZ 99999 BEGINNER MINOR DBADMIN A file error ($0~int0) occurred when saving dropped table DDL for table $1~TableName to $2~String0.
-1233 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Creating schema in SQL system catalog $0~String0 is prohibited.
-1234 ZZZZZ 99999 BEGINNER MAJOR DIALOUT Authorization need to be reinitialized due to missing or corrupted privilege manager metadata. To reinitialize, do 'initialize authorization, drop' followed by 'initialize authorization'. This deletes and recreates privilege manager metadata. Trafodion metadata is not affected. 
-1235 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An invalid combination of EXTENT sizes and MAXEXTENTS was specified for table or index $0~TableName. File system error returned $1~int0.
-1236 3F000 99999 BEGINNER MAJOR DBADMIN The schema name specified for SQL object $0~String0 is not valid. The schema name must be the same as the schema being created.
+1232 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1233 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1234 ZZZZZ 99999 BEGINNER MAJOR DIALOUT Authorization needs to be reinitialized due to missing or corrupted privilege manager metadata. To reinitialize, do 'initialize authorization, drop' followed by 'initialize authorization'. This deletes and recreates privilege manager metadata. Trafodion metadata is not affected. 
+1235 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1236 3F000 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1237 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused as of 05/7/12 ---
-1238 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The character set used in TRIGGER text must be ISO88591.
-1239 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The character set for string literals in VIEW must be ISO88591.
+1238 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1239 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1240 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The character set for a PARTITION KEY column must be ISO88591.
-1241 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The character set for HEADING must be ISO88591.
-1242 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The character set for string literals in CONSTRAINT must be ISO88591.
+1241 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1242 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1243 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The hexadecimal form of string literals is not allowed in this context.
-1244 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Authorization initialization failed.
-1245 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The supplied partition key value ($0~String0) for column $1~ColumnName of object $2~TableName is not valid.
-1246 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The supplied partition key value ($0~String0) is inconsistent with the data type of column $1~ColumnName of object $2~TableName.   
+1244 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1245 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1246 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1247 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ---- Msg text will be merged in.  ------   
-1248 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Partition name $0~string0 has already been used. Each partition in an object should have a unique name.
-1249 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Insert into $0~string0 log table failed.    
-1250 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Operation cannot be performed on object $0~TableName because a utility operation ($2~string0) associated with DDL_LOCK $3~string1 is currently running.
-1251 01000 99999 BEGINNER MINOR DBADMIN The extra insignificant digits of default value ($0~String0) are truncated to match the scale of the data type of column $1~ColumnName.
-1252 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The existing index $0~TableName to be used by a unique or primary constraint has not been populated.  Please populate the index and then try to add the constraint again.
+1248 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1249 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1250 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1251 01000 99999 BEGINNER MINOR DBADMIN The extra insignificant digits of default value $0~String0 are truncated to match the scale of the data type of column $1~ColumnName.
+1252 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1253 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ---- Msg text will be merged in.  ------   
 1254 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Duplicate unique constraints are not allowed with same set of columns.
 1255 ZZZZZ 99999 BEGINNER MINOR DBADMIN Constraint $0~String0 is the clustering key constraint for table $1~String1 and cannot be dropped.
@@ -256,76 +256,76 @@
 1258 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ---- Msg text will be merged in ------  
 1259 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ---- Msg text will be merged in ------  
 1260 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Debugging of UDRs is only allowed for the DB__ROOT user. Connect as DB__ROOT, preferably using the sqlci tool, and try again.
-1261 ZZZZZ 99999 BEGINNER MAJOR DIALOUT ADVANCED MAJOR DIALOUT Error $0~int0 was returned by the SQL CLI while processing the UDR_JAVA_OPTIONS setting '$1~string0'.
-1262 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The command cannot be executed because $0~string0 is in progress for $1~string1.
-1263 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object names that start with $0~String0 are reserved for SQL metadata.
+1261 ZZZZZ 99999 BEGINNER MAJOR DIALOUT --- unused ---
+1262 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1263 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1264 ZZZZZ 99999 BEGINNER MINOR DBADMIN Duplicate privileges are not allowed in a GRANT or REVOKE statement.
-1265 ZZZZZ 99999 BEGINNER MINOR DBADMIN Duplicate grantees are not allowed in a GRANT or REVOKE statement.
+1265 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1266 ZZZZZ 99999 BEGINNER MINOR DBADMIN Only EXECUTE privilege is supported for a procedure or routine.
 1267 ZZZZZ 99999 BEGINNER MINOR DBADMIN $0~string0 privilege is incompatible with this object type.
 1268 ZZZZZ 99999 BEGINNER MINOR DBADMIN Duplicate columns are not allowed in a GRANT or REVOKE statement.
 1269 ZZZZZ 99999 BEGINNER MINOR DBADMIN Column name $0~String0 is reserved for internal system usage. It cannot be specified as a user column.
-1270 ZZZZZ 99999 BEGINNER MAJOR DBADMIN ALLOCATE or DEALLOCATE failed for object $0~TableName due to file error $1~Int0 on $2~String0.
-1271 ZZZZZ 99999 BEGINNER MAJOR DBADMIN ALLOCATE failed for object $0~TableName because the number of extents to be allocated ($1~Int0) is greater than the MAXEXTENTS for a partition of the object.
-1272 ZZZZZ 99999 BEGINNER CRTCL DIALOUT The system is not licensed for use of SQL format tables.
-1273 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The specified MAXEXTENTS value must be greater than the number of extents allocated.
-1274 01000 99999 BEGINNER MAJOR DBADMIN The specified or default MAXEXTENTS value is not sufficient. The MAXEXTENTS value has been automatically set to the new value of $1~Int0 for the file $0~String0.
-1275 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Constraint $0~String0 cannot be dropped because it is needed by unique constraint $1~String1.   
+1270 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1271 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1272 ZZZZZ 99999 BEGINNER CRTCL DIALOUT --- unused ---
+1273 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1274 01000 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1275 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1276 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Unable to select partition $0~String0 from table $1~TableName.
-1277 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Unrecognized partitioning scheme for object $0~String0.
-1278 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The command cannot be executed because $0~String0 is in progress for all schemas in catalog $1~CatalogName.
+1277 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1278 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1279 42000 99999 BEGINNER MAJOR DBADMIN A volatile DDL statement cannot be used on regular objects.
 1280 42000 99999 BEGINNER MAJOR DBADMIN A regular DDL statement cannot be used on volatile objects.
-1281 ZZZZZ 99999 BEGINNER INFRM LOGONLY
-1282 ZZZZZ 99999 BEGINNER INFRM LOGONLY A LOB column cannot be specified in a volatile table
+1281 ZZZZZ 99999 BEGINNER INFRM LOGONLY --- unused ---
+1282 ZZZZZ 99999 BEGINNER INFRM LOGONLY A LOB column cannot be specified in a volatile table.
 1283 0A000 99999 BEGINNER INFRM LOGONLY The specified constraint or file option is not supported on a volatile table.
-1284 ZZZZZ 99999 BEGINNER MINOR DBADMIN Table $0~TableName cannot be dropped because it was specified to be NOT DROPPABLE.
-1285 ZZZZZ 99999 BEGINNER MINOR DBADMIN Schema $0~SchemaName cannot be dropped because it contains NOT DROPPABLE table $1~TableName.
+1284 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1285 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1286 ZZZZZ 99999 BEGINNER MINOR DBADMIN The NOT DROPPABLE clause is not allowed for volatile tables.
-1287 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Only super ID can execute INITIALIZE SECURITY.
-1288 ZZZZZ 99999 BEGINNER MAJOR DIALOUT Security was not able to initialize.
+1287 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1288 ZZZZZ 99999 BEGINNER MAJOR DIALOUT --- unused ---
 1289 ZZZZZ 99999 BEGINNER MINOR DBADMIN The use of ALTER on reserved schemas and metadata schemas is not permitted.
-1290 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Select from table $0~TableName failed during UPGRADE operation.
-1291 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Select from table $0~TableName failed during DOWNGRADE operation.
-1292 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Delete from $0~string0 log table failed.    
-1293 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Schema version of the object $0~string0 must be 2300.    
-1294 ZZZZZ 99999 BEGINNER MAJOR DBADMIN ISO_MAPPING must be SJIS for this operation.    
-1295 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Target column $0~ColumnName has mismatching default type.    
-1296 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Character data type of target column and literal are different.    
-1297 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The $0~string0 command cannot be executed because volatile schema $1~string1 exists.
+1290 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1291 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1292 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1293 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1294 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1295 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1296 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1297 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1298 ZZZZZ 99999 BEGINNER MINOR DBADMIN Schema $0~SchemaName could not be altered. $0~String0
 1299 ZZZZZ 99999 BEGINNER MAJOR DBADMIN You cannot specify some columns with just the name and others with name & data attributes.
-1300 ZZZZZ 99999 ADVANCED MAJOR DIALOUT Catman generated unknown Exception for procedure $0~TableName.
-1301 ZZZZZ 99999 BEGINNER MAJOR DBADMIN NO ACTION referential action for $0~string0 clause is not yet supported as specified by ANSI SQL99 standard. To alter the behavior, set an appropriate value for the REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT default.
-1302 01000 99999 BEGINNER MAJOR DBADMIN NO ACTION referential action for $0~string0 clause behaves like RESTRICT referential action. To alter the behavior, set the appropriate value for the REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT default.
-1303 ZZZZZ 99999 UUUUU UUUUUUUU UUUUU --- unused as of 5/7/12 --- 
-1304 ZZZZZ 99999 BEGINNER CRTCL DIALOUT SQL could not obtain the location of the system schema tables.
-1305 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The specified schema location $0~string0 is already in use by schema $1~SchemaName.
-1306 ZZZZZ 99999 ADVANCED MAJOR DIALOUT SQL was not able to generate a unique schema location for schema $0~SchemaName.
-1307 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The schema location $0~string0 is reserved for SQL metadata.
-1308 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ----- Msg not currently used -----
-1309 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Object type for $0~string0 is not valid for the current operation.
-1310 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The CREATE SCHEMA statement does not support the creation of triggers. 
-1311 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Unable to $0~string0 constraint $1~ConstraintName due to the above errors.
-1312 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Unable to $0~string0 $1~TableName due to the above errors.
+1300 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused --- 
+1301 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused --- 
+1302 01000 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1303 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused --- 
+1304 ZZZZZ 99999 BEGINNER CRTCL DIALOUT --- unused ---
+1305 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1306 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused ---
+1307 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1308 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
+1309 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1310 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused --- 
+1311 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1312 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1313 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The referential integrity constraint $0~string0 has been created with the NOT ENFORCED attribute and will not be enforced during INSERT, UPDATE, OR DELETE statements.
-1314 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The create privilege has been turned off for catalog $0~CatalogName. The $1~string0 cannot be performed for $2~TableName.
-1315 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Catalog $0~CatalogName does not have an associated definition schema.  The ALTER operation fails.
-1316 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Specified synonym $0~TableName does not reference a system view for object $1~String0.  
-1317 ZZZZZ 99999 ADVANCED MAJOR DBADMIN No DBA role has been identified.
-1318 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Synonym name $0~TableName which contains $1~int0 bytes is too long.  Synonym name must be less than $2~int1 bytes.
-1319 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Mismatch of privileges specified for PUBLISH or UNPUBLISH on object $0~TableName.  Select column level privileges cannot be specified with table level privileges.
-1320 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Mismatch of privileges specified for PUBLISH or UNPUBLISH on object $0~TableName.  Select column level privileges cannot be specified with either update or references column level privileges.
-1321 ZZZZZ 99999 ADVANCED MAJOR DBADMIN The public schema has not been specified.  Cannot PUBLISH or UNPUBLISH the requested privileges for object $0~TableName.
-1322 ZZZZZ 99999 ADVANCED MAJOR DBADMIN The synonym $0~String0 is referencing view $1~String1 which is not related to object $2~String2.  The referenced view is related to $3~String3.
+1314 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1315 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1316 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1317 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1318 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1319 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1320 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1321 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1322 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
 1323 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Unable to grant privileges while creating $0~Tablename.
-1324 ZZZZZ 99999 ADVANCED MAJOR DBADMIN You are not allowed to exercise trigger $0~String0.
-1325 ZZZZZ 99999 ADVANCED MAJOR DBADMIN The WITH GRANT OPTION privilege has been disabled.  You are not allowed to perform the request.
-1326 42000 99999 BEGINNER MAJOR DBADMIN This operation is not supported on inMemory objects.
-1327 42000 99999 BEGINNER MAJOR DBADMIN An inMemory DDL statement cannot be used on regular objects.
+1324 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1325 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1326 42000 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1327 42000 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1328 ZZZZZ 99999 BEGINNER MINOR DBADMIN The $0~String0 privilege(s) cannot be specified for $1~String1 $2~TableName.
-1329 ZZZZZ 99999 ADVANCED MAJOR LOGONLY Cannot create metadata views because of server failure.
-1330 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot revoke role $0~String0 from authorization ID $1~String1.  Role has been granted to another authorization ID. 
+1329 ZZZZZ 99999 ADVANCED MAJOR LOGONLY --- unused ---
+1330 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot revoke role $0~String0 from authorization ID $1~String1. Role has been granted to another authorization ID. 
 1331 ZZZZZ 99999 BEGINNER MINOR DBADMIN User $0~String0 does not exist in the directory service.
 1332 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Error while communicating with the directory service.
 1333 ZZZZZ 99999 BEGINNER MINOR DBADMIN User $0~String0 does not exist.
@@ -336,53 +336,53 @@
 1338 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Role $0~string0 is not defined in the database.
 1339 ZZZZZ 99999 BEGINNER MINOR DBADMIN $0~string0 is not a grantable role.
 1340 ZZZZZ 99999 BEGINNER MINOR DBADMIN $0~string0 is not a user.
-1341 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User owns one or more catalogs.
-1342 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User owns one or more schemas.
-1343 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User owns one or more objects.
-1344 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User is the grantee of one or more schema privileges.
-1345 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User is the grantee of one or more table privileges.
-1346 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User is the grantee of one or more column privileges.
-1347 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User owns one or more roles.
-1348 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot drop role.  Role is granted to one or more users.
-1349 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user.  User granted one or more roles.
-1350 ZZZZZ 99999 BEGINNER MINOR DBADMIN Role $0~String0 is not granted to $1~String1.
+1341 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1342 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1343 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user. User owns one or more objects.
+1344 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1345 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1346 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1347 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user. User owns one or more roles.
+1348 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot drop role. Role is granted to one or more users.
+1349 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot unregister user. User granted one or more roles.
+1350 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1351 ZZZZZ 99999 BEGINNER MINOR DBADMIN Role $1~String0 appears multiple times in list.
 1352 ZZZZZ 99999 BEGINNER MINOR DBADMIN User $1~String0 appears multiple times in list.
-1353 ZZZZZ 99999 BEGINNER MINOR DBADMIN WITH GRANT OPTION is not allowed for roles.
-1354 ZZZZZ 99999 BEGINNER MINOR DBADMIN Granting schema privileges to roles is not allowed.
+1353 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1354 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1355 ZZZZZ 99999 BEGINNER MINOR DBADMIN Granting a role to PUBLIC or _SYSTEM is not allowed.
 1356 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot create the component privilege specified. Component privilege code $0~String0 for the component already exists. 
 1357 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot create the component privilege specified. Component privilege name $0~String0 for the component already exists.
-1358 ZZZZZ 99999 BEGINNER MINOR DBADMIN Component privilege $0~String0 not found. 
+1358 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1359 ZZZZZ 99999 BEGINNER MINOR DBADMIN Specified invalid privilege $0~String0 for GRANT/REVOKE.
-1360 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot $0~String0. Dependent component privileges exist for the user/role. 
+1360 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1361 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Library $0~TableName does not exist.
-1362 ZZZZZ 99999 BEGINNER MAJOR DBADMIN $0~string0 is not a library.
-1363 ZZZZZ 99999 BEGINNER MINOR DBADMIN Library $0~TableName already exists.
+1362 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1363 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1364 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot revoke role $0~String0. Object $1~String1 depends on privileges on object $2~String2. 
-1365 ZZZZZ 99999 BEGINNER MINOR DBADMIN Unable to obtain necessary lock for user/role number $0~Int0.
-1366 ZZZZZ 99999 BEGINNER MINOR DBADMIN Request failed.  One or more dependent procedures exist.
-1367 ZZZZZ 99999 BEGINNER MINOR DBADMIN Procedure uses a library object, EXTERNAL PATH cannot be altered.
-1368 ZZZZZ 99999 BEGINNER MINOR DBADMIN The library must be defined in the same catalog.
-1369 ZZZZZ 99999 BEGINNER MINOR DBADMIN Only UPDATE and USAGE privileges are supported for a library.
+1365 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1366 ZZZZZ 99999 BEGINNER MINOR DBADMIN Request failed. One or more dependent procedures exist.
+1367 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1368 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1369 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1370 ZZZZZ 99999 BEGINNER MINOR DBADMIN The authorization name "$0~String0" contains one or more invalid characters. A valid name can only contain these characters: [a-zA-Z_0-9-@./]
-1371 ZZZZZ 99999 BEGINNER MINOR DBADMIN Only INSERT and SELECT privileges are supported for table $0~String0.
-1372 ZZZZZ 99999 BEGINNER MINOR DBADMIN Unable to alter table $0~String0 because of dependent object $1~String1 with object type $0~String3.
-1373 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Value for column $0~int0 ($1~String0) must be either Y or N.
-1374 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: $0~String0 is an unrecognized or unsupported audit log type.
-1375 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Input strings limited to $0~Int0 characters each.
-1376 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Column numbers must be in the range 1 - $0~Int0.
-1377 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Audit configuration alter failed due to SQL error $0~Int0.
-1378 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Audit refresh time must be 0 to $0~Int0 seconds.
-1379 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Audit aging days must be 0 to $0~Int0.
-1380 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Audit threshold must be a percentage from 0 to 100.
-1381 ZZZZZ 99999 BEGINNER MINOR DBADMIN AUDITLOG: Column/value pair mismatch.
+1371 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1372 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1373 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1374 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1375 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1376 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1377 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1378 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1379 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1380 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1381 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1382 ZZZZZ 99999 BEGINNER MINOR DBADMIN JAR or DLL file $0~String0 not found.
-1383 ZZZZZ 99999 BEGINNER MAJOR DBADMIN User management operations have been disabled. $0~string0 cannot be performed for $1~TableName.
-1384 ZZZZZ 99999 BEGINNER MINOR DBADMIN Unable to perform 1-to-1 mapping of $0~int0 partitions to $1~int1 disks for $2~TableName.  Since there are more partitions than available disks, $3~int2 partitions have been created on each disk.
-1385 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Mapping of disk pools for $0~TableName during CREATE failed. The number of partitions is $1~int0, the expected number of disks is $2~int1 and the actual number of disks is $3~int2.
-1386 ZZZZZ 99999 BEGINNER MINOR DBADMIN	The call to run_script failed. $0~string0.
-1387 ZZZZZ 99999 BEGINNER MINOR DBADMIN Use of blob datatype in a PRIMARY KEY is not allowed.
+1383 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1384 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1385 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1386 ZZZZZ 99999 BEGINNER MINOR DBADMIN	--- unused ---
+1387 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1388 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object $0~TableName does not exist in hive metadata.
 1389 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object $0~String0 does not exist in Trafodion.
 1390 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object $0~String0 already exists in Trafodion.
@@ -392,10 +392,10 @@
 1394 ZZZZZ 99999 BEGINNER MINOR DBADMIN Trafodion needs to be reinitialized on this system due to missing or corrupted metadata objects. Do 'initialize trafodion, drop' followed by 'initialize trafodion' to reinitialize Trafodion. This will delete all metadata and user objects from the Trafodion database and recreate metadata.
 1395 ZZZZZ 99999 BEGINNER MINOR DBADMIN Trafodion needs to be upgraded on this system due to metadata version mismatch. Do 'initialize trafodion, upgrade' to upgrade metadata. Or do 'initialize trafodion, drop' followed by 'initialize trafodion'. Be aware that the second option will delete all metadata and user objects from Trafodion database.
 1396 ZZZZZ 99999 BEGINNER MINOR DBADMIN Trafodion needs to be reinitialized on this system due to data format version mismatch.
-1397 ZZZZZ 99999 BEGINNER MINOR DBADMIN Software version of objects being used is different than the version of software running on the system. Make sure that objects being used are built with the same version as that running on the system. Version of Trafodion software is picked from file sqenvcom.sh.
-1398 ZZZZZ 99999 BEGINNER MINOR DBADMIN Error $0~Int0 occured while accessing the hbase subsystem. Fix that error and make sure hbase is up and running. Error Details: $0~String0. 
+1397 ZZZZZ 99999 BEGINNER MINOR DBADMIN Software version of objects being used is different than the version of software running on the system. Make sure that objects being used are built with the same version as that running on the system. Version of Trafodion software is determined by file sqenvcom.sh.
+1398 ZZZZZ 99999 BEGINNER MINOR DBADMIN Error $0~Int0 occurred while accessing the HBase subsystem. Fix that error and make sure HBase is up and running. Error Details: $0~String0. 
 1399 ZZZZZ 99999 BEGINNER MINOR DBADMIN Metadata has already been upgraded. No action is needed.
-1400 ZZZZZ 99999 BEGINNER MINOR LOGONLY The default value of column $0~ColumnName contains characters that cannot be converted to $1~String0.
+1400 ZZZZZ 99999 BEGINNER MINOR LOGONLY --- unused ---
 1401 ZZZZZ 99999 BEGINNER MINOR DBADMIN The default value of column $0~ColumnName contains characters that cannot be converted to character set $1~String0.
 1402 ZZZZZ 99999 BEGINNER MINOR DBADMIN Unique index $0~TableName could not be created with the DIVISION LIKE TABLE option. Only non-unique indexes are supported with this option.
 1403 ZZZZZ 99999 BEGINNER MINOR DBADMIN This ALTER command is not allowed on a reserved system schema object.
@@ -406,44 +406,44 @@
 1423 ZZZZZ 99999 BEGINNER MINOR DBADMIN Insert into metadata table $0~string0 failed.
 1424 ZZZZZ 99999 BEGINNER MINOR DBADMIN Column $0~ColumnName cannot be dropped as that would leave the table with no user defined or user updatable columns.
 1425 ZZZZZ 99999 BEGINNER MINOR DBADMIN This operation could not be performed on $0~TableName. $0~String0
-1426 ZZZZZ 99999 BEGINNER MINOR DBADMIN An invalid HBase column name $0~String0 was specified. A valid name must be of the format:   <ColumnFamily>:<ColumnName>
+1426 ZZZZZ 99999 BEGINNER MINOR DBADMIN An invalid HBase column name $0~String0 was specified. A valid name must be of the format: <ColumnFamily>:<ColumnName>
 1427 ZZZZZ 99999 BEGINNER MINOR DBADMIN Table cannot be renamed. $0~String0
 1428 ZZZZZ 99999 BEGINNER MINOR DBADMIN Metadata definitions could not be created and preloaded in global MDdescInfo struct. Make sure that metadata table definition syntax is correct.
-1429 ZZZZZ 99999 BEGINNER MINOR DBADMIN Inserts into _ROW_ format external hbase tables can only use the VALUES clause and must use the column_create function to create values.
+1429 ZZZZZ 99999 BEGINNER MINOR DBADMIN Inserts into _ROW_ format external HBase tables can only use the VALUES clause and must use the column_create function to create values.
 1430 3F000 99999 BEGINNER MAJOR DBADMIN A schema name that starts and ends with an "_"(underscore) is reserved for internal usage. It cannot be used to create a user schema.
 1431 ZZZZZ 99999 BEGINNER MINOR DBADMIN Object $0~String0 exists in HBase. This could be due to a concurrent transactional ddl operation in progress on this table.
 1432 ZZZZZ 99999 BEGINNER MINOR DBADMIN Input LOB type $0~Int0 does not match column's storage type: $1~Int1 Column name: $0~String0 .
-1500 ZZZZZ 99999 ADVANCED CRTCL DIALOUT The CATSYS - CAT_REFERENCES system schema relationship for catalog $0~CatalogName might be corrupt.
+1500 ZZZZZ 99999 ADVANCED CRTCL DIALOUT --- unused ---
 1501 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused as of 5/7/12 ---
-1502 ZZZZZ 99999 ADVANCED CRTCL DIALOUT The OBJECTS - REPLICAS definition schema relationship for $0~String0 $1~TableName might be corrupt.
-1503 ZZZZZ 99999 ADVANCED CRTCL DIALOUT The OBJECTS - PARTITIONS definition schema relationship for $0~String0 $1~TableName might be corrupt.
-1504 ZZZZZ 99999 ADVANCED MAJOR DIALOUT $0~TableName has no OBJECTS entry in $1~SchemaName.
-1505 ZZZZZ 99999 ADVANCED MAJOR DIALOUT The system schema metadata for catalog $0~CatalogName is inconsistent between the local node $1~String0 and remote node $2~String1.
-1506 ZZZZZ 99999 ADVANCED MAJOR DIALOUT The system schema metadata for schema $0~SchemaName is inconsistent between nodes $1~String0 and $2~String1.
+1502 ZZZZZ 99999 ADVANCED CRTCL DIALOUT --- unused ---
+1503 ZZZZZ 99999 ADVANCED CRTCL DIALOUT --- unused ---
+1504 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused ---
+1505 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused ---
+1506 ZZZZZ 99999 ADVANCED MAJOR DIALOUT --- unused ---
 1507 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ----- Msg not currently used -----
 1508 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ----- Msg not currently used -----
 1509 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ----- Msg not currently used -----
-1510 ZZZZZ 99999 BEGINNER MAJOR DBADMIN IDENTITY column $0~ColumnName can be of the following data types only: LARGEINT, unsigned INTEGER and unsigned SMALL INT.
+1510 ZZZZZ 99999 BEGINNER MAJOR DBADMIN IDENTITY column $0~ColumnName can be of the following data types only: LARGEINT, INTEGER UNSIGNED and SMALLINT UNSIGNED.
 1511 ZZZZZ 99999 BEGINNER MAJOR DBADMIN There can only be one IDENTITY column for a table.
-1512 ZZZZZ 99999 BEGINNER MAJOR DBADMIN IDENTITY column must be defined as a NOT NULL NOT DROPPABLE column.
-1513 ZZZZZ 99999 BEGINNER MAJOR DBADMIN IDENTITY column support is available only for hash and hash2 partitioned tables.
+1512 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1513 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1514 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Cannot add an IDENTITY column using ALTER TABLE command.
 1515 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU ----- Msg not currently used -----
-1516 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU
-1517 ZZZZZ 99999 BEGINNER MINOR DBADMIN Constraint $0~ConstraintName on table $1~TableName is NOT DROPPABLE, you cannot disable NOT DROPPABLE constraints.
-1518 ZZZZZ 99999 BEGINNER MINOR DBADMIN Constraint $0~ConstraintName was not enabled because it requires index $1~TableName which is currently disabled.
-1519 ZZZZZ 99999 BEGINNER MINOR DBADMIN Constraint $0~ConstraintName on table $1~TableName cannot be disabled because it is a foreign key constraint.
-1520 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU
-1521 ZZZZZ 99999 BEGINNER MINOR DBADMIN Exception table $0~string0 must be defined in the same schema as table $1~string1.
-1522 ZZZZZ 99999 BEGINNER MINOR DBADMIN Operation $0~string0 cannot be performed because $1~String1 is not an exception table.
-1523 ZZZZZ 99999 BEGINNER MINOR DBADMIN Exception table $0~TableName does not exist.
-1524 ZZZZZ 99999 BEGINNER MINOR DBADMIN Exception tables cannot be created on metadata tables.
-1525 ZZZZZ 99999 BEGINNER MINOR DBADMIN Exception tables cannot be created on exception tables.
-1526 ZZZZZ 99999 BEGINNER MINOR DBADMIN Exception table $0~string0 not found in metadata table $1~String1
-1527 ZZZZZ 99999 BEGINNER MINOR DBADMIN Key does not exist for $0~TableName. Cannot create exception table.
-1528 ZZZZZ 99999 BEGINNER MINOR DBADMIN Exception table $0~string0 not found for table $1~String1.
-1529 ZZZZZ 99999 BEGINNER MINOR DBADMIN Sequence Generator $0~string0 not found in metadata table $1~String1.
-1530 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Failed to add the LDAP default configuration.
+1516 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
+1517 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1518 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1519 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1520 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- unused ---
+1521 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1522 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1523 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1524 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1525 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1526 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1527 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1528 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1529 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1530 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1540 ZZZZZ 99999 BEGINNER MINOR DBADMIN The NO POPULATE clause is not allowed for index $0~String0 on volatile table $1~TableName.
 1541 ZZZZZ 99999 BEGINNER MINOR DBADMIN Use of BLOB/CLOB datatype as a key of the base table or an index is not allowed.
 1550 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Failed to create UDF $0~string0 with $0~int0 parameters. A scalar UDF can have a maximum of 32 parameters.
@@ -453,70 +453,69 @@
 1554 ZZZZZ 99999 ADVANCED MAJOR DBADMIN ---- Reserved for UDF ----
 1555 ZZZZZ 99999 ADVANCED MAJOR DBADMIN ---- Reserved for UDF ----
 1556 ZZZZZ 99999 ADVANCED MAJOR DBADMIN ---- Reserved for UDF ----
-1557 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Incorrect function data sequence numbering for function identifier $0~String0 in table $1~String1.
-1558 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Did not find a matching row for function $0~String0 in table $1~String1.
-1559 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Did not find a matching row for format or model '$0~String0' in table $1~String1.
-1560 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Error parsing column definition '$0~String0' for column number $1~Int0, function identifier $2~String1, in table $3~String2.
-1561 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Invalid column direction indicator for function identifier $0~String0, column number $1~Int0, in table $2~String1.
-1562 ZZZZZ 99999 ADVANCED MAJOR DBADMIN No output parameter rows found for model '$0~String0' in table $1~String1.
-1563 ZZZZZ 99999 ADVANCED MAJOR DBADMIN No input parameter rows found for model '$0~String0' in table $1~String1.
-1564 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Expected 1 output parameter row for format '$0~String0'. Found $1~Int0 rows in table $2~String1.
-1565 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Expected 1 input parameter row for format '$0~String0'. Found $1~Int0 rows in table $2~String1.
-1566 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Incorrect column numbering for function identifier $0~String0 in table $1~String1.
-1567 ZZZZZ 99999 ADVANCED MAJOR DBADMIN An error was encountered processing metadata for function $0~String0.
-1568 ZZZZZ 99999 ADVANCED MAJOR DBADMIN An error was encountered processing metadata for format or model '$0~String0'.
-1569 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Subqueries are not allowed as arguments to user-defined functions.
+1557 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1558 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1559 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1560 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1561 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1562 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1563 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1564 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1565 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1566 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1567 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1568 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
+1569 ZZZZZ 99999 ADVANCED MAJOR DBADMIN --- unused ---
 1570 ZZZZZ 99999 BEGINNER MAJOR DBADMIN MAXVALUE must be greater than MINVALUE for $0~string0.
 1571 ZZZZZ 99999 BEGINNER MAJOR DBADMIN $0~string0 value cannot be zero for $1~string1.
 1572 ZZZZZ 99999 BEGINNER MAJOR DBADMIN $0~string0 value cannot be a negative number for $0~string1.
 1573 ZZZZZ 99999 BEGINNER MAJOR DBADMIN START WITH value must be greater than or equal to MINVALUE and less than or equal to MAXVALUE for $0~string0.
-1574 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The CYCLE option is currently not supported for $0~string0.   
+1574 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---   
 1575 ZZZZZ 99999 BEGINNER MAJOR DBADMIN INCREMENT BY value cannot be greater than the difference between MINVALUE and MAXVALUE for $0~string0.
 1576 ZZZZZ 99999 BEGINNER MAJOR DBADMIN $0~string0 value is greater than maximum allowed for this sequence.
 1577 ZZZZZ 99999 BEGINNER MAJOR DBADMIN CACHE value must be greater than 1 and less than or equal to (maxValue-startValue+1)/incrementValue for $0~string0.
-1578 ZZZZZ 99999 BEGINNER MAJOR DBADMIN START WITH option cannot be used for $0~string0.
+1578 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1579 ZZZZZ 99999 BEGINNER MAJOR DBADMIN This sequence has reached its max and cannot provide a new value.
-1580 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Catalog $0~string0 uses an unsupported collation.
-1581 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Schema $0~string0 uses an unsupported collation.
+1580 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1581 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1582 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Internal Error: Sequence information does not exist in metadata.
 1583 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Sequence metadata could not be updated.
 1584 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Timestamp mismatch detected on Sequence object.
 1590 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Column $0~ColumnName is not an IDENTITY column.
-1591 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The MAXVALUE option for the $0~string0 must be greater than the current value of $0~string1.
+1591 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1592 ZZZZZ 99999 BEGINNER MAJOR DBADMIN $0~string0 cannot be specified for $0~string1.
-1593 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Only one $0~string0 option can be altered at a time.
-1594 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An error occurred trying to access the current value of the $0~string0.
-1595 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The MAXVALUE option for the $0~string0 must be a valid numeric value.  NO MAXVALUE is not allowed.
-1596 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Recalibration of the internal sequence generator failed. \
- Please see additional messages for details.
-1597 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The maximum of the IDENTITY column, $0~string0, for the table, $0~string1, could not be obtained.
-1598 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The new CURRENT_VALUE, $0~string0, for the IDENTITY column, $0~string1, for the table, $0~string2, will be greater than the maximum allowed, $0~string3.
-1599 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The recalibration value is $0~string0 value, $0~string1, of the IDENTITY column, $0~string2, for the table, $0~string3.
-1600 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Unable to lock table, $0~string0, for the IDENTITY column, $0~string1, during recalibration.
-1601 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Recalibration of the GENERATED ALWAYS AS IDENTITY column, $0~string0, for the table, $0~string1, is not allowed with NO SELECT.
+1593 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1594 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1595 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1596 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1597 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1598 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1599 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1600 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1601 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
 
-1700 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Object $0~string0 is not a universal user-defined function.
-1701 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Encountered an error while processing a routine definition. Too many pass through inputs.
-1702 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Only a character string literal can appear within the VALUE clause in a pass through input definition.
-1703 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Only BINARY option can appear together with a UCS2 character string literal within the VALUE clause in a pass through input definition.
-1704 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Only BINARY option can appear within the VALUE FROM FILE clause.
-1705 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Encountered an error while processing a routine definition. Unable to open file '$0~string0' for read.
-1706 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Encountered an error while processing a routine definition. Unable to read contents of file '$0~string0'.
-1707 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Routine action $0~string0 already exists in the list of actions used by universal user-defined function $1~string0
-1708 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The number of items in the NUMBER OF UNIQUE OUTPUT VALUES clause exceeds the number of outputs declared in the RETURNS clause.
-1709 ZZZZZ 99999 BEGINNER MINOR DBADMIN Cannot drop the universal user-defined function $0~String0 - Associating routine action $1~String1 still exists.
-1710 ZZZZZ 99999 BEGINNER MINOR DBADMIN Encountered an error while processing the ALTER PASS THROUGH INPUTS clause. The specified position must start from 1, not 0.
-1711 ZZZZZ 99999 BEGINNER MINOR DBADMIN Encountered an error while processing the ALTER PASS THROUGH INPUTS clause. The specified position exceeds the number of existing pass through inputs.
-1712 ZZZZZ 99999 BEGINNER MINOR DBADMIN Encountered an error while processing the ALTER PASS THROUGH INPUTS clause. The specified position appears multiple times.
-1713 ZZZZZ 99999 BEGINNER MINOR DBADMIN Missing the required universal user-defined function name clause.
-1714 ZZZZZ 99999 BEGINNER MINOR DBADMIN An error occurred while retrieving metadata from catalog manager. Encountered an invalid routine action name '$0~string0'.
-1715 ZZZZZ 99999 BEGINNER MINOR DBADMIN An error occurred while retrieving metadata from catalog manager. Unable to start transaction.
-1716 ZZZZZ 99999 BEGINNER MINOR DBADMIN Number of declared formal parameters with SQL parameter style cannot exceed 32.
-1717 ZZZZZ 99999 BEGINNER MINOR DBADMIN Encountered an error while processing a routine definition. Pass through input value with BINARY type cannot be empty.
-1718 ZZZZZ 99999 BEGINNER MINOR DBADMIN Encountered an error while processing a routine definition. File '$0~string0' is empty. A pass through input value with BINARY type cannot be empty.
+1700 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1701 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1702 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1703 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1704 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1705 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1706 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1707 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1708 ZZZZZ 99999 BEGINNER MAJOR DBADMIN --- unused ---
+1709 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1710 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1711 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1712 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1713 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1714 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1715 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1716 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1717 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
+1718 ZZZZZ 99999 BEGINNER MINOR DBADMIN --- unused ---
 1719 ZZZZZ 99999 BEGINNER MINOR DBADMIN Access Type '$0~string0' is not supported.
 1720 ZZZZZ 99999 BEGINNER MINOR DBADMIN Isolation Level '$0~string0' is not supported.
-1999 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Last Catalog Manager error
+1999 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU --- Last Catalog Manager error ---
 2000 ZZZZZ 99999 UUUUUUUU UUUUU UUUUUUU Error messages for compiler main, IPC, and DEFAULTS table; assertions for optimizer.
 2001 ZZZZZ 99999 ADVANCED MAJOR DIALOUT Error or warning $0~Int0 occurred while opening or reading from DEFAULTS table $1~TableName.  Using $2~String0 values.
 2002 ZZZZZ 99999 ADVANCED MAJOR DIALOUT Internal error: cannot create compiler.
@@ -1197,7 +1196,7 @@
 4125 42000 99999 BEGINNER MAJOR DBADMIN The select list of a subquery in a row value constructor must be scalar (degree of one) if the subquery is one of several expressions rather than the only expression in the constructor.
 4126 42000 99999 BEGINNER MAJOR DBADMIN The row value constructors in a VALUES clause must be of equal degree.
 4127 42000 99999 BEGINNER MAJOR DBADMIN Type $0~String0 cannot be assigned to type $1~String1.
-4128 42000 99999 BEGINNER MAJOR DBADMIN Default volume and subvolume information could not be retrieved from =_DEFAULTS define - DEFINEINFO error $0~int0.
+4128 42000 99999 BEGINNER MAJOR DBADMIN --- unused ---
 4129 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The character-typed result of the function $0~String0 is longer than the maximum supported size.
 4130 ZZZZZ 99999 BEGINNER MAJOR DBADMIN $0~string0 is a read-only DEFAULTS attribute and cannot be updated.
 4131 42000 99999 BEGINNER MAJOR DBADMIN Current_time, current_date, or current_timestamp is not allowed in a check constraint.
@@ -1283,8 +1282,8 @@
 4219 ZZZZZ 99999 BEGINNER MAJOR DBADMIN This command is not supported on the specified object or an incorrect object type was specified.$0~String0
 4220 ZZZZZ 99999 BEGINNER MAJOR DBADMIN The THIS function can be used only inside a ROWS SINCE function.
 4221 42000 99999 BEGINNER MAJOR DBADMIN The operand of function $0~String0 must be $1~String1.
-4222 42000 99999 BEGINNER MAJOR DBADMIN The $0~String0 feature is not supported in this software version.
-4223 42000 99999 BEGINNER MAJOR DBADMIN $0~String0 not supported in this software version.
+4222 42000 99999 BEGINNER MAJOR DBADMIN The $0~String0 feature is not supported in this software version or edition.
+4223 42000 99999 BEGINNER MAJOR DBADMIN $0~String0 not supported in this software version or edition.
 4224 42000 99999 BEGINNER MAJOR DBADMIN The directory $0~String0 is not a valid Hive location.
 4225 42000 99999 BEGINNER MAJOR DBADMIN Number of column families cannot exceed 32.
 4226 42000 99999 BEGINNER MAJOR DBADMIN Table $0~TableName has a maximum record length of $0~Int0 which is greater than the HDFS_IO_BUFFERSIZE default. Increase buffer size setting, or reduce the HIVE_MAX_STRING_LENGTH default.
@@ -1894,7 +1893,7 @@
 9212 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Cardinality statistics will be more accurate if you use the SET ROWCOUNT option in the SAMPLE clause.
 9213 ZZZZZ 99999 BEGINNER INFRM DBADMIN If you intend to update histogram statistics for columns, you must specify a column list in the statement.
 9214 ZZZZZ 99999 ADVANCED MAJOR DBADMIN Object $0~string0 could not be created.
-9215 ZZZZZ 99999 ADVANCED MAJOR DBADMIN UPDATE STATISTICS encountered an internal error (from $0~String0, with return value=$1~String1).  Details: $2~String2."
+9215 ZZZZZ 99999 ADVANCED MAJOR DBADMIN UPDATE STATISTICS encountered an internal error (from $0~String0, with return value=$1~String1).  Details: $2~String2.
 9216 ZZZZZ 99999 BEGINNER MAJOR DBADMIN UPDATE STATISTICS cannot be on EVERY COLUMN for LOG tables. 
 9217 ZZZZZ 99999 BEGINNER INFRM DBADMIN The statement will have no effect because no histograms are currently maintained for the table.
 9218 ZZZZZ 99999 BEGINNER INFRM DBADMIN The statement will have no effect because no histograms need to be updated.
diff --git a/core/sql/bin/ex_sscp_main.cpp b/core/sql/bin/ex_sscp_main.cpp
index 6ddb549..a1f1f2b 100644
--- a/core/sql/bin/ex_sscp_main.cpp
+++ b/core/sql/bin/ex_sscp_main.cpp
@@ -156,7 +156,6 @@
 void runServer(Int32 argc, char **argv)
 {
   Int32 shmid;
-  jmp_buf sscpJmpBuf;
   StatsGlobals *statsGlobals = NULL;
   void *statsGlobalsAddr;
   NABoolean createStatsGlobals = FALSE;
@@ -275,9 +274,6 @@
   }
   XPROCESSHANDLE_GETMINE_(statsGlobals->getSscpProcHandle());
   NAHeap *sscpHeap = cliGlobals->getExecutorMemory();
-  cliGlobals->setJmpBufPtr(&sscpJmpBuf);
-  if (setjmp(sscpJmpBuf))
-    NAExit(1); // Abend
   IpcEnvironment  *sscpIpcEnv = new (sscpHeap) IpcEnvironment(sscpHeap, cliGlobals->getEventConsumed(),
       FALSE, IPC_SQLSSCP_SERVER, FALSE, TRUE);
 
diff --git a/core/sql/bin/ex_ssmp_main.cpp b/core/sql/bin/ex_ssmp_main.cpp
index 2f17b95..cfbe07b 100755
--- a/core/sql/bin/ex_ssmp_main.cpp
+++ b/core/sql/bin/ex_ssmp_main.cpp
@@ -145,7 +145,6 @@
 
 void runServer(Int32 argc, char **argv)
 {
-  jmp_buf ssmpJmpBuf;
   Int32 shmId;
   StatsGlobals *statsGlobals = (StatsGlobals *)shareStatsSegment(shmId);
   Int32 r = 0;
@@ -209,9 +208,6 @@
   XPROCESSHANDLE_GETMINE_(statsGlobals->getSsmpProcHandle());
 
   NAHeap *ssmpHeap = cliGlobals->getExecutorMemory();
-  cliGlobals->setJmpBufPtr(&ssmpJmpBuf);
-  if (setjmp(ssmpJmpBuf))
-    NAExit(1); // Abend
 
   IpcEnvironment       *ssmpIpcEnv = new (ssmpHeap) IpcEnvironment(ssmpHeap,
             cliGlobals->getEventConsumed(), FALSE, IPC_SQLSSMP_SERVER,
diff --git a/core/sql/cli/Cli.cpp b/core/sql/cli/Cli.cpp
index 3f88eac..4086a6e 100644
--- a/core/sql/cli/Cli.cpp
+++ b/core/sql/cli/Cli.cpp
@@ -74,12 +74,8 @@
 
 #include <time.h>
 
-#include <setjmp.h>
-
-
 #include "fs/feerrors.h"
 
-
 #include "csconvert.h"
 
 #include "ExRsInfo.h"
@@ -9222,8 +9218,10 @@
 	{
 	  diags.mergeAfter(*myDiags);
 	}
-      return cliRC;
     }
+  myDiags->deAllocate();
+  if (cliRC < 0)
+     return cliRC;
   else if (cliRC == 100)
     return 100;
   else
@@ -9510,13 +9508,16 @@
 	{
 	  diags.mergeAfter(*myDiags);
 	}
-      return cliRC;
     }
+  myDiags->deAllocate();
+  if (cliRC < 0)
+     return cliRC;
   else if (cliRC == 100)
     return 100;
   else
     return 0;   
 }
+
 Lng32 SQLCLI_LOBddlInterface
 (
 /*IN*/     CliGlobals *cliGlobals,
@@ -9924,8 +9925,10 @@
 	{
 	  diags.mergeAfter(*myDiags);
 	}
-      return cliRC;
     }
+  myDiags->deAllocate();
+  if (cliRC < 0)
+     return cliRC;
   else if (cliRC == 100)
     return 100;
   else
@@ -10035,8 +10038,6 @@
   ContextCli   & currContext = *(cliGlobals->currContext());
   ComDiagsArea & diags       = currContext.diags();
 
-  ComDiagsArea * myDiags = ComDiagsArea::allocate(currContext.exHeap());
-
   ExeCliInterface *cliInterface = NULL;
   if (inCliInterface && (*inCliInterface))
     {
@@ -10711,8 +10712,10 @@
 						currContext.exHeap(),
 						nextValue,
 						endValue);
-  if (cliRC < 0)
-    return cliRC;
+  if (cliRC < 0) {
+     myDiags->deAllocate();     
+     return cliRC;
+  }
   
   if ((sga->getSGCycleOption()) &&
       (nextValue > sga->getSGMaxValue()))
@@ -10727,10 +10730,13 @@
 						    currContext.exHeap(),
 						    nextValue,
 						    endValue);
-      if (cliRC < 0)
-	return cliRC;
+      if (cliRC < 0) {
+         myDiags->deAllocate();     
+	 return cliRC;
+      }
     }
 
+  myDiags->deAllocate();     
   sga->setSGNextValue(nextValue);
   sga->setSGEndValue(endValue);
 
diff --git a/core/sql/cli/Context.cpp b/core/sql/cli/Context.cpp
index 6eef48f..b7cfd08 100644
--- a/core/sql/cli/Context.cpp
+++ b/core/sql/cli/Context.cpp
@@ -174,7 +174,6 @@
     numRoles_(0),
     unusedBMOsMemoryQuota_(0)
 {
-  exHeap_.setJmpBuf(cliGlobals->getJmpBuf());
   cliSemaphore_ = new (&exHeap_) CLISemaphore();
   ipcHeap_ = new (cliGlobals_->getProcessIpcHeap())
                   NAHeap("IPC Context Heap",
diff --git a/core/sql/cli/Globals.cpp b/core/sql/cli/Globals.cpp
index b9dd6e5..e6f64eb 100644
--- a/core/sql/cli/Globals.cpp
+++ b/core/sql/cli/Globals.cpp
@@ -74,14 +74,8 @@
 
 CliGlobals::CliGlobals(NABoolean espProcess)
      : inConstructor_(TRUE),
-       executorMemory_("Global Executor Memory",0,0,
-		       0,0,0, &segGlobals_),
+       executorMemory_((const char *)"Global Executor Memory"),
        contextList_(NULL),
-       defaultVolSeed_(0),
-       listOfVolNames_(NULL),
-       listOfAuditedVols_(NULL),
-       listOfVolNamesCacheTime_(-1),
-       sysVolNameInitialized_(FALSE),
        envvars_(NULL),
        envvarsContext_(0),
        sharedArkcmp_(NULL),
@@ -90,16 +84,10 @@
        totalCliCalls_(0),
        savedCompilerVersion_ (COM_VERS_COMPILER_VERSION),
        globalSbbCount_(0),
-       //       sessionDefaults_(NULL),
        priorityChanged_(FALSE),
        currRootTcb_(NULL),
        processStats_(NULL),
        savedPriority_(148), // Set it to some valid priority to start with
-       qualifyingVolsPerNode_(NULL),
-       cpuNumbers_(NULL),
-       capacities_(NULL),
-       freespaces_(NULL),
-       largestFragments_(NULL),
        tidList_(NULL),
        cliSemaphore_(NULL),
        defaultContext_(NULL),
@@ -134,7 +122,6 @@
   _sqptr = new (&executorMemory_) char[10];
 
   numCliCalls_ = 0;
-  logEmsEvents_ = TRUE;
   nodeName_[0] = '\0';
 
   breakEnabled_ = FALSE;
@@ -181,10 +168,6 @@
     // Create the process global ARKCMP server.
     sharedArkcmp_ = NULL;
     nextUniqueContextHandle = DEFAULT_CONTEXT_HANDLE;
-
-    arlibHeap_ = new (&executorMemory_) NAHeap("MXARLIB Cache Heap",
-                                               &executorMemory_,
-                                               (Lng32) 32768);
     lastUniqueNumber_ = 0;
     sessionUniqueNumber_ = 0;
     // It is not thread safe to set the globals cli_globals
@@ -243,11 +226,6 @@
     tidList_  = new(&executorMemory_) HashQueue(&executorMemory_);
     SQLCTX_HANDLE ch = defaultContext_->getContextHandle();
     contextList_->insert((char*)&ch, sizeof(SQLCTX_HANDLE), (void*)defaultContext_);
-    qualifyingVolsPerNode_.setHeap(defaultContext_->exCollHeap());
-    cpuNumbers_.setHeap(defaultContext_->exCollHeap());
-    capacities_.setHeap(defaultContext_->exCollHeap());
-    freespaces_.setHeap(defaultContext_->exCollHeap());
-    largestFragments_.setHeap(defaultContext_->exCollHeap());
     if (statsGlobals_ != NULL) 
        memMonitor_ = statsGlobals_->getMemoryMonitor();
     else
@@ -307,11 +285,6 @@
     delete sharedArkcmp_;
     sharedArkcmp_ = NULL;
   }
-  if (arlibHeap_)
-  {
-    delete arlibHeap_;
-    arlibHeap_ = NULL;
-  }
   if (statsGlobals_ != NULL)
   {
     error = statsGlobals_->getStatsSemaphore(semId_, myPin_);
@@ -894,45 +867,6 @@
     return SUCCESS;
 }
 
-void CliGlobals::clearQualifiedDiskInfo()
-{
-  CollHeap *heap = defaultContext_->exCollHeap();
-
-  nodeName_[0] = '\0';
-
-  while (!qualifyingVolsPerNode_.isEmpty())
-  {
-    char *volume;
-    // getFirst() removes and returns the first element in
-    // the container.
-    qualifyingVolsPerNode_.getFirst(volume);
-    NADELETEBASIC(volume, heap);  // Allocated in addQualifiedDiskInfo()
-  }
-
-  cpuNumbers_.clear();
-  capacities_.clear();
-  freespaces_.clear();
-  largestFragments_.clear();
-}
-
-void CliGlobals::addQualifiedDiskInfo(
-                 const char *volumeName,
-                 Lng32 primaryCpu,
-                 Lng32 capacity,
-                 Lng32 freeSpace,
-                 Lng32 largestFragment)
-{
-  CollHeap *heap = defaultContext_->exCollHeap();
-  char *volName = new(heap) char[9]; // deleted in clearQualifiedDiskInfo()
-  strcpy(volName, volumeName);
-
-  qualifyingVolsPerNode_.insert(volName);
-  cpuNumbers_.insert(primaryCpu);
-  capacities_.insert(capacity);
-  freespaces_.insert(freeSpace);
-  largestFragments_.insert(largestFragment);
-}
-
 NAHeap *CliGlobals::getCurrContextHeap()
 {
    return currContext()->exHeap();
diff --git a/core/sql/cli/Globals.h b/core/sql/cli/Globals.h
index ca498fa..284d992 100644
--- a/core/sql/cli/Globals.h
+++ b/core/sql/cli/Globals.h
@@ -64,7 +64,6 @@
 
 #include "NAMemory.h"
 #include "sqlcli.h"
-#include "QuasiFileManager.h"
 #include "Ipc.h"
 #include "ComQueue.h"
 #include "logmxevent.h"
@@ -72,14 +71,13 @@
 #include "ComRtUtils.h"
 #include "ComSmallDefs.h"
 class ContextCli;
-class Statement;  // $$$ possibly a stub for QuasiFileberManager
-class ComDiagsArea; // $$$ possibly a stub for QuasiFileberManager
+class Statement;  
+class ComDiagsArea; 
 class ExEspManager;
 class ExSsmpManager;
 class ExSqlComp;
 class IpcEnvironment;
 class MemoryMonitor;
-class QuasiFileManager;
 class HashQueue;
 class ExUdrServerManager;
 class ExControlArea;
@@ -159,49 +157,12 @@
   ExUdrServerManager *getUdrServerManager();
   inline MemoryMonitor * getMemoryMonitor()     { return memMonitor_; }
   inline void setMemoryMonitor(MemoryMonitor *memMon) { memMonitor_ = memMon; }
-  inline QuasiFileManager * getQuasiFileManager() { return quasiFileManager_; }
 
   inline NAHeap * getExecutorMemory()      { return &executorMemory_; }
-  inline NAHeap * getNoWaitHeap()  { return noWaitSQLHeap_; }
-
-  inline short getSegId(Lng32 &index)
-                                { return segGlobals_.getSegId(index); }
-  inline const NASegGlobals * getSegGlobals() const
-      
-                                         { return &segGlobals_; }
-  inline UInt32 getDefaultVolSeed()       { return defaultVolSeed_; }
-  inline void     setDefaultVolSeed( UInt32 seed)
-                                            { defaultVolSeed_ = seed; }
-  inline char **  getListOfVolNames()       { return listOfVolNames_; }
-  inline void     setListOfVolNames( char ** pVols)
-                                           { listOfVolNames_ = pVols; }
-  inline void *   getListOfAuditedVols() { return listOfAuditedVols_; }
-  inline void     setListOfAuditedVols( void *p)
-                                            { listOfAuditedVols_ = p; }
-  inline Int64   getListOfVolNamesCacheTime()  // 64-bit
-                                   { return listOfVolNamesCacheTime_; }
-  inline void     setListOfVolNamesCacheTime(Int64 cacheTime)
-                              { listOfVolNamesCacheTime_ = cacheTime; }
-  inline NABoolean isSysVolNameInitialized()
-                                     { return sysVolNameInitialized_; }
-  inline void setSysVolNameIsInitialized() 
-                                     { sysVolNameInitialized_ = TRUE; }
-  inline char * getSysVolName()                 { return sysVolName_; }
-
-  void clearQualifiedDiskInfo();
-  void addQualifiedDiskInfo(const char *volumeName, Lng32 primaryCpu,
-                            Lng32 capacity, Lng32 freeSpace, Lng32 largestFragment);
   inline void setNodeName(const char *nodeName)
                     { strncpy(nodeName_, nodeName, sizeof(nodeName_)); }
   inline char *getNodeName() { return nodeName_; }
 
-  inline Lng32 getNumOfQualifyingVols() { return qualifyingVolsPerNode_.entries(); }
-  inline char *getQualifyingVolume(Lng32 i) { return qualifyingVolsPerNode_[i]; }
-  inline Lng32 getCpuNumberForVol(Lng32 i) { return cpuNumbers_[i]; }
-  inline Lng32 getCapacityForVol(Lng32 i) { return capacities_[i]; }
-  inline Lng32 getFreespaceForVol(Lng32 i) { return freespaces_[i]; }
-  inline Lng32 getLargestFragmentForVol(Lng32 i) { return largestFragments_[i]; }
-
   inline Lng32 incrNumOfCliCalls()                   { return ++numCliCalls_; }
   inline Lng32 decrNumOfCliCalls()                   
   { 
@@ -442,42 +403,12 @@
   // executor memory that maintains all heap memory for this executor
   NAHeap executorMemory_;
 
-  // Object that contains: 1) attributes of the first flat segment
-  //                       2) array of secondary segment ids
-  NASegGlobals segGlobals_;
-
   // heap used by the IPC procedures
   NAHeap * ipcHeap_;
   
   // memory monitor for this process
   MemoryMonitor *memMonitor_;
 
-  // heap used by no-wait SQL procedures
-  NAHeap * noWaitSQLHeap_;
-
-  // quasi file manager for this process
-  QuasiFileManager * quasiFileManager_;
-
-  // Cache of descriptive table information from resource forks. Used
-  // in the audit reading CLI procedures called by utilities and by
-  // TMFARLB2. Code for these audit reading procedures is in
-  // CliMxArLib.cpp.
-  NAHeap *arlibHeap_;
-  //
-  // used by the catalog manager get-default-volume algorithm
-  //
-  UInt32 defaultVolSeed_;
-  char **  listOfVolNames_;
-  void *   listOfAuditedVols_;
-  Int64   listOfVolNamesCacheTime_;  // 64-bit
-
-  //
-  // cache the Tandem System Volume name
-  //
-  NABoolean sysVolNameInitialized_;
-  char sysVolName_[ 18 ];  // '$' + VOLNAME +  '.' +
-                          // SUBVOL + null-terminator
-
   // copy of the oss envvars
   char ** envvars_;
 
@@ -521,8 +452,6 @@
 
   // location of the application program which is calling SQL.
   // Fully qualified oss pathname for OSS processes.
-  // \sys.$vol.subvol for guardian processes. Currently, programDir_
-  // is not set or used for guardian processes.
   char * programDir_;
   short  processType_; // 0, oss process.  1, guardian process.
   NABoolean logReclaimEventDone_;
@@ -556,13 +485,7 @@
   // EMS event descriptor
   SQLMXLoggingArea::ExperienceLevel emsEventExperienceLevel_;
 
-  // these vars are used by GetListOfQualifyingVolumes.. methods.
   char nodeName_[9];
-  LIST(char *) qualifyingVolsPerNode_;
-  LIST(Lng32) cpuNumbers_;
-  LIST(Lng32) capacities_;
-  LIST(Lng32) freespaces_;
-  LIST(Lng32) largestFragments_;
 
   StatsGlobals *statsGlobals_;
 // heap used for the Stats collection
diff --git a/core/sql/cli/NoWaitOp.cpp b/core/sql/cli/NoWaitOp.cpp
deleted file mode 100644
index f6dc8d2..0000000
--- a/core/sql/cli/NoWaitOp.cpp
+++ /dev/null
@@ -1,264 +0,0 @@
-/**********************************************************************
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-**********************************************************************/
-/* -*-C++-*-
- *****************************************************************************
- *
- * File:         NoWaitOp.cpp
- * Description:  Functions of NowaitOp class.
- *               
- * Created:      3/26/2002
- * Language:     C++
- *
- *
- *
- *
- *****************************************************************************
- */
-
-#include "Platform.h"
-#include "ComCextdecs.h"
-
-#include "cextdecs/cextdecs.h"
-
-#include <stdlib.h>
-#include "cli_stdh.h"
-#include "Ipc.h"
-#include "ex_stdh.h"
-#include "SQLCLI.h"
-#include "NoWaitOp.h"
-#include "Statement.h"
-#include "Descriptor.h"
-#include "ExStats.h"
-#include "ex_exe_stmt_globals.h"
-
-// Methods for class NoWaitOp
-
-NoWaitOp::NoWaitOp(Statement * stmt,
-             Descriptor * inputDesc, Descriptor * outputDesc,
-             Lng32 tag, NoWaitOp::opType op, NABoolean initiated)
-             : stmt_(stmt), inputDesc_(inputDesc),
-             outputDesc_(outputDesc),tag_(tag),op_(op),
-             initiated_(initiated)
-  {
-  // lock Descriptors ($$$ what happens if already locked?)
-  if (inputDesc)
-    inputDesc->lockForNoWaitOp(); // $$$ ignores possible error return code
-
-  if (outputDesc)
-    outputDesc->lockForNoWaitOp(); // $$$ ignores possible error return code
-  }
-
-NoWaitOp::~NoWaitOp(void)
-  {
-  // unlock Descriptors ($$$ what happens if already unlocked?)
-  if (inputDesc_)
-    inputDesc_->unlockForNoWaitOp(); // $$$ ignores possible error return code
-  
-  if (outputDesc_)
-    outputDesc_->unlockForNoWaitOp(); // $$$ ignores possible error return code
-  }
-
-
-RETCODE NoWaitOp::awaitIox(Lng32 * tag)
-  {
-
-  RETCODE rc = NOT_FINISHED;  // assume not finished yet
-  RETCODE rc1;
-
-  // set current context to that of this Statement
-
-  // $$$ note that the diagsArea might be from a different context;
-  // I think this is OK, but it bears closer inspection.
-
-  // $$$ this code should be bracketed with a try-catch block to
-  // restore the original context in the event of an exception.
-  
-  ContextCli * stmtContext = stmt_->getContext();
-  ComDiagsArea &diagsArea = stmtContext->diags();
-  CliGlobals * cliGlobals = stmtContext->getCliGlobals();
-  ContextCli * oldCurrentContext = cliGlobals->currContext();
-  cliGlobals->setCurrentContext(stmtContext);
-  jmp_buf jmpBuf, *oldJmpBufPtr;
-  oldJmpBufPtr = cliGlobals->getJmpBufPtr();
-
-  cliGlobals->setJmpBufPtr(&jmpBuf);
-  Lng32 jmpRetcode;
-  Int32 jmpRc = setjmp(jmpBuf);
-  if (jmpRc)
-    {
-    if (jmpRc == MEMALLOC_FAILURE)
-      jmpRetcode = -EXE_NO_MEM_TO_EXEC;
-    else
-    {
-      stmt_->resetNoWaitOpPending();
-      jmpRetcode = -CLI_INTERNAL_ERROR;
-    }
-    *tag = tag_;
-    diagsArea << DgSqlCode(jmpRetcode);
-    rc = ERROR;
-    }
-  else
-    {
-    Int64 startTime = NA_JulianTimestamp();
-  
-    switch (op_)
-      {
-      case FETCH:
-      case FETCH_CLOSE:
-	{
-	// drive the fetch with a zero time limit (since we drive
-	// the IPC wait in this layer instead of in the Executor
-	// layer), but drive it only if there is dispatchable work
-	// to do
-	if (stmt_->mightHaveWorkToDo()) //Do this for fetch but not prepare
-	  {
-	  // redrive the fetch (FALSE indicates not a new fetch)
-
-	  rc = stmt_->fetch(cliGlobals,outputDesc_,diagsArea,FALSE);
-        
-	  // $$$ for the moment, assume at most one no-wait op per
-	  // Statement; can relax this later
-	  if (rc != NOT_FINISHED)
-	    {
-	    stmt_->resetNoWaitOpPending();
-	    *tag = tag_;
-
-	    // Fixup the diags area and outputDesc.
-	    // if bulk move was done, remember the statement it was done for.
-	    if ((outputDesc_) && (NOT outputDesc_->bulkMoveDisabled()))
-	      {
-	      //if (getenv("BULKMOVEWARN"))
-	      //	diags << DgSqlCode(EXE_ERROR_NOT_IN_USE_8350);
-
-	      outputDesc_->bulkMoveStmt() = stmt_;
-	      }
-
-	    // if select into query, then make sure that atmost one
-	    // row is returned by executor. More than one would
-	    // result in an error.
-	    if ((stmt_->isSelectInto()) &&
-		(rc == SUCCESS))
-	      {
-		// BertBert VV
-	      if (stmt_->isEmbeddedUpdateOrDelete() || stmt_->isStreamScan())
-		{
-		// For streams and destructive selects, we don't want the
-		// abovebehavior,instead, we should just return the first row.
-		}
-	      // BertBert ^^
-	      else
-		{
-		// select into and a row was returned.
-		// See if we can get one more row.
-		// Do not send in an output desc. We want
-		// to return the first row to application.
-		// This is being consistent with SQL/MP behavior.
-		stmt_->resetNoWaitOpEnabled();  // waited mode
-		rc = stmt_->fetch(cliGlobals, 0 /*no output desc*/,
-				  diagsArea, TRUE);
-
-		if (rc == SUCCESS)
-		  {
-		  diagsArea << DgSqlCode(-CLI_SELECT_INTO_ERROR);
-		  if (op_ == FETCH)
-		    stmt_->close(diagsArea);
-		  rc = ERROR;
-		  }
-	    
-		if (rc == SQL_EOF)
-		  {
-		  // remove warning 100 from diags.
-		  diagsArea.removeFinalCondition100();
-
-		  rc = SUCCESS;
-		  }
-		}
-	      }
-
-	    if ((rc == SQL_EOF) && (outputDesc_ == NULL))
-	      {
-	      // remove warning 100 from diags.
-	      diagsArea.removeFinalCondition100();
-	  
-	      rc = SUCCESS;
-	      }
-	    }   // end if rc != NOT_FINISHED
-
-	  }
-
-	  if (rc != NOT_FINISHED && op_ == FETCH_CLOSE)
-	  {
-	    rc1 = stmt_->close(diagsArea);
-	    if (rc1 == ERROR)
-		rc = rc1;
-	  }
-
-	  if ((op_ == FETCH_CLOSE) &&
-	      (rc != NOT_FINISHED) && 
-	      (rc != ERROR) &&
-	      (stmt_->noRowsAffected(diagsArea)))
-	    {
-	      rc = 
-		((diagsArea.getNumber(DgSqlCode::WARNING_) > 0) ? SUCCESS : SQL_EOF);
-	      // move EOF warning to diags area.
-	      if (rc == SQL_EOF)
-		diagsArea << DgSqlCode(SQL_EOF);
-	    }
-
-	break;
-	}
-
-      case PREPARE:
-	{
-	// drive the prepare with a zero time limit
-	ULng32 flags = PREPARE_NOT_A_NEW_OPERATION;
-	rc = stmt_->prepare(NULL,diagsArea,NULL,0,0,TRUE,flags);
-      
-	// $$$ for the moment, assume at most one no-wait op per
-	// Statement; can relax this later
-	if (rc != NOT_FINISHED)
-	  {
-          stmt_->issuePlanVersioningWarnings (diagsArea);
-	  stmt_->resetNoWaitOpPending();
-	  *tag = tag_;
-	  }
-	break;
-	}
-
-      default:
-	{
-	// $$$ operation invalid or not supported yet
-	rc = ERROR;
-	diagsArea << DgSqlCode(-EXE_INTERNAL_ERROR); 
-	break;
-	}
-      }
-    }
-
-
-  // restore original current context
-  cliGlobals->setCurrentContext(oldCurrentContext);
-  cliGlobals->setJmpBufPtr(oldJmpBufPtr);
-  return rc;
-  }
-
diff --git a/core/sql/cli/NoWaitOp.h b/core/sql/cli/NoWaitOp.h
deleted file mode 100644
index 9879d8e..0000000
--- a/core/sql/cli/NoWaitOp.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/**********************************************************************
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-**********************************************************************/
-#ifndef NOWAITOP_H
-#define NOWAITOP_H
-
-/* -*-C++-*-
-******************************************************************************
-*
-* File:         NoWaitOp.h
-* Description:  Definition of NowaitOp class.
-*               
-* Created:      3/26/2002
-* Language:     C++
-*
-*
-*
-******************************************************************************
-*/
-
-//#include "Statement.h"
-//#include "Descriptor.h"
-
-class Statement;
-class Descriptor;
-class NoWaitOp;
-
-#ifdef EX_GOD_H    // compile the following only if ex_god.h also included
-#ifdef CLI_STDH_H  // compile the following only if CliDefs.h also included
-
-class NoWaitOp : public NABasicObject
-{
-  public:
-
-    enum opType { FETCH, EXECUTE, PREPARE, FETCH_CLOSE };
-
-    NoWaitOp(Statement * stmt, 
-             Descriptor * inputDesc, Descriptor * outputDesc,
-             Lng32 tag, opType op, NABoolean initiated);
-
-    ~NoWaitOp(void);
-
-    RETCODE awaitIox(Lng32 *tag);
-
-    inline Statement * getStatement(void)
-      { return stmt_; };
-    inline Lng32 * getTagAddr(void)
-      { return &tag_; };
-
-    static inline Lng32 getTagSize()
-      { return sizeof(Lng32); }
-
-  private:
-
-    Statement * stmt_;        // Statement object of no-wait op
-    Descriptor * inputDesc_;  // input Descriptor for no-wait op
-    Descriptor * outputDesc_; // output Descriptor for no-wait op
-    Lng32 tag_;                // tag to be returned on operation completion
-    opType op_;               // type of operation (e.g. Fetch, Execute, ...)
-    NABoolean initiated_;     // true if operation has been started in
-                              // the Executor
-
-} ;
-
-
-#endif // CLI_STDH_H
-#endif // EX_GOD_H
-
-#endif /* NOWAITOP_H */
-
diff --git a/core/sql/cli/QuasiFileManager.cpp b/core/sql/cli/QuasiFileManager.cpp
deleted file mode 100644
index 7e1b8a3..0000000
--- a/core/sql/cli/QuasiFileManager.cpp
+++ /dev/null
@@ -1,577 +0,0 @@
-/**********************************************************************
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-**********************************************************************/
-/* -*-C++-*-
- *****************************************************************************
- *
- * File:         QuasiFileManager.cpp
- * Description:  Functions of QuasiFileManager and QuasiFileber.
- *               
- * Created:      3/26/2002
- * Language:     C++
- *
- *
- *
- *
- *****************************************************************************
- */
-
-// -----------------------------------------------------------------------
-
-
-#include "Platform.h"
-
-
-#include <stdlib.h>
-#include "cli_stdh.h"
-#include "Ipc.h"
-#include "ex_stdh.h"
-#include "QuasiFileManager.h"
-#include "NoWaitOp.h"
-
-#define SQL_QFO_FUNCTION_ATTRIBUTES __declspec(dllexport)
-#include "cextdecs/cextdecs.h"
-#include "guardian/dpcbz.h"
-
-
-SQL_QFO_FUNCTION_ATTRIBUTES short Sql_Qfo_IOComp(short quasi_file_number /*in*/,
-				     Lng32 *tag /*out*/,
-				     unsigned short *waitmask /*out*/,
-				     short userstop /*in*/);
-SQL_QFO_FUNCTION_ATTRIBUTES short Sql_Qfo_Close(short quasi_file_number /*in*/);
-
-//*************************************************************
-// Methods of QuasiFileManager
-//*************************************************************
- 
-QuasiFileManager::QuasiFileManager(NAHeap * noWaitHeap,
-                               IpcEnvironment * ipcEnv) : 
-pendingNoWaitOperations_(0), ipcEnv_(ipcEnv), noWaitHeap_(noWaitHeap)
-  {
-  quasiFileList_ = new(noWaitHeap_) Queue(noWaitHeap_);
-  }
-
-QuasiFileManager::~QuasiFileManager(void)
-  {
-  // delete quasiFile list
-  assert (quasiFileList_->isEmpty());
-
-  //  delete quasiFileList_;
-  NADELETE(quasiFileList_, Queue, noWaitHeap_);
-
-
-  // $$$ need to think about policy here... do we do disassociates
-  // on all Statements? Or do we assume we only get called after
-  // contexts and statements are destroyed?
-  }
-
-RETCODE QuasiFileManager::assocFileNumber(ComDiagsArea &diagsArea,
-                                           short fileNumber,
-                                           Statement * statement)
-  {
-  RETCODE rc = SUCCESS; 
-  QuasiFile *fn = NULL;
-
-  if (statement->getFileNumber() != -1)
-    {
-    // Statement is already associated with some file number --
-    // generate error
-    rc = ERROR;
-    // $$$ for now, just raise an internal error
-    diagsArea << DgSqlCode(-CLI_STATEMENT_ASSOCIATED_WITH_QFO);
-    }
-  else 
-    {
-    if ((fn = getQuasiFile(fileNumber)) == NULL)  // quasiFile entry not exist
-       {
-       
-       // first check if filename is $QFO
-
-
-	// add new quasiFile to list
-	fn = new (noWaitHeap_) 
-	        QuasiFile (noWaitHeap_, fileNumber, this);
-	quasiFileList_ -> insert((void *)fn);
-	} // create a new entry
-       
-    // associate this statement with this file number
-    fn->associateStatement(statement);
-   
-    } // else
-   
-  return rc;
-  }
-
-
-
-RETCODE QuasiFileManager::disassocFileNumber(ComDiagsArea &diagsArea,
-					     Statement * statement,
-					     NABoolean force)
-  { 
-  RETCODE rc = SUCCESS;  // assume success
-
-  short fileNumber = statement->getFileNumber();
-
-  if (fileNumber == -1)
-    {
-    diagsArea << DgSqlCode(-CLI_STATEMENT_WITH_NO_QFO);
-    rc = ERROR;
-    }
-  else
-  {
-    if (statement->noWaitOpPending() && force)
-      deleteNoWaitOps(diagsArea, fileNumber, statement);
-  
-    if (statement->noWaitOpPending())
-      {
-      // Statement has an incompleted no-wait op
-      // $$$ Later can consider cancelling or completing, or raising
-      // a user error but for now raise an internal error in this case
-      diagsArea << DgSqlCode(-CLI_OPERATION_WITH_PENDING_OPS);
-      rc = ERROR;
-      }
-    else
-      {
-      QuasiFile * fn = getQuasiFile (fileNumber);
-
-      if (fn == NULL)
-	{
-	diagsArea << DgSqlCode(-CLI_INVALID_QFO_NUMBER);
-	rc = ERROR;
-	}
-      else
-	{
-	if (fn->disassociateStatement(statement))
-	  {
-	  // last associated statement
-	  quasiFileList_->remove((void *)fn);
-	  delete fn;
-	  }
-	}
-      }
-  }
-
-  return rc;
-  }
-
-
-RETCODE QuasiFileManager::deleteNoWaitOps(ComDiagsArea &diagsArea,
-					short fileNumber, 
-					Statement * stmt)
-  {
-  RETCODE rc = SUCCESS;  // assume success
-
-  QuasiFile * fn = getQuasiFile(fileNumber);
-    
-  if (fn == NULL)
-    {
-    // trying to delete no-wait ops for a file number that is not allocated --
-    // generate error
-    rc = ERROR;
-    diagsArea << DgSqlCode(-CLI_INVALID_QFO_NUMBER);
-    }
-  else
-    {
-    // delete outstanding nowait ops
-    fn->deleteNoWaitOps(stmt);
-    }
-  return rc;
-  }
-
-RETCODE QuasiFileManager::awaitIox(Lng32 fileNumber,
-				 Lng32 * tag,
-				 short * feError)
-  {
-  RETCODE rc = NOT_FINISHED;  // show no completions yet
-  QuasiFile *quasiFile;
- 
-  quasiFile = getQuasiFile(fileNumber);
-  if (quasiFile != NULL)
-    rc = quasiFile->awaitIox(ipcEnv_, tag, feError);
-  else
-    *feError = FEBADPARMVALUE; // shouldn't be called with this file number
-
-  return rc;
-  }
-
-
-
-QuasiFile * QuasiFileManager::getQuasiFile(short fileNumber)
-  {
-  QuasiFile * fn = NULL; // assume failure
-  quasiFileList_->position();
-  fn = (QuasiFile *)quasiFileList_->getNext();
-
-  // go through the quasiFileList and find a match.
-  while (fn)
-    {
-    if (fileNumber == fn->getFileNumber())
-      return fn;
-    else
-      fn = (QuasiFile *)quasiFileList_->getNext();
-    }
-  return fn;
-  }
-
-
-void QuasiFileManager::notifyOfNewNoWaitOp(void)
-  {
-  pendingNoWaitOperations_++;
-  }
-
-void QuasiFileManager::closeQuasiFile(short fileNumber)
-  {
-  QuasiFile *quasiFile = getQuasiFile(fileNumber);
-  if (quasiFile)
-    {
-    quasiFile->closeNoWaitOpsPending();
-    quasiFileList_->remove((void *)quasiFile);
-    delete quasiFile;
-    }
-  }
-
-//***************************************************************************
-// Methods for class QuasiFile
-//***************************************************************************
-
-QuasiFile::QuasiFile(NAHeap * noWaitHeap,
-		       short fileNumber,
-		       QuasiFileManager *fnm) 
-  : fileNumber_(fileNumber), noWaitHeap_(noWaitHeap),
-     quasiFileManager_(fnm)
-  {
-  associatedStatements_ = new(noWaitHeap_) HashQueue(noWaitHeap_);
-  pendingNoWaitOps_ = new(noWaitHeap_) Queue(noWaitHeap_);
-  }
-
-QuasiFile::~QuasiFile(void)
-  {
-  assert(pendingNoWaitOps_->isEmpty());
-
-  // cannot use 'delete pendingNoWaitOps_' since it is not an NABasicObject.
-  // Need to add a Queue::cleanup method that will deallocate all the local
-  // members of Queue. Call that first and then call deallocateMemory.  TBD.
-  noWaitHeap_->deallocateMemory((void *)pendingNoWaitOps_);
-  //  delete pendingNoWaitOps_;
-  
-  // iterate through all associated Statements, disassociating them
-  
-  associatedStatements_->position();
-  Statement * stmt = (Statement *)associatedStatements_->getNext();
-  while (stmt)
-    {
-    // Disassociate statement, but without removing it from the list
-    
-    // We do this to defer calling Queue::remove(). Calling it
-    // now would force us to do another Queue::position() call.
-    // Also, the Queue destructor already contains logic to
-    // remove queue entries, so just deleting the Queue will
-    // do the trick.
-    stmt->resetFileNumber();
-    stmt->resetNoWaitOpEnabled();
-       
-    stmt = (Statement *)associatedStatements_->getNext();
-    }
-  delete associatedStatements_;
-  }
-
-// Note: These methods assume the caller has already validated that
-// the operation is a valid thing to do.
-
-void QuasiFile::associateStatement(Statement * stmt)
-  {
-  // associate this statement with this file number
-  stmt->setFileNumber(fileNumber_);
-  associatedStatements_->insert((char*)&stmt, 
-				sizeof(char *), 
-				(void *)stmt);
-  // Set the nowait enabled state in the Statement object
-  // stmt->setNoWaitOpEnableStatus(TRUE);
-  }
-
-NABoolean QuasiFile::disassociateStatement(Statement * stmt)
-  {
-  // disassociate this statement with this file number
-  stmt->resetFileNumber();
-  stmt->resetNoWaitOpEnabled();
-  associatedStatements_->position((char*)&stmt,
-                                  sizeof(char *));
-  associatedStatements_->getNext();
-  associatedStatements_->remove((void *)stmt);
-  // nothing to delete because the statement remains
-  return associatedStatements_->isEmpty();
-  }
-
-void QuasiFile::disableNoWaitOps(void)
-  {
-  // disable no-wait operations (updating cached flags in Statement 
-  // objects too)
-  associatedStatements_->position();
-  Statement * stmt = (Statement *)associatedStatements_->getNext();
-  while (stmt)
-    {
-    stmt->resetNoWaitOpEnabled();
-    stmt = (Statement *)associatedStatements_->getNext();
-    }
-  }
-
-
-void QuasiFile::deleteNoWaitOps(Statement * stmt)
-  {
-  // delete no-wait operations associated with the current statement
-  // (this is done when Statement level methods are about to do a cancel)
-
-  // $$$ at the moment, the code deletes all no-wait ops; it probably should
-  // only delete no-wait fetches. It works, though, because at the moment,
-  // the only no-wait ops *are* fetches.
-  
-  pendingNoWaitOps_->position();
-  NoWaitOp * nwo = (NoWaitOp *)pendingNoWaitOps_->getNext();
-  while (nwo)
-    {
-    if (stmt == nwo->getStatement())
-      {
-      // this no-wait op is on the current statement
-      pendingNoWaitOps_->remove((void *)nwo); // remove it
-      pendingNoWaitOps_->position();          // position to beginning
-      delete nwo;  // destroy it
-      quasiFileManager_->notifyOfDeletedNoWaitOp();
-      }
-    nwo = (NoWaitOp *)pendingNoWaitOps_->getNext();      
-    }
-
-  // indicate no no-wait ops pending now
-  stmt->resetNoWaitOpPending();
-  }
-
-void QuasiFile::closeNoWaitOpsPending()
-  {
-  // remove any pending nowait objects and set the flag in the statement 
-  // object to indicate that the QFO file was closed while a nowait
-  // operation was incomplete
-
-  pendingNoWaitOps_->position();
-  NoWaitOp * noWaitOp = (NoWaitOp *)pendingNoWaitOps_->getNext();
-  while (noWaitOp)
-    {
-    noWaitOp->getStatement()->setNoWaitOpIncomplete(); // mark the statement
-    noWaitOp->getStatement()->resetNoWaitOpPending(); // mark the statement
-    pendingNoWaitOps_->remove((void *)noWaitOp); // remove it
-    pendingNoWaitOps_->position();               // position to beginning
-    delete noWaitOp;  // destroy it
-    quasiFileManager_->notifyOfDeletedNoWaitOp();
-    noWaitOp = (NoWaitOp *)pendingNoWaitOps_->getNext();     
-    }
-  }
-
-RETCODE QuasiFile::awaitIox(IpcEnvironment * ipcEnv,
-			    Lng32 * tag,
-			    short * feError)
-  {
-
-  RETCODE rc = NOT_FINISHED;  // assume nothing finished
-  
-  pendingNoWaitOps_->position();
-  NoWaitOp * noWaitOp = (NoWaitOp *)pendingNoWaitOps_->getNext();
-  
-  if (noWaitOp == NULL)
-    {
-    // This can happen if awaitiox is called with this filenum (user error),
-    // or filenum -1 (might be normal usage)
-    *feError = FENONEOUT;
-    }
-  else
-    {
-
-    //Future: Will mark statement dispatchable if a message "is done"
-    ipcEnv->getAllConnections()->waitOnAll(0);
-
-    // clean up the completed MasterEspMessages
-    ipcEnv->deleteCompletedMessages();
-
-    while (noWaitOp)
-      {
-      Lng32 numPendingBeforeRedrive =
-	quasiFileManager_->getPendingNowaitOps();
-      rc = noWaitOp->awaitIox(tag);
-      if (rc == NOT_FINISHED)
-        {
-        noWaitOp = (NoWaitOp *)pendingNoWaitOps_->getNext();
-        }
-      else // it completed
-        {       
-        // remove NoWaitOp object from our list and destroy it and
-	// decr the pending count, if the redrive hasn't done it all
-	if (!pendingNoWaitOps_->remove((void *)noWaitOp))
-	  {
-	  assert(quasiFileManager_->getPendingNowaitOps() ==
-	         numPendingBeforeRedrive - 1);
-	  }
-	else
-	  {
-	  delete noWaitOp;
-	  quasiFileManager_->notifyOfDeletedNoWaitOp();
-	  }
-
-        noWaitOp = NULL;  // to exit loop without further processing
-        }
-      }
-    }      
-
-  return rc;
-  }
-
-
-RETCODE QuasiFile::queueNoWaitOp(ComDiagsArea &diagsArea,
-				 Statement * stmt,
-				 Descriptor * inputDesc,
-				 Descriptor * outputDesc,
-				 NoWaitOp::opType op,
-				 NABoolean operationStarted,
-				 Lng32 tag )
-  {
-  RETCODE rc = SUCCESS; // assume we are successful
-  
-    // Create a NoWaitOp object to represent the incompleted operation
-    // and queue it
-    
-  NoWaitOp * nwo = new(noWaitHeap_)
-    NoWaitOp(stmt,
-    inputDesc,
-    outputDesc,
-    tag, 
-    op, 
-    operationStarted);
-
-  pendingNoWaitOps_->insert((void *)nwo);
-  quasiFileManager_->notifyOfNewNoWaitOp();
-  return rc;
-  }
-
-// Code that does SEGMENT_REVEAL_ appears in three places:
-//   switchToPriv() in cli/CliLayerForNsk.cpp
-//   QfoRevealSegs() in QuasiFileManager.cpp
-//   stopCatcher() in cli/CliLayerForNsk.cpp
-short QfoRevealSegs(CliGlobals *&cliGlobals)
-  {
-  cliGlobals = GetCliGlobals();
-  cliGlobals->incrNumOfCliCalls();
-  return 0;
-  }
-
-//Code that does SEGMENT_HIDE_ appears in two places
-//  switchToNonPriv() in cli/CliLayerForNsk.cpp
-//  QfoHideSegs() in cli/CliLayerForNsk.cpp
-short QfoHideSegs(CliGlobals *cliGlobals)
-  {
-
-  cliGlobals->decrNumOfCliCalls();
-
-  return 0;
-  }
-
-SQL_QFO_FUNCTION_ATTRIBUTES short Sql_Qfo_IOComp(short quasi_file_number /*in*/,
-				     Lng32 *tag /*out*/,
-				     unsigned short *waitmask /*out*/,
-				     short userstop /*in*/)
-  {
-  short retVal, feError = FEOK;
-  RETCODE retcode;
-  QuasiFileManager *quasiFileManager;
-  *waitmask = LDONE;
-  CliGlobals *cliGlobals;
-  if (QfoRevealSegs(cliGlobals) != 0)
-    return FEBADPARMVALUE;
-  jmp_buf jmpBuf;
-  short oldStop;
-  oldStop = SETSTOP(1);
-  cliGlobals->setJmpBufPtr(&jmpBuf);
-  Int32 jmpRc = setjmp(jmpBuf);
-  if (jmpRc)
-    {
-    QfoHideSegs(cliGlobals);
-    SETSTOP(oldStop);
-    return FEBADPARMVALUE; // longjmp not associated with statement
-    }
-  quasiFileManager = cliGlobals->getQuasiFileManager();
-  if (quasiFileManager->getPendingNowaitOps() > 0)
-    retcode = quasiFileManager->awaitIox(quasi_file_number, tag, &feError);
-  else
-    {
-    QfoHideSegs(cliGlobals);
-    SETSTOP(oldStop);
-    return FENONEOUT;
-    }
-  if (feError != FEOK)
-    retVal = feError; // May be FEBADPARMVALUE, or FENONEOUT
-  else
-    {
-    if (1) // Not used but is compiled on NT
-      retVal = FEQFOEVENTCONSUMED;
-    else
-      switch (retcode)
-      {
-      case SUCCESS:
-	retVal = FEOK;
-	break;
-      case ERROR:
-	retVal = FESQLERR;
-	break;
-      case SQL_EOF:
-      case WARNING:
-	retVal = FESQLWARN;
-	break;
-      case NOT_FINISHED:
-	retVal = FEQFONOTCOMPLETE;
-	break;
-      default:
-	retVal = FEBADPARMVALUE;
-      }
-    }
-  QfoHideSegs(cliGlobals);
-  SETSTOP(oldStop);
-  return retVal;
-  }
-
-
-SQL_QFO_FUNCTION_ATTRIBUTES short Sql_Qfo_Close(short quasi_file_number)
-  {
-  CliGlobals *cliGlobals;
-  if (QfoRevealSegs(cliGlobals) != 0)
-    return 0;
-  cliGlobals->setLogEmsEvents(FALSE);
-  jmp_buf jmpBuf;
-  cliGlobals->setJmpBufPtr(&jmpBuf);
-  Int32 jmpRc = setjmp(jmpBuf);
-  if (jmpRc)
-    {
-    cliGlobals->setLogEmsEvents(TRUE);
-    QfoHideSegs(cliGlobals);
-    return 0;
-    }
-  QuasiFileManager *quasiFileManager = cliGlobals->getQuasiFileManager();
-  quasiFileManager->closeQuasiFile(quasi_file_number);
-  cliGlobals->setLogEmsEvents(TRUE);
-  QfoHideSegs(cliGlobals);
-  return 0;
-  }
-
diff --git a/core/sql/cli/QuasiFileManager.h b/core/sql/cli/QuasiFileManager.h
deleted file mode 100644
index b30f2ee..0000000
--- a/core/sql/cli/QuasiFileManager.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/**********************************************************************
-// @@@ START COPYRIGHT @@@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// @@@ END COPYRIGHT @@@
-**********************************************************************/
-#ifndef QUASIFILEMANAGER_H
-#define QUASIFILEMANAGER_H
-
-/* -*-C++-*-
-******************************************************************************
-*
-* File:         QuasiFile.h
-* Description:  This file contains definitions of the QuasiFileManager class
-*               and QuasiFile class. 
-*               
-* Created:      3/26/2002
-* Language:     C++
-*
-*
-*
-******************************************************************************
-*/
-#include "NoWaitOp.h"
-
-class Statement;
-class IpcEnvironment;
-class ComDiagsArea;
-class HashQueue;
-class NoWaitOp;
-class QuasiFile;
-class QuasiFileManager;
-
-// ------------------------------------------------------------------
-// Classes that keep state for no-wait SQL operations
-//
-// One QuasiFileManager object keeps track of all SQL pseudo-files
-// used by a process.
-//
-// One QuasiFile object exists for each SQL pseudo-file.
-//
-// One NoWaitOp object exists for each pending no-wait SQL operation.
-//
-// ------------------------------------------------------------------
-#ifdef EX_GOD_H    // compile the following only if ex_god.h also included
-#ifdef CLI_STDH_H  // compile the following only if CliDefs.h also included
-
-class QuasiFileManager : public NABasicObject 
-  {
-  public:
-
-    QuasiFileManager(NAHeap * noWaitHeap,IpcEnvironment * ipcEnv);
-    virtual ~QuasiFileManager(void);
-
-    RETCODE assocFileNumber(ComDiagsArea &diagsArea,short fileNumber,
-			    Statement * statement);
-    RETCODE disassocFileNumber(ComDiagsArea &diagsArea,
-			       Statement * statement,
-			       NABoolean force = FALSE);
-    RETCODE deleteNoWaitOps(ComDiagsArea &diagsArea,short fileNumber,
-			    Statement * statement);
-    RETCODE awaitIox(Lng32 fileNumber, Lng32 * tag, short * feError);
-
-    // returns QuasiFile object if one exists, 0 otherwise
-    QuasiFile * getQuasiFile(short fileNumber);
-
-    // get the QuasiFile if it exists, call QuasiFile::closeNoWaitOpsPending,
-    // removes the QuasiFile from quasiFileList_, and deletes the QuasiFile
-    void closeQuasiFile(short fileNumber);
-
-    // called whenever a new NoWaitOp object is created
-    void notifyOfNewNoWaitOp(void);
-
-    // called whenever a NowaitOp is destroyed
-    inline void notifyOfDeletedNoWaitOp(void)
-      { pendingNoWaitOperations_--; };
-    inline Lng32 getPendingNowaitOps() { return pendingNoWaitOperations_; }
-  
-  private:
-
-    Lng32 pendingNoWaitOperations_; // number of pending operations
-
-    // list of QuasiFile objects 
-    Queue * quasiFileList_; 
-
-    // Ipc environment
-    IpcEnvironment * ipcEnv_;
-
-    // heap used by no-wait SQL procedures
-    NAHeap * noWaitHeap_;
-
-  };
-
-// Where methods in the QuasiFile class do not raise errors, they assume
-// that the QuasiFileManager (their caller) has done all necessary
-// validation.
-
-class QuasiFile : public NABasicObject
-  {
-  public:
-
-    QuasiFile(NAHeap * noWaitHeap,short fileNumber,QuasiFileManager * fnm);
-    ~QuasiFile(void);
-
-    void associateStatement(Statement * stmt);
-    NABoolean disassociateStatement(Statement * stmt);
-    void disableNoWaitOps(void);
-    void deleteNoWaitOps(Statement * stmt);
-    void closeNoWaitOpsPending();
-
-    RETCODE awaitIox(IpcEnvironment * ipcEnv, Lng32 * tag, short * feError);
-
-    RETCODE queueNoWaitOp(ComDiagsArea &diagsArea,
-      Statement * stmt,
-      Descriptor * inputDesc,
-      Descriptor * outputDesc,
-      NoWaitOp::opType op,
-      NABoolean operationStarted,
-      Lng32 tag);
-
-    inline short getFileNumber() {return fileNumber_;};
-    inline NABoolean noWaitOpsPending(void)
-      { return !pendingNoWaitOps_->isEmpty(); } ;
-
-  private:
-
-    short fileNumber_;
-    NAHeap * noWaitHeap_;
-    QuasiFileManager * quasiFileManager_;
-
-    // a list of Statement objects associated with this QuasiFile
-    // in no particular order
-    HashQueue * associatedStatements_;
-    
-    // a list of NoWaitOp objects (representing pending no-wait
-    // operations), in order of initiation
-    Queue * pendingNoWaitOps_;
-
-  } ;
-
-#endif // CLI_STDH_H
-#endif // EX_GOD_H
-#endif /* QUASIFILEMANAGER_H */
diff --git a/core/sql/cli/Statement.cpp b/core/sql/cli/Statement.cpp
index 6d847f0..10be536 100644
--- a/core/sql/cli/Statement.cpp
+++ b/core/sql/cli/Statement.cpp
@@ -218,9 +218,6 @@
 {
   cliLevel_ = context_->getNumOfCliCalls();
 
-  space_.setJmpBuf(cliGlobals_->getJmpBuf());
-  heap_.setJmpBuf(cliGlobals_->getJmpBuf());
-
 #ifdef _DEBUG
   stmtDebug_ = FALSE;
   stmtListDebug_ = FALSE;
diff --git a/core/sql/common/BaseTypes.cpp b/core/sql/common/BaseTypes.cpp
index 2dfe75b..0b22550 100644
--- a/core/sql/common/BaseTypes.cpp
+++ b/core/sql/common/BaseTypes.cpp
@@ -47,9 +47,6 @@
 #endif
 
 #include <stdlib.h>		// exit(), in NAExit()
-//#include <setjmp.h>
-
-
 
 #include "seabed/fs.h"
 #include "seabed/ms.h"
diff --git a/core/sql/common/CmpCommon.h b/core/sql/common/CmpCommon.h
index 74393ad..1caaafa 100644
--- a/core/sql/common/CmpCommon.h
+++ b/core/sql/common/CmpCommon.h
@@ -141,7 +141,7 @@
   #define CURRCONTEXT_OPTDEBUG (CmpCommon::context()->getOptDbg())
   #define CURRCONTEXT_HISTCACHE (CmpCommon::context()->getHistogramCache())
   #define CURRCONTEXT_OPTSIMULATOR (CmpCommon::context()->getOptimizerSimulator())
-  #define GLOBAL_EMPTY_INPUT_LOGPROP (CmpCommon::context()->getGEILP())
+  #define GLOBAL_EMPTY_INPUT_LOGPROP (CmpCommon::statement()->getGEILP())
   #define CURRSTMT_OPTDEFAULTS (CmpCommon::context()->getOptDefaults())
 
   // For some routines that do care about the current CmpContext*. 
diff --git a/core/sql/common/NAAssert.h b/core/sql/common/NAAssert.h
index 4c37de7..7121060 100644
--- a/core/sql/common/NAAssert.h
+++ b/core/sql/common/NAAssert.h
@@ -97,18 +97,11 @@
   inline jmp_buf *getJmpBuf()             { return &longJmpTgt_; }
   inline jmp_buf *getJmpBufPtr()         { return longJmpTgtPtr_; }
   inline void setJmpBufPtr(jmp_buf *longJmpTgtPtr) { longJmpTgtPtr_ = longJmpTgtPtr; }
-  inline NABoolean getLogEmsEvents() const { return logEmsEvents_; }
-  inline void setLogEmsEvents(NABoolean logEmsEvents) { logEmsEvents_ = logEmsEvents; }
-  inline void setQfoProcessing() { qfoProcessing_ = TRUE; }
-  inline void clearQfoProcessing() { qfoProcessing_ = FALSE; }
-  inline NABoolean isQfoProcessing() { return qfoProcessing_; }
 protected:
   NABoolean globalsAreInitialized_;
   jmp_buf  *longJmpTgtPtr_;
   jmp_buf  longJmpTgt_;
   long     numCliCalls_;
-  NABoolean logEmsEvents_;
-  NABoolean qfoProcessing_;
 };
 
 NAAssertGlobals * GetNAAssertGlobals(NABoolean *logEmsEvents = NULL);
diff --git a/core/sql/common/NAMemory.cpp b/core/sql/common/NAMemory.cpp
index 8b50a2e..0c13caa 100644
--- a/core/sql/common/NAMemory.cpp
+++ b/core/sql/common/NAMemory.cpp
@@ -852,7 +852,7 @@
 #ifndef MUSE
 NAMemory::NAMemory(const char * name)
      : 
-    type_(NO_MEMORY_TYPE),
+    type_(EXECUTOR_MEMORY),
     maximumSize_((size_t)-1),            // no maximum
     parent_(NULL),
     firstBlk_(NULL),
@@ -864,11 +864,9 @@
     totalSize_(0),
     blockCnt_(0),
     thBlockCnt_(DEFAULT_THRESHOLD_BLOCK_COUNT),
-    segGlobals_(0),
     memoryList_(NULL),
     lastListEntry_(NULL),
     nextEntry_(NULL),
-    heapJumpBuf_(0),
     exhaustedMem_(FALSE),
     errorsMask_(0),
     crowdedTotalSize_(0ll)
@@ -880,6 +878,7 @@
     , maxVmSize_(0l)
     , sharedMemory_(FALSE)
 {
+  setType(type_, 0);
 #if ( defined(_DEBUG) || defined(NSK_MEMDEBUG) )  
   char * debugLevel = getenv("MEMDEBUG");
   if (debugLevel)
@@ -916,11 +915,9 @@
    totalSize_(0),
    blockCnt_(0),
    thBlockCnt_(DEFAULT_THRESHOLD_BLOCK_COUNT),
-   segGlobals_(0),
    memoryList_(NULL),
    lastListEntry_(NULL),
    nextEntry_(NULL),
-   heapJumpBuf_(0),
    exhaustedMem_(FALSE),
    errorsMask_(0),
     crowdedTotalSize_(0ll)
@@ -974,11 +971,9 @@
     totalSize_(0),
     blockCnt_(0),
     thBlockCnt_(DEFAULT_THRESHOLD_BLOCK_COUNT),
-    segGlobals_(0),
     memoryList_(NULL),
     lastListEntry_(NULL),
     nextEntry_(NULL),
-    heapJumpBuf_(0),
     exhaustedMem_(FALSE),
     errorsMask_(0),
     crowdedTotalSize_(0ll)
@@ -1011,13 +1006,10 @@
 }
 
 NAMemory::NAMemory(const char * name,
-		   SEG_ID  extFirstSegId,
-		   void  * extFirstSegStart,
-		   off_t    extFirstSegOffset,
-		   size_t   extFirstSegLen,
-		   size_t   extFirstSegMaxLen,
-		   NASegGlobals *segGlobals,
-		   Lng32    extMaxSecSegCount)
+           SEG_ID  segmentId,
+           void  * baseAddr,
+           off_t   heapStartOffset,
+           size_t  maxSize)
      : 
     type_(EXECUTOR_MEMORY),
     parent_(NULL),
@@ -1030,11 +1022,9 @@
     totalSize_(0),
     blockCnt_(0),
     thBlockCnt_(DEFAULT_THRESHOLD_BLOCK_COUNT),
-    segGlobals_(segGlobals),
     memoryList_(NULL),
     lastListEntry_(NULL),
     nextEntry_(NULL),
-    heapJumpBuf_(0),
     exhaustedMem_(FALSE),
     errorsMask_(0),
     crowdedTotalSize_(0ll)
@@ -1046,13 +1036,6 @@
     , maxVmSize_(0l)
     , sharedMemory_(FALSE)
 {
-  segGlobals_->setFirstSegInfo(extFirstSegId,
-                               extFirstSegStart,
-                               extFirstSegOffset,
-                               extFirstSegLen,
-                               extFirstSegMaxLen);
-  segGlobals_->setMaxSecSegCount(extMaxSecSegCount);
-
   // call setType to initialize the values of all the sizes
   setType(type_, 0);
 
@@ -1072,18 +1055,19 @@
   // space in the segment, then initialize the firstBlk_ within
   // the passed in memory.  The NAHeap constructor will initialize
   // the top NAHeapFragment.
-  if (extFirstSegStart != NULL) {
+  if (baseAddr != NULL) {
     blockCnt_ = 1;
-    size_t tsize = extFirstSegLen - extFirstSegOffset - BLOCK_OVERHEAD;
+    size_t tsize = maxSize - heapStartOffset - BLOCK_OVERHEAD;
     if (tsize > (8 * sizeof(size_t))) {
-      firstBlk_ = (NABlock*)((char*)extFirstSegStart + extFirstSegOffset);
-      firstBlk_->size_ = extFirstSegLen - extFirstSegOffset;
+      firstBlk_ = (NABlock*)((char*)baseAddr + heapStartOffset);
+      firstBlk_->size_ = maxSize - heapStartOffset;
       firstBlk_->sflags_ = NABlock::EXTERN_BIT;
       firstBlk_->next_ = NULL;
-      firstBlk_->segmentId_ = extFirstSegId;
+      firstBlk_->segmentId_ = segmentId;
+      totalSize_ = initialSize_ = maximumSize_ = firstBlk_->size_;
     }
   }
-
+  upperLimit_ = maxSize;
   // need to initialize an NAStringRef object "on top" of the array
   // (don't touch this unless you know what you're doing!)
   NAStringRef * tmp = 
@@ -1091,6 +1075,7 @@
     NAStringRef (NAStringRef::NULL_CTOR, this) ;
 }
 
+
 void NAMemory::reInitialize()
 {
   // delete all blocks allocated for this heap and re-set the heap
@@ -1135,9 +1120,7 @@
         // This code provides mutual exclusion for the runtime stats shared
         // memory segment.
         short semRetcode = 0;
-        if (parent_->getType() == EXECUTOR_MEMORY &&
-            parent_->getSegGlobals() != NULL &&
-            parent_->getSegGlobals()->getFirstSegId() == getStatsSegmentId()) {
+        if (parent_->getType() == EXECUTOR_MEMORY && getSharedMemory()) {
           semRetcode = getRTSSemaphore();
         }
         while (p) {
@@ -1178,7 +1161,7 @@
     firstBlk_ = externSegment;
     firstBlk_->next_ = NULL;
     blockCnt_ = 1;
-    totalSize_ = firstBlk_->size_ - segGlobals_->getFirstSegOffset();
+    totalSize_ = firstBlk_->size_ ;
   }
 
   // If this is an NAHeap, then call reInitializeHeap() to reinitialize
@@ -1211,24 +1194,9 @@
 
   switch(type_) {
   case EXECUTOR_MEMORY:
-    // input parameter blockSize is ignored
-    // this is an NAMemory using flat segments on NSK
-    if (segGlobals_ && segGlobals_->getFirstSegStart())
-    {
-      // take the segment size and max. size from the externally
-      // provided // segment
-      totalSize_ = initialSize_ = segGlobals_->getFirstSegLen() 
-         - segGlobals_->getFirstSegOffset();
-      maximumSize_ = segGlobals_->getFirstSegMaxLen() 
-         - segGlobals_->getFirstSegOffset();
-    }
-    else
-    {
-      initialSize_   = DEFAULT_NT_HEAP_INIT_SIZE ; 
-      maximumSize_   = DEFAULT_NT_HEAP_MAX_SIZE ;           // no maximum
-      incrementSize_ = DEFAULT_NT_HEAP_INCR_SIZE ;
-    }
-
+    initialSize_   = DEFAULT_NT_HEAP_INIT_SIZE ; 
+    maximumSize_   = DEFAULT_NT_HEAP_MAX_SIZE ;           // no maximum
+    incrementSize_ = DEFAULT_NT_HEAP_INCR_SIZE ;
     break;
 	
   case SYSTEM_MEMORY:
@@ -1976,9 +1944,7 @@
     HEAPLOG_OFF() // no recursive logging.
     // This code provides mutual exclusion for the runtime stats shared
     // memory segment.
-    if (parent_->getType() == EXECUTOR_MEMORY &&
-        parent_->getSegGlobals() != NULL &&
-        parent_->getSegGlobals()->getFirstSegId() == getStatsSegmentId()) {
+    if (parent_->getType() == EXECUTOR_MEMORY && getSharedMemory()) {
        short retcode = getRTSSemaphore();
        parent_->deallocateHeapMemory((void*)curr);
        if (retcode == 1)
@@ -2222,7 +2188,7 @@
 
     // This could be either Global Executor Memory or Stats Globals
     // Don't add a block if Stats Globals!
-    if (getSegGlobals()->getFirstSegId() == getStatsSegmentId())
+    if (getSharedMemory())
       return NULL;
 
     // Try to allocate the NABlock using mmap(). If it succeeds return the
@@ -2280,9 +2246,7 @@
     // semaphore is obtained in allocateHeapMemory or deallocateHeapMemory
     // for both global and process stats heap. But leaving it now
     // since it won't hurt other than extra cpu cycles
-    if (getSharedMemory() || (parent_->getType() == EXECUTOR_MEMORY &&
-        parent_->getSegGlobals() != NULL &&
-        parent_->getSegGlobals()->getFirstSegId() == getStatsSegmentId())) {
+    if (getSharedMemory()) {
       short retcode = getRTSSemaphore();
       p = (NABlock*)parent_->allocateHeapMemory(blockSize, FALSE);
 
@@ -2413,43 +2377,9 @@
 NAMemory::handleExhaustedMemory()
 {
   exhaustedMem_ = TRUE;
-  if (heapJumpBuf_)
-    {
-      ARKCMP_EXCEPTION_EPILOGUE("NAMemory");
-      longjmp(*heapJumpBuf_, MEMALLOC_FAILURE);
-    }
 }
 #endif // MUSE
 
-void
-NAMemory::logAllocateError(short error, SEG_ID segmentId, Lng32 blockSize, short errorDetail)
-{
-  char msg[128], msgErrorDetail[32];
-  if (error != 0 && error != 15)
-  {
-    unsigned short errorMask = 1 << error - 1;
-    if (!(errorsMask_ & errorMask))
-    {
-      errorsMask_ |= errorMask;
-      str_sprintf(msg, "SEGMENT_ALLOCATE_ for segment-id %u, segment-size %u returned error %u",
-                  segmentId, blockSize, error);
-      if (error == 1 || error == 2 || error == 3 || error == 14)
-      {
-        str_sprintf(msgErrorDetail, ", error-detail %u", errorDetail);
-        str_cat_c(msg, msgErrorDetail);
-      }
-      SQLMXLoggingArea::logExecRtInfo(__FILE__, __LINE__, msg, 0);
-    }
-  }
-}
-
-void NAMemory::setJmpBuf( jmp_buf *newJmpBuf )
-{ 
-  if (derivedClass_ == NAHEAP_CLASS)
-    assert(((NAHeap*)this)->getThreadSafe() == false);
-  heapJumpBuf_ = newJmpBuf;
-}
-
 
 NABoolean NAMemory::getUsage(size_t * lastBlockSize, size_t * freeSize, size_t * totalSize)
 {
@@ -2493,146 +2423,6 @@
 }
 
 // ---------------------------------------------------------------------------
-// NASegGlobals methods
-// ---------------------------------------------------------------------------
-void NASegGlobals::setFirstSegInfo(SEG_ID firstSegId,
-                                   void * firstSegStart,
-                                   off_t  firstSegOffset,
-                                   size_t firstSegLen,
-                                   size_t firstSegMaxLen)
-{
-  firstSegId_ = firstSegId;
-  firstSegStart_ = firstSegStart;
-  firstSegOffset_ = firstSegOffset;
-  firstSegLen_ = firstSegLen;
-  firstSegMaxLen_ = firstSegMaxLen;
-  addedSegCount_ = 0;
-  lowWaterMark_ = firstSegStart;
-  highWaterMark_ = (void *) ((char *)firstSegStart + firstSegLen);
-  for (Int32 i = 0; i < NA_MAX_SECONDARY_SEGS; i++)
-  {
-    addedSegId_[i] = 0;
-    startAddresses_[i] = 0;
-    lengths_[i] = 0;
-  }
-}
-
-Int32 NASegGlobals::addSegId(short segId, void *start, size_t len)
-{
-  if (addedSegCount_ == NA_MAX_SECONDARY_SEGS)
-    {
-      return 0;
-    }
-  else
-    {
-      Int32 segOffset = (Int32)(segId - firstSegId_ - 1);
-      addedSegId_[segOffset]     = segId;
-      startAddresses_[segOffset] = start;
-      lengths_[segOffset]        = len;
-      addedSegCount_++;
-    }
-  
-  void *end = (void *) ((char *)start + len);
-  if (lowWaterMark_ > start)
-    lowWaterMark_ = start;
-  if (highWaterMark_ < end)
-    highWaterMark_ = end;
-  return 1;
-}
-
-void NASegGlobals::deleteSegId(short segId)
-{
-  Lng32 addedSegCount, i;
-  void *start, *end;
-  Int32 segOffset = (Int32)(segId - firstSegId_ -1);
-  assert (--addedSegCount_ >= 0);
-  addedSegId_[segOffset] = 0;
-  startAddresses_[segOffset] = 0;
-  lengths_[segOffset] = 0;
-  lowWaterMark_ = firstSegStart_;
-  highWaterMark_ = (void *) ((char *)firstSegStart_ + firstSegLen_);
-  for (addedSegCount = 0, i = 0; addedSegCount < addedSegCount_; i++) {
-    if (addedSegId_[i] != 0)
-    {
-      addedSegCount += 1;
-      start = startAddresses_[i];
-      end = (void *) ((char *)start + lengths_[i]);
-      if (lowWaterMark_ > start)
-        lowWaterMark_ = start;
-      if (highWaterMark_ < end)
-        highWaterMark_ = end;
-    }
-  }
-}
-
-void NASegGlobals::resizeSeg(short segId, void *start, size_t newLen)
-{
-  Lng32 addedSegCount, i;
-  void *end = (void *) ((char *)start + newLen);
-  if (lowWaterMark_ > start)
-    lowWaterMark_ = start;
-  if (highWaterMark_ < end)
-    highWaterMark_ = end;
-
-  if (segId == firstSegId_)
-    {
-      firstSegLen_ = newLen;
-    }
-  else
-    {
-      for (addedSegCount = 0, i = 0; addedSegCount < addedSegCount_; i++)
-      {
-        if (addedSegId_[i] != 0)
-        {
-          addedSegCount += 1;
-          if (segId == addedSegId_[i])
-          {
-            lengths_[i] = newLen;
-            return;
-          }
-        }
-      }
-    }
-}
-
-NABoolean NASegGlobals::overlaps(void *start, size_t len) const
-{
-  // check the easy things first, performance of this method is critical
-  // because it is used in boundscheck routines that are frequently called
-  Lng32 addedSegCount, i;
-  void *end = (void *) ((char *)start + len);
-
-  // sanity check, does the provided memory range wrap around the end
-  // of the 32 bit addressing range (refuse to deal with such ranges
-  // and just return an overlap)
-  if (start > end)
-    return TRUE;
-
-  // quick check, using low and high water marks
-  if (end <= lowWaterMark_ || start >= highWaterMark_)
-    return FALSE;
-
-  // quick check won't work, loop over each executor segment separately
-  // in case the memory range lies between two executor segments.
-  for (addedSegCount = 0, i = 0; addedSegCount < addedSegCount_; i++)
-    {
-      if (addedSegId_[i] != 0)
-      {
-        addedSegCount += 1;
-        void *startSeg = startAddresses_[i];
-        void *endSeg = (void *) ((char *)startSeg + lengths_[i]);
-
-        // if the start address of the segment or its last byte lie
-        // in the range then there is an overlap
-        if (start <= startSeg && end > startSeg ||
-            start < endSeg && end >= endSeg)
-          return TRUE;
-      }
-    }
-  return FALSE;
-}
-
-// ---------------------------------------------------------------------------
 // NAHeap methods
 // ---------------------------------------------------------------------------
 #ifndef MUSE
@@ -2661,7 +2451,7 @@
 NAHeap::NAHeap(const char * name, 
 	       NAHeap * parent, 
 	       Lng32 blockSize, 
-	       Lng32 upperLimit)
+	       size_t upperLimit)
   : NAMemory(name, parent, blockSize, upperLimit),
     smallmap_(0),
     treemap_(0),
@@ -2691,7 +2481,7 @@
 NAHeap::NAHeap(const char * name, 
 	       NAMemoryType type, 
 	       Lng32 blockSize, 
-	       Lng32 upperLimit) 
+	       size_t upperLimit) 
   : NAMemory(name, type, blockSize, upperLimit),
     smallmap_(0),
     treemap_(0),
@@ -2713,17 +2503,51 @@
 #endif // _DEBUG
 }
 
-NAHeap::NAHeap(const char  * name,
-               SEG_ID  extFirstSegId,
-               void  * extFirstSegStart,
-               Lng32    extFirstSegOffset,
-               Lng32    extFirstSegLen,
-               Lng32    extFirstSegMaxLen,
-               NASegGlobals *segGlobals,
-               Lng32    extMaxSecSegCount)
-  : NAMemory(name, extFirstSegId, extFirstSegStart, extFirstSegOffset,
-             extFirstSegLen, extFirstSegMaxLen, segGlobals,
-             extMaxSecSegCount),
+NAHeap::NAHeap(const char  * name)
+  : NAMemory(name),
+    smallmap_(0),
+    treemap_(0),
+    dvsize_(0),
+    topsize_(0),
+    least_addr_(0),
+    dv_(NULL),
+    top_(NULL),
+    errCallback_(NULL)
+{
+  initBins();
+  derivedClass_ = NAHEAP_CLASS;
+
+  if (firstBlk_) {
+    initTop(firstBlk_);
+    least_addr_ = (char*)firstBlk_;
+  }
+
+  if (deallocTraceArray == 0)
+  {
+    char *deallocTraceEnvvar = getenv("EXE_DEALLOC_MEM_TRACE");
+    if (deallocTraceEnvvar != NULL)
+    {
+      deallocTraceArray =
+        (DeallocTraceEntry (*) [deallocTraceEntries])malloc(sizeof(DeallocTraceEntry) * deallocTraceEntries);
+      memset((void *)deallocTraceArray, '\0', sizeof(DeallocTraceEntry) * deallocTraceEntries);
+    }
+  }
+
+  threadSafe_ = false;
+  memset(&mutex_, '\0', sizeof(mutex_));
+
+#ifdef _DEBUG
+  setAllocTrace();
+#endif // _DEBUG
+}
+
+// Constructor that imposes the NAHeap struture on already allocated memory 
+NAHeap::NAHeap(const char * name,
+       SEG_ID  segmentId,
+       void  * baseAddr,
+       off_t   heapStartOffset,
+       size_t  maxSize)
+  : NAMemory(name, segmentId, baseAddr, heapStartOffset, maxSize),
     smallmap_(0),
     treemap_(0),
     dvsize_(0),
@@ -2769,7 +2593,6 @@
 
 void NAHeap::setThreadSafe()
 {
-  assert(((NAMemory*)this)->getJmpBuf() == NULL);
   int rc;
   pthread_mutexattr_t attr;
   rc = pthread_mutexattr_init(&attr);
@@ -2845,8 +2668,6 @@
   // That code frees the NABlocks and will reinitialize the firstBlk_
   // if it was allocated externally.
   if (firstBlk_ != NULL) {
-     assert((char*)firstBlk_ == (char*)segGlobals_->getFirstSegStart()
-                                + segGlobals_->getFirstSegOffset());
      least_addr_ = (char*)firstBlk_;
      initTop(firstBlk_);
   }
@@ -2941,9 +2762,7 @@
   // getSharedMemory() check alone is enough since it will return for both
   // global and process stats heap. Leaving the rest of the condition here
   //
-  if (getSharedMemory() || (parent_ && parent_->getType() == EXECUTOR_MEMORY &&
-             parent_->getSegGlobals() != NULL &&
-             parent_->getSegGlobals()->getFirstSegId() == getStatsSegmentId()))
+  if (getSharedMemory())
   {
     // Check if you are within semaphore
     if (! checkIfRTSSemaphoreLocked())
@@ -2988,10 +2807,7 @@
   // then allocate a NABlock using mmap(). This prevents any changes
   // to the "top_" fragment and allows the memory used by the request to
   // be returned to the operating system when the user frees it.
-  if (additionalUserSize >= MIN_MMAP_ALLOC_SIZE && parent_ == NULL
-     && (getSegGlobals() == NULL ||
-         getSegGlobals()->getFirstSegId() != getStatsSegmentId())
-     )
+  if (additionalUserSize >= MIN_MMAP_ALLOC_SIZE && parent_ == NULL && (! getSharedMemory()))
   {
 
     nb = PAD_REQUEST(additionalUserSize);
@@ -3163,11 +2979,7 @@
         (*errCallback_)(this, userSize);
 
       if (failureIsFatal) {
-        // Might never return...
         handleExhaustedMemory();
-        // If we return from this call it means that the caller wanted
-        // a memory allocation failure to be fatal yet did not set the
-        // the jump buffer.  This is not good.
         abort();
       }
 
@@ -3274,9 +3086,7 @@
 
   NAMutex mutex(threadSafe_, &mutex_);
  
-  if (getSharedMemory() || (parent_ && parent_->getType() == EXECUTOR_MEMORY &&
-             parent_->getSegGlobals() != NULL &&
-             parent_->getSegGlobals()->getFirstSegId() == getStatsSegmentId()))
+  if (getSharedMemory())
   {
     // Check if you are within semaphore
     if (! checkIfRTSSemaphoreLocked())
@@ -4103,7 +3913,6 @@
   if (rc) return rc;
   if (failureIsFatal) 
     {
-      // Might never return...
       handleExhaustedMemory();
       abort();
     }
diff --git a/core/sql/common/NAMemory.h b/core/sql/common/NAMemory.h
index a58461f..009ec1a 100644
--- a/core/sql/common/NAMemory.h
+++ b/core/sql/common/NAMemory.h
@@ -52,7 +52,6 @@
 #include "Platform.h"
 #include "NAStringDefGlobals.h"
 #include <stddef.h>
-#include <setjmp.h>
 
 #include "NAError.h"
 #include "HeapID.h"
@@ -74,7 +73,6 @@
 
 // contents of this file:
 class NAMemory;
-class NASegGlobals;
 class NABlock;
 class NAHeap;
 class NAHeapFragment;
@@ -108,84 +106,6 @@
                          // 17 - all objects > 2 MB
 };
 
-////////////////////////////////////////////////////////////////////////////
-// One NASegGlobals object exists in the executor as a member variable
-// of CliGlobals. Information about the first executor flat segment, as well
-// as the address of the NASegGlobals object in which the information is
-// stored, is passed as arguments to the setFirstSegInfo function, and used
-// by NAMEMORY::allocateBlock. addSegId is called by NAMemory::allocateBlock
-// to maintain an array of secondary (allocated after the first) flat segment
-// ids. getSegId is called on MIPS by switchToPriv and switchToNonPriv to
-// obtain the flat segment ids to hide and reveal.  On Yosemite, the
-// segments are not hidden and revealed.
-////////////////////////////////////////////////////////////////////////////
-class NASegGlobals {
-public:
-  inline short getSegId(Lng32 &index) const
-  {
-    Lng32 i, addedSegCount;
-    for (i = 0, addedSegCount = 0; addedSegCount < addedSegCount_; i++)
-    {
-      if (addedSegId_[i] != 0)
-      {
-        addedSegCount += 1;
-        if (i >= index)
-        {
-          index = i;
-          return addedSegId_[i];
-        }
-      }
-    }
-    return 0;
-  }
-
-  short getSegInfo(Lng32 index, void **startAddr) const
-    {
-      *startAddr = startAddresses_[index];
-      return addedSegId_[index];
-    }
-  void   setFirstSegInfo(
-			 SEG_ID firstSegId,
-                         void *firstSegStart,
-                         off_t  firstSegOffset,
-                         size_t  firstSegLen,
-                         size_t  firstSegMaxLen);
-  void   setMaxSecSegCount(Lng32 maxSecSegCount)
-                                { maxSecSegCount_ = maxSecSegCount; }
-  NABoolean reachedMaxSegCnt() const
-                                { return addedSegCount_ >= maxSecSegCount_; }
-  Lng32   addSegId(short segId, void *start, size_t len);
-  void   deleteSegId(short segId);
-  SEG_ID  getFirstSegId() const			 { return firstSegId_; }
-  void * getFirstSegStart() const                { return firstSegStart_; }
-  off_t   getFirstSegOffset() const              { return firstSegOffset_; }
-  size_t   getFirstSegLen() const                { return firstSegLen_; }
-  size_t   getFirstSegMaxLen() const             { return firstSegMaxLen_; }
-  void   resizeSeg(short segId, void *start, size_t newLen);
-
-  // check whether a specified range of memory overlaps any of the segments
-  NABoolean overlaps(void *start, size_t len) const;
-
-  enum { NA_MAX_SECONDARY_SEGS=28 };   // Seg IDs 2111 - 2138
-private:
-  SEG_ID firstSegId_;
-  void *firstSegStart_;    // starting addr of segment
-  off_t  firstSegOffset_;  // offset of free space in the segment
-  size_t  firstSegLen_;    // length of external segment
-  size_t  firstSegMaxLen_; // max. len the segment can be resized to
-  Lng32	addedSegCount_;    // number of additional segments
-  Lng32  maxSecSegCount_;  // Maximum number of secondary segments
-  short addedSegId_     [NA_MAX_SECONDARY_SEGS]; // array of secondary seg ids
-  void *startAddresses_ [NA_MAX_SECONDARY_SEGS]; // start addresses of segs
-  size_t lengths_[NA_MAX_SECONDARY_SEGS]; // lengths of segments
-
-  // total range of memory spanned by the segments (may have other
-  // things or holes between those water marks)
-  void *lowWaterMark_;
-  void *highWaterMark_;
-};
-
-
 
 ////////////////////////////////////////////////////////////////////////////
 // A NABlock is the basic allocation unit, i.e., we always request Blocks
@@ -264,15 +184,6 @@
   // time. Before a memory can be used, the type_ has to be set via
   // setType() or setParent()
   //
-  // The number of NAMemory objects of NAMemoryType SYSTEM_MEMORY or IPC_MEMORY
-  // is currently restricted to one because the assignment of flat segment ids
-  // (NSK) for SYSTEM/IPC_MEMORY is not managed globally. If one such object
-  // already exists, an assertion failure will occur following an attempt to
-  // to allocate a segment with an id that was previously used. This is not
-  // a problem because only the compiler's main statement heap resides in
-  // SYSTEM_MEMORY, and only ESPs use IPC_Memory. If needed in the future, 
-  // multiple SYSTEM/IPC _MEMORY heaps could be supported by keeping track of
-  // SYSTEM_MEMORY/IPC segment ids in the NASegGlobals object.
   enum NAMemoryType {
     NO_MEMORY_TYPE = 0,
     EXECUTOR_MEMORY = 2,
@@ -305,22 +216,18 @@
   NAMemory(const char * name, NAMemoryType type, size_t blockSize,
            size_t upperLimit);
 
-  // an NAMemory of type EXECUTOR_MEMORY that uses a first flat segment
-  // that is allocated by the caller (on NSK, uses malloc on NT and ignores
-  // the parameters after the first one)
-  NAMemory(const char * name,
-           SEG_ID  extFirstSegId,
-           void  * extFirstSegStart,
-           off_t   extFirstSegOffset,
-           size_t  extFirstSegLen,
-           size_t  extFirstSegMaxLen,
-           NASegGlobals  * segGlobals,
-           Lng32   extMaxSecSegCount = NASegGlobals::NA_MAX_SECONDARY_SEGS);
-
   // DERIVED_MEMORY
   NAMemory(const char * name, NAHeap * parent, size_t blockSize,
            size_t upperLimit);
 
+  // an NAMemory of type EXECUTOR_MEMORY that imposes the NAMemory structure 
+  // on already allocated memory
+  NAMemory(const char * name,
+           SEG_ID segmentId,
+           void  * baseAddr,
+           off_t   heapStartOffset,
+           size_t  maxSize);
+
   ~NAMemory();
 
   void reInitialize();
@@ -339,19 +246,10 @@
   // It is used to deallocate the above arrays.
   void deallocateMemory(void * addr);
 
-  // this method is used to set the upper limit - currently only used for testing
-  // setjmp and longjmp
   void setUpperLimit ( size_t newUpperLimit ) { upperLimit_ = newUpperLimit; };
   
-  // these four methods used to reside in class CollHeap 
-  void setJmpBuf( jmp_buf *newJmpBuf );
-
-  inline jmp_buf * getJmpBuf()                { return heapJumpBuf_; }
-
   inline NABoolean getWasMemoryExhausted()    { return exhaustedMem_; }
 
-  void logAllocateError(short error, SEG_ID segmentId, Lng32 blockSize, short errorDetail);
-
   void handleExhaustedMemory();
 
 #if (defined(_DEBUG) || defined(NSK_MEMDEBUG))
@@ -384,7 +282,6 @@
 
   inline void resetIntervalWaterMark() { intervalWaterMark_ = allocSize_;};
 
-  inline NASegGlobals * getSegGlobals() { return segGlobals_; }
   char *getName() {  return name_; }
   NAMemoryType getType() {  return type_; }
 
@@ -461,8 +358,6 @@
                               // that was allocated before this memory
                               // was created (allows to put the memory
                               // itself and other info into the segment)
-  NASegGlobals *segGlobals_;  // Executor flat segment globals object
-
   NAMemory *memoryList_;      // list of memory directly derived from this
   NAMemory *lastListEntry_;   // last entry of this list
   NAMemory *nextEntry_;       // pointer if this memory is on a memoryList_
@@ -473,7 +368,6 @@
 
   // these data members used to be in class CollHeap
 protected:
-  jmp_buf *heapJumpBuf_;      // Setjmp() buffer for handing memory failures
   NABoolean exhaustedMem_;    // Set to true if cannot satisfy memory request
   unsigned short errorsMask_; // SEGMENT_ALLOCATE_ errors that have occurred
   HeapID heapID_;             // For tracking leaks.  (eric)
@@ -542,17 +436,19 @@
   NAHeap(const char * name, 
 	 NAHeap * parent, 
 	 Lng32 blockSize = 0, 
-	 Lng32 upperLimit =0);
-  NAHeap(const char * name, NAMemoryType type = DERIVED_FROM_SYS_HEAP, 
-         Lng32 blockSize = 0, Lng32 upperLimit = 0);
-  NAHeap(const char  * name,
-         SEG_ID   extFirstSegId,
-	 void  * extFirstSegStart,
-	 Lng32    extFirstSegOffset,
-	 Lng32    extFirstSegLen,
-	 Lng32    extFirstSegMaxLen,
-	 NASegGlobals *segGlobals,
-         Lng32    extMaxSecSegCount = NASegGlobals::NA_MAX_SECONDARY_SEGS);
+	 size_t upperLimit =0);
+  NAHeap(const char * name, NAMemoryType type,
+         Lng32 blockSize = 0, size_t upperLimit = 0);
+
+  // Constructor that imposes the NAHeap structure on already allocated memory 
+  NAHeap(const char * name,
+           SEG_ID  segmentId,
+           void  * baseAddr,
+           off_t   heapStartOffset,
+           size_t  maxSize);
+
+  NAHeap(const char  * name);
+
   ~NAHeap();
   void destroy();
   void reInitializeHeap();
diff --git a/core/sql/executor/ExExeUtil.h b/core/sql/executor/ExExeUtil.h
index 2965d21..d26f660 100644
--- a/core/sql/executor/ExExeUtil.h
+++ b/core/sql/executor/ExExeUtil.h
@@ -2935,7 +2935,7 @@
   {
     return (ExExeUtilLobExtractTdb &) tdb;
   };
-  LOBglobals *getLobGlobals() { return lobGlobals_;}
+  ExLobGlobals *&getLobGlobals() { return exLobGlobals_;}
  protected:
   enum Step
   {
@@ -2985,7 +2985,7 @@
   ExLobStats lobStats_;
   char statusString_[200];
   fstream indata_;
-  LOBglobals *lobGlobals_;
+  ExLobGlobals *exLobGlobals_;
 };
 
 
@@ -3041,7 +3041,7 @@
   {
     return (ExExeUtilLobUpdateTdb &) tdb;
   };
-  LOBglobals *getLobGlobals() { return lobGlobals_;}
+  ExLobGlobals *&getLobGlobals() { return exLobGlobals_;}
  protected:
   enum Step
     {
@@ -3071,7 +3071,7 @@
   ExLobStats lobStats_;
   char statusString_[200];
   fstream indata_;
-  LOBglobals *lobGlobals_;
+  ExLobGlobals *exLobGlobals_;
 };
 // -----------------------------------------------------------------------
 // ExExeUtilFileUpdateTcb
diff --git a/core/sql/executor/ExExeUtilExplain.cpp b/core/sql/executor/ExExeUtilExplain.cpp
index 43f284b..cb7241f 100644
--- a/core/sql/executor/ExExeUtilExplain.cpp
+++ b/core/sql/executor/ExExeUtilExplain.cpp
@@ -3923,10 +3923,8 @@
 		// description field.
 		// All other errors are reported.
 		cliInterface()->retrieveSQLDiagnostics(getDiagsArea());
-		if (((getDiagsArea()->contains(-1292)) ||
-		     (getDiagsArea()->contains(-1293))) ||
-		    (exeUtilTdb().loadIfExists() &&
-		     getDiagsArea()->contains(-1055)))
+		if (exeUtilTdb().loadIfExists() &&
+		     getDiagsArea()->contains(-1055))
 		  {
 		    SQL_EXEC_ClearDiagnostics(NULL);
 		  }
diff --git a/core/sql/executor/ExExeUtilGetStats.cpp b/core/sql/executor/ExExeUtilGetStats.cpp
index 469f6fd..363fd89 100644
--- a/core/sql/executor/ExExeUtilGetStats.cpp
+++ b/core/sql/executor/ExExeUtilGetStats.cpp
@@ -3123,6 +3123,7 @@
     case FORMAT_AND_RETURN_BMO_STATS_:
       {
         const char *ofMode;
+        Int32 dop = 1;
         for (; currStatsItemEntry_ < maxBMOStatsItems_; currStatsItemEntry_++)
         {
           i = (short)currStatsItemEntry_;
@@ -3148,6 +3149,7 @@
                sprintf(&statsBuf_[strlen(statsBuf_)], "%20s", ofMode);
             break;
           case SQLSTATS_DOP:
+            dop = (Int32)bmoStatsItems_[i].int64_value;
             sprintf(&statsBuf_[strlen(statsBuf_)], "%10ld", bmoStatsItems_[i].int64_value);
             break;
           case SQLSTATS_TOPN:
@@ -3196,8 +3198,8 @@
             sprintf(&statsBuf_[strlen(statsBuf_)], "%20s", Int64Val);
             break;
           case SQLSTATS_BMO_EST_MEMORY:
-            sprintf(formattedFloatVal, "%.6g",  bmoStatsItems_[i].double_value);
-            str_sprintf(&statsBuf_[strlen(statsBuf_)], "%20s", formattedFloatVal);
+            sprintf(formattedFloatVal, "%.6g",  bmoStatsItems_[i].double_value * dop);
+            str_sprintf(&statsBuf_[strlen(statsBuf_)], "%-20s", formattedFloatVal);
             break;
           case SQLSTATS_BMO_SPACE_BUFFER_SIZE:
             sprintf(Int64Val, "%ld", bmoStatsItems_[i].int64_value);
diff --git a/core/sql/executor/ExExeUtilLoad.cpp b/core/sql/executor/ExExeUtilLoad.cpp
index 7ad1a50..ab1580c 100644
--- a/core/sql/executor/ExExeUtilLoad.cpp
+++ b/core/sql/executor/ExExeUtilLoad.cpp
@@ -2738,13 +2738,12 @@
 
   requestTag_ = -1;
   lobLoc_[0] = '\0';
+  exLobGlobals_ = NULL;
  
-  lobGlobals_ = 
-    new(currContext->exHeap()) LOBglobals(currContext->exHeap());
-  ExpLOBoper::initLOBglobal
-    (lobGlobals_->lobAccessGlobals(), 
-     currContext->exHeap(),currContext,lobTdb().getLobHdfsServer(),
-               lobTdb().getLobHdfsPort());
+  ExpLOBinterfaceInit(exLobGlobals_,currContext->exHeap(),currContext,TRUE,
+                      lobTdb().getLobHdfsServer(),
+                      lobTdb().getLobHdfsPort());
+                                     
     
 
 }
@@ -2753,7 +2752,9 @@
 {
   Lng32 cliRC = 0;
   Lng32 retcode = 0;
-  ExLobGlobals * lobGlobs = getLobGlobals()->lobAccessGlobals();
+
+  ExLobGlobals * lobGlobs = getLobGlobals();
+
   ContextCli *currContext =
     getGlobals()->castToExExeStmtGlobals()->castToExMasterStmtGlobals()->
     getStatement()->getContext();
@@ -2780,8 +2781,10 @@
 	       3, // close
                0); // open type not applicable
 
-   NADELETE(lobGlobals_,LOBglobals,currContext->exHeap());
-  lobGlobals_ = NULL;
+    
+  ExpLOBinterfaceCleanup
+    (exLobGlobals_, currContext->exHeap());
+  exLobGlobals_ = NULL;
 }
 
 ExExeUtilLobExtractTcb::~ExExeUtilLobExtractTcb()
@@ -2812,7 +2815,7 @@
 
   
 
-  ExLobGlobals * lobGlobs = getLobGlobals()->lobAccessGlobals();
+  ExLobGlobals * lobGlobs = getLobGlobals();
 
   ex_queue_entry * centry = NULL;
   
@@ -3189,7 +3192,7 @@
 						lobHandle_,
 						requestTag,
 						so,
-						((LOBglobals *)lobGlobs)->xnId(),
+						-1,
 						0,0,					       					
 						0, lobDataLen_, lobDataOutputLen, 
 						lobTdb().getFileName(),
@@ -3485,12 +3488,12 @@
     getStatement()->getContext();
   lobHandleLen_ = 2050;
   lobHandle_[0] = '\0';
-  lobGlobals_ = 
-    new(currContext->exHeap()) LOBglobals(currContext->exHeap());
-  ExpLOBoper::initLOBglobal
-    (lobGlobals_->lobAccessGlobals(), 
-     currContext->exHeap(),currContext,lobTdb().getLobHdfsServer(),
-               lobTdb().getLobHdfsPort());
+  exLobGlobals_=NULL;
+
+  ExpLOBinterfaceInit(exLobGlobals_,currContext->exHeap(),currContext,TRUE,
+                      lobTdb().getLobHdfsServer(),
+                      lobTdb().getLobHdfsPort());
+                                     
 }
 ExExeUtilLobUpdateTcb::~ExExeUtilLobUpdateTcb()
 {
@@ -3502,7 +3505,8 @@
  ContextCli *currContext =
     getGlobals()->castToExExeStmtGlobals()->castToExMasterStmtGlobals()->
     getStatement()->getContext();
-   NADELETE(lobGlobals_,LOBglobals,currContext->exHeap());
+ ExpLOBinterfaceCleanup(exLobGlobals_, currContext->exHeap());
+ exLobGlobals_ = NULL;
 }
 
 short ExExeUtilLobUpdateTcb::work()
@@ -3536,7 +3540,7 @@
   Int64 lobLen = lobTdb().updateSize();
   char * data = (char *)(lobTdb().getBufAddr());
  
-  ExLobGlobals * lobGlobs = getLobGlobals()->lobAccessGlobals();
+  ExLobGlobals * lobGlobs = getLobGlobals();
 
   while (1)
     {
@@ -3660,7 +3664,7 @@
                                             lobHandle_,
                                             &outHandleLen, outLobHandle,
                                             requestTag,
-                                            getLobGlobals()->xnId(),
+                                            -1,
                                             0,
                                             1,
                                             so,
@@ -3756,7 +3760,7 @@
                                             lobHandle_,
                                             &outHandleLen, outLobHandle,
                                             requestTag,
-                                            getLobGlobals()->xnId(),
+                                            -1,
                                             0,
                                             1,
                                             so,
@@ -3854,7 +3858,7 @@
                                             lobHandle_,
                                             &outHandleLen, outLobHandle,
                                             requestTag,
-                                            getLobGlobals()->xnId(),
+                                            -1,
                                             0,
                                             1,
                                             so,
@@ -3982,7 +3986,7 @@
 
   
 
-  ExLobGlobals * lobGlobs = getLobGlobals()->lobAccessGlobals();
+  ExLobGlobals * lobGlobs = getLobGlobals();
 
   while (1)
     {
@@ -4235,7 +4239,7 @@
 
   
 
-  ExLobGlobals * lobGlobs = getLobGlobals()->lobAccessGlobals();
+  ExLobGlobals * lobGlobs = getLobGlobals();
 
   while (1)
     {
diff --git a/core/sql/executor/ExSimpleSqlBuffer.cpp b/core/sql/executor/ExSimpleSqlBuffer.cpp
index d62e781..1405586 100644
--- a/core/sql/executor/ExSimpleSqlBuffer.cpp
+++ b/core/sql/executor/ExSimpleSqlBuffer.cpp
@@ -214,9 +214,6 @@
 #endif
     }
 
-  // If we could not get enough memory for at least one tuple, let the
-  // memory manager do a longjmp by requesting the original number of 
-  // tuples to be allocated while passing true for failureIsFatal.
   if (!data_)
     {
       nBytes = tuplesRequested * allocationSize_;
diff --git a/core/sql/executor/ex_ex.cpp b/core/sql/executor/ex_ex.cpp
index da1a411..8e5b7a1 100644
--- a/core/sql/executor/ex_ex.cpp
+++ b/core/sql/executor/ex_ex.cpp
@@ -646,11 +646,3 @@
 {
   return tcb->getGlobals()->computeSpace();
 }
-
-void ex_log_ems( const char *f, Int32 l, const char * m)
-{
-}
-void assert_botch_longjmp( const char *f, Int32 l, const char * m)
-{
-
-}
diff --git a/core/sql/executor/ex_ex.h b/core/sql/executor/ex_ex.h
index 421cb3c..75ba99f 100644
--- a/core/sql/executor/ex_ex.h
+++ b/core/sql/executor/ex_ex.h
@@ -38,20 +38,13 @@
  */
 
 // -----------------------------------------------------------------------
-#include <setjmp.h>
 #include "Platform.h"
 
-extern jmp_buf ExeBuf;
-
 //typedef	int		(*funcptr) (void *);
 typedef	Int32	funcptr;    // for now
 
 #define logInternalError(r) ((short)r)
 
-void ex_log_ems( const char *f, Int32 l, const char * m);
-
-void assert_botch_longjmp( const char *f, int l, const char * m);
-
 #define ex_assert(p, msg) if (!(p)) { assert_botch_abend( __FILE__ , __LINE__ , msg); };
 
 class	ex_expr;	// to be defined
diff --git a/core/sql/executor/ex_globals.cpp b/core/sql/executor/ex_globals.cpp
index e6f5252..b3c8ba3 100644
--- a/core/sql/executor/ex_globals.cpp
+++ b/core/sql/executor/ex_globals.cpp
@@ -61,9 +61,9 @@
        injectErrorAtQueueFreq_(0),
        flags_(0),
        planVersion_(0),
-       jmpInScope_(FALSE),
        sharedPool_(NULL),
-       rowNum_(1)
+       rowNum_(1),
+       exLobGlobals_(NULL)
 {
   // Small data items are allocated using space rather than heap so that
   // the allocation of memory for the heap can be avoided in simple queries.
@@ -81,19 +81,17 @@
 	tempList_[i] = NULL;
     }
 
-  
-  lobGlobals_ = new(heap_) LOBglobals(heap_);
 }
 
 ExLobGlobals *&ex_globals::getExLobGlobal() 
 { 
-   return lobGlobals()->lobAccessGlobals(); 
+  return exLobGlobals_;
 }
 
 void ex_globals::initLOBglobal(ContextCli *context)
 {
   // initialize lob interface
-  ExpLOBoper::initLOBglobal(getExLobGlobal(), (NAHeap *)heap_, context, (char *)"default", (Int32)0);
+  ExpLOBoper::initLOBglobal(exLobGlobals_, (NAHeap *)heap_, context, (char *)"default", (Int32)0);
 
 }
 
@@ -106,8 +104,7 @@
   tempList_ = NULL;
   
   tcbList_.allocate(0);
-
-  lobGlobals_ = new(heap_) LOBglobals(heap_);
+  exLobGlobals_ = NULL;
 }
 
 void ex_globals::deleteMe(NABoolean fatalError)
@@ -137,8 +134,6 @@
   statsArea_ = NULL;
   cleanupTcbs();
   tcbList_.deallocate();
-  NADELETE(lobGlobals_,LOBglobals,heap_);
-  lobGlobals_ = NULL;
 }
 
 void ex_globals::deleteMemory(void *mem)
diff --git a/core/sql/executor/ex_globals.h b/core/sql/executor/ex_globals.h
index 9a84a73..8ec9d76 100644
--- a/core/sql/executor/ex_globals.h
+++ b/core/sql/executor/ex_globals.h
@@ -38,8 +38,6 @@
 ******************************************************************************
 */
 
-#include <setjmp.h>
-
 #include "Platform.h"
 #include "ExCollections.h"
 #include "Int64.h"
@@ -114,10 +112,6 @@
   ExStatisticsArea* getOrigStatsArea() 
   { return statsArea_; }
 
-  inline jmp_buf *getJmpBuf()             { return &longJmpTgt_; }
-  inline void setJmpInScope(NABoolean jmpInScope)
-    { jmpInScope_ = jmpInScope; }
-  inline NABoolean IsJmpInScope() { return jmpInScope_; }
 
   inline void setEventConsumed(UInt32 *eventConsumed)
     { eventConsumedAddr_ = eventConsumed; }
@@ -188,7 +182,6 @@
   inline void setSharedPool(sql_buffer_pool *p) { sharedPool_ = p; }
 
   ExLobGlobals *&getExLobGlobal();
-  LOBglobals * lobGlobals() { return lobGlobals_; }
   
   void initLOBglobal(ContextCli *context);
   
@@ -236,10 +229,6 @@
   // pointer to the statsArea (if statistics are collected)
   ExStatisticsArea * statsArea_;
 
-  // for handling tcb-build-time errors, and memory alloc errors.
-  jmp_buf  longJmpTgt_;
-  NABoolean jmpInScope_;
-
   // for cleanup.
   LIST(ex_tcb *) tcbList_;
 
@@ -268,8 +257,7 @@
 
   // pool shared by among PAs under PAPA
   sql_buffer_pool *sharedPool_;
-
-  LOBglobals * lobGlobals_;
+  ExLobGlobals * exLobGlobals_;
 
   // pointer passed to interface methods that store and retrieve lob data
   // from flatfile or hdfs filesystem.
diff --git a/core/sql/executor/ex_mj.cpp b/core/sql/executor/ex_mj.cpp
index e00b769..05e8227 100644
--- a/core/sql/executor/ex_mj.cpp
+++ b/core/sql/executor/ex_mj.cpp
@@ -441,8 +441,7 @@
     {
 
       
-      tspace_->reacquireResources();  // failure will invoke longjmp handler
-
+      tspace_->reacquireResources();  
 
       if (mjTdb().getLogDiagnostics())
         {
diff --git a/core/sql/executor/ex_root.cpp b/core/sql/executor/ex_root.cpp
index e8b264f..3b3a4d0 100644
--- a/core/sql/executor/ex_root.cpp
+++ b/core/sql/executor/ex_root.cpp
@@ -66,10 +66,6 @@
 #include "ExSMGlobals.h"
 #include "ExSMCommon.h"
 #include "ExpHbaseInterface.h"
-// this contains the location where a longjmp is done after
-// an assertion failure in executor. See file ex_ex.h.
-jmp_buf ExeBuf;
-
 
 ////////////////////////////////////////////////////////////////////////
 //  TDB procedures
@@ -97,19 +93,9 @@
     }
   }
 
-  Int32 jmpRc;
-
-  
   // set this plan version in the statement globals.
   glob->setPlanVersion(planVersion_);
 
-  jmp_buf *jmpBufPtr;
-  if (setjmp(ExeBuf))
-    {
-      // an error may be stored in the global diags area
-      return NULL;
-    }
-
   // set the fragment directory in glob. This will be passed
   // to the build of all tdb's and used by them, if needed.
   master_glob->setFragDir(fragDir_);
@@ -165,22 +151,6 @@
     return NULL;
   }
 
-  exe_glob->getSpace()->setJmpBuf(exe_glob->getJmpBuf());
-  // TBD -- do the same (as above) for master_glob->getDefaultHeap ????
-
-//#ifndef NA_YOS
-   jmpBufPtr = exe_glob->getJmpBuf();
-   jmpRc =  setjmp(*jmpBufPtr);
-//#endif // NA_YOS
-
-  if (jmpRc)
-    {
-      exe_glob->cleanupTcbs();
-      if (exe_glob->getSpace()->getWasMemoryExhausted())
-        exe_glob->makeMemoryCondition(-EXE_NO_MEM_TO_BUILD); 
-      return NULL;
-    }
-
   if (getQueryUsesSM() && cliGlobals->getEnvironment()->smEnabled())
   {
     // Assign a SeaMonster ID to the query
@@ -673,43 +643,8 @@
 {
   Int32 jmpRc = 0;
 
-  // This setjmp is for assertion failure.  It won't work when the 
-  // executor is multi-threaded.
-  jmpRc = setjmp(ExeBuf);
-  if (jmpRc)
-   {
-      fatalError_ = TRUE;
-      if (jmpRc == MEMALLOC_FAILURE)
-      {
-         if (diagsArea == NULL)
-            diagsArea = ComDiagsArea::allocate(getHeap());
-         *diagsArea << DgSqlCode(-EXE_NO_MEM_TO_EXEC);
-         return -EXE_NO_MEM_TO_EXEC; 
-      }
-      else
-         return -1;
-    }
 
   ExMasterStmtGlobals *master_glob = glob->castToExMasterStmtGlobals();
-  master_glob->getSpace()->setJmpBuf(master_glob->getJmpBuf());
-
-//#ifndef NA_YOS
-  jmpRc =  setjmp(*master_glob->getJmpBuf());
-//#endif // NA_YOS
-  if (jmpRc)
-    {
-      fatalError_ = TRUE;
-      if (master_glob->getSpace()->getWasMemoryExhausted())
-      {
-        glob->makeMemoryCondition(-EXE_NO_MEM_TO_EXEC);
-        if (diagsArea == NULL) 
-           diagsArea = ComDiagsArea::allocate(getHeap());
-        *diagsArea << DgSqlCode(-EXE_NO_MEM_TO_EXEC);
-        return -EXE_NO_MEM_TO_EXEC; 
-      }
-      else
-         return -1;
-    }
 
   if (fatalError_)
     {
@@ -1126,19 +1061,6 @@
   // processing once the queue becomes empty.
   //
 
-  // Much of the "catastropic error" handling code (especially the 
-  // code that tries to handle longjmps) assumes that the diagsArea 
-  // passed in is NULL.  As of now, all of the Statement's calls 
-  // to this method do send in NULL, but if this ever changes, a 
-  // memory leak will result.  Hence the assertion:
-  // The assertion was removed by Gil Siegel because:
-  // a) fetchMultiple can call with a diagsArea if a warning  occurred
-  // b) longjmp is no longer done in the master executor or mxesp so a leak
-  //    is no longer a possibility
-
-//  ex_assert( diagsArea == NULL, 
-//    "Non-null diagsArea sent to ex_root_tcb::fetch can cause memory leaks.");
-
   // For the GET_NEXT_N protocol, we should only return when a Q_GET_DONE is
   //  received.  In addition, due to the incomplete implementation of the 
   //  GET_NEXT_N protocol, it is possible to receive a Q_GET_DONE without
@@ -1151,50 +1073,8 @@
   NABoolean nextIsQNoData         = FALSE;
   if (newOperation)
     time_of_fetch_call_usec_      = NA_JulianTimestamp();
-  Int32 jmpRc = 0;
-
-  // enable executor exception handling
-  // (ExeBuf should be moved to executor globals, except that would 
-  // mean that globals would need to be available to any code that
-  // does an assertion.) $$$$
-  // This is for assertion failure.  It might not work when the executor
-  // is multi-threaded.
-  jmpRc = setjmp(ExeBuf);
-  if (jmpRc)
-    {
-      fatalError_ = TRUE;
-      if (jmpRc == MEMALLOC_FAILURE)
-      {
-         if (diagsArea == NULL)
-            diagsArea = ComDiagsArea::allocate(getHeap());
-         *diagsArea << DgSqlCode(-EXE_NO_MEM_TO_EXEC);
-         return -EXE_NO_MEM_TO_EXEC; 
-      }
-      else
-      return -1;
-    }
 
   ExMasterStmtGlobals *master_glob = glob->castToExMasterStmtGlobals();
-  master_glob->getSpace()->setJmpBuf(master_glob->getJmpBuf());
- 
-//#ifndef NA_YOS 
-  jmpRc =  setjmp(*master_glob->getJmpBuf());
-//#endif // NA_YOS
-
-  if (jmpRc)
-    {
-      fatalError_ = TRUE;
-      if (master_glob->getSpace()->getWasMemoryExhausted())
-      {
-         glob->makeMemoryCondition(-EXE_NO_MEM_TO_EXEC);
-         if (diagsArea == NULL) 
-            diagsArea = ComDiagsArea::allocate(getHeap());
-         *diagsArea << DgSqlCode(-EXE_NO_MEM_TO_EXEC);
-         return -EXE_NO_MEM_TO_EXEC; 
-      }
-      else
-         return -1;
-    }
 
   // start off by calling the scheduler (again)
   ExWorkProcRetcode schedRetcode = WORK_CALL_AGAIN;
@@ -2009,8 +1889,6 @@
 			    Descriptor * output_desc,
 			    ComDiagsArea*& diagsArea)
 {
-  // $$TBD: put in code to do setjmp's
-
   ExMasterStmtGlobals *master_glob = getGlobals()->
 	            castToExExeStmtGlobals()->castToExMasterStmtGlobals(); 
 
diff --git a/core/sql/executor/ex_sort.cpp b/core/sql/executor/ex_sort.cpp
index d7358d4..cbc8935 100644
--- a/core/sql/executor/ex_sort.cpp
+++ b/core/sql/executor/ex_sort.cpp
@@ -749,14 +749,6 @@
   ExSortPrivateState &pstate = *((ExSortPrivateState*) pentry_down->pstate);
   ex_queue::down_request request = pentry_down->downState.request;
 
-  //A jump handler is introduced here as safty measure to handle any
-  //memory allocation failures(from sortHeap_). If NAHeap fails
-  //to allocate memory, it calls handleExhaustedMemory that performs
-  //a longjmp to this location. Basically pstate is set to error
-  //and cleanup is performed. This feature is only enabled in calls
-  //to allocateMemory by setting the failureIsFatal flag which is 
-  //always set by default.
-   
   //while there are requests in the parent down queue, process them
   while (qparent_.down->getHeadIndex() != processedInputs_)
     {
@@ -1944,14 +1936,6 @@
   
   ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
 
-  //A jump handler is introduced here as safty measure to handle any
-  //memory allocation failures(from sortHeap_). If NAHeap fails
-  //to allocate memory, it calls handleExhaustedMemory that performs
-  //a longjmp to this location. Basically pstate is set to error
-  //and cleanup is performed. This feature is only enabled in calls
-  //to allocateMemory by setting the failureIsFatal flag which is 
-  //always set by default.
-
   if (pentry_down->downState.request == ex_queue::GET_NOMORE)
     {
       // cancel request
diff --git a/core/sql/exp/ExpErrorEnums.h b/core/sql/exp/ExpErrorEnums.h
index d9aedf7..ba604bf 100644
--- a/core/sql/exp/ExpErrorEnums.h
+++ b/core/sql/exp/ExpErrorEnums.h
@@ -182,7 +182,7 @@
   EXE_NO_MEM_TO_BUILD			= 8570,
   EXE_NO_MEM_TO_EXEC 			= 8571,
   EXE_CANNOT_CONTINUE                   = 8572,
-  EXE_ACCESS_VIOLATION                  = 8573,
+  // unused                             = 8573,
   
   // ------------------------------------------------------------
   // Error 8574, lost open. Could result in reopening the table.
@@ -340,7 +340,7 @@
   CLI_USER_ENDED_XN_CLEANUP             = 8844,
   CLI_INTERR_NULL_TCB                   = 8845,
   CLI_EMPTY_SQL_STMT                    = 8846,
-  CLI_SQLMP_RTD_ERROR			= 8847,
+  // unused      			= 8847,
   CLI_CANCEL_REJECTED                   = 8848,
   CLI_NON_CURSOR_UPDEL_TABLE            = 8850,
   CLI_USER_MEMORY_IN_EXECUTOR_SEGMENT   = 8851,
@@ -394,7 +394,7 @@
 
   CLI_ARKCMP_INIT_FAILED		= 8890,
   CLI_NOT_ASCII_CHAR_TYPE		= 8891,
-  CLI_RTD_BUFFER_TOO_SMALL		= 8892,
+  // unused				= 8892,
   CLI_STMT_DESC_COUNT_MISMATCH          = 8893,
   CLI_RESERVED_ARGUMENT                 = 8894,
   CLI_INVALID_CHARSET_FOR_DESCRIPTOR    = 8895,
diff --git a/core/sql/exp/ExpLOB.cpp b/core/sql/exp/ExpLOB.cpp
index ec69403..c4336f4 100644
--- a/core/sql/exp/ExpLOB.cpp
+++ b/core/sql/exp/ExpLOB.cpp
@@ -824,7 +824,7 @@
          tgtLobName, 
          so,
          lobStorageLocation(),lobStorageType(),
-         getExeGlobals()->lobGlobals()->xnId(),
+         -1,
          handleLen, lobHandle,  &outHandleLen_, outLobHandle_,            
          lobData, lobLen, blackBox_, blackBoxLen_,lobMaxSize, getLobMaxChunkMemSize(),getLobGCLimit()); 
         
@@ -841,7 +841,7 @@
      &outHandleLen_, outLobHandle_,
      blackBoxLen_, blackBox_,
      requestTag_,
-     getExeGlobals()->lobGlobals()->xnId(),
+     -1,
      descSyskey,
      lo,
      &cliError,
@@ -882,12 +882,6 @@
 
   getOperand(0)->setVarLength(handleLen, op_data[-MAX_OPERANDS]);
 
-  if (NOT fromExternal())
-    {
-      getExeGlobals()->lobGlobals()->lobLoadInfo()->
-	setLobHandle(lobNum(), handleLen, lobHandle);
-    }
-
   return ex_expr::EXPR_OK;
 }
 
@@ -996,7 +990,7 @@
 				 blackBoxLen_, blackBox_,
 
 				 requestTag_,
-				 getExeGlobals()->lobGlobals()->xnId(),
+				 -1,
 				 
 				 descSyskey, 
 				 lo,
@@ -1114,7 +1108,7 @@
      lobStorageLocation(),
      handleLen, op_data[1],
      requestTag_,
-     getExeGlobals()->lobGlobals()->xnId(),
+     -1,
      descSyskey,
      //     (getExeGlobals()->lobGlobals()->getCurrLobOperInProgress() ? 1 : 0),
      (lobOperStatus == CHECK_STATUS_ ? 1 : 0),
@@ -1354,7 +1348,7 @@
 	 handleLen, lobHandle,
 	 &outHandleLen_, outLobHandle_,
 	 requestTag_,
-	 getExeGlobals()->lobGlobals()->xnId(),
+	 -1,
 	 
 	 (lobOperStatus == CHECK_STATUS_ ? 1 : 0),
 	 waitedOp,
@@ -1377,7 +1371,7 @@
 	 handleLen, lobHandle,
 	 &outHandleLen_, outLobHandle_,
 	 requestTag_,
-	 getExeGlobals()->lobGlobals()->xnId(),
+	 -1,
 	 
 	 (lobOperStatus == CHECK_STATUS_ ? 1 : 0),
 	 waitedOp,
@@ -1551,7 +1545,7 @@
 				 handleLen, lobHandle,
 				 requestTag_,
                                  so,
-				 getExeGlobals()->lobGlobals()->xnId(),
+				 -1,
 				 (lobOperStatus == CHECK_STATUS_ ? 1 : 0),
  				 waitedOp,
 
@@ -1576,7 +1570,7 @@
 				 handleLen, lobHandle,
 				 requestTag_,
                                  so,
-				 getExeGlobals()->lobGlobals()->xnId(),
+				 -1,
 				 (lobOperStatus == CHECK_STATUS_ ? 1 : 0),
  				 waitedOp,
 
@@ -1713,62 +1707,6 @@
   return ex_expr::EXPR_OK;
 }
 
-////////////////////////////////////////////////////////
-// ExpLOBload
-////////////////////////////////////////////////////////
-ExpLOBload::ExpLOBload(){};
-ExpLOBload::ExpLOBload(OperatorTypeEnum oper_type,
-		       Lng32 numAttrs,
-		       Attributes ** attr, 
-		       Int64 objectUID,
-		       short descSchNameLen,
-		       char * descSchName,
-		       Space * space)
-  : ExpLOBinsert(oper_type, numAttrs,attr, objectUID, 
-		 descSchNameLen, descSchName, space),
-    llFlags_(0)
-{
-};
-
-void ExpLOBload::displayContents(Space * space, const char * displayStr, 
-				 Int32 clauseNum, char * constsArea)
-
-{
-  ExpLOBoper::displayContents(space, "ExpLOBload", clauseNum, constsArea);
-
-}
-
-ex_expr::exp_return_type ExpLOBload::eval(char *op_data[],
-					  CollHeap*h,
-					  ComDiagsArea** diagsArea)
-{
-  ex_expr::exp_return_type err = ex_expr::EXPR_OK;
-
-  char * handle =
-    getExeGlobals()->lobGlobals()->lobLoadInfo()->lobHandle(lobNum());
-  Lng32 handleLen =
-    getExeGlobals()->lobGlobals()->lobLoadInfo()->lobHandleLen(lobNum());
-
-  if (handle == NULL)
-    return ex_expr::EXPR_ERROR;
-  
-  if (fromLoad())
-    {
-      char * clientFile = op_data[1];
-      // call ExLoadApi
-      Lng32 rc = LOBsql2loaderInterface
-	(clientFile, strlen(clientFile),
-	 (char*)"loaderPort", strlen("loaderPort"),
-	 handle, handleLen,
-	 lobStorageLocation(), strlen(lobStorageLocation()));
-    }
-  else
-    {
-      err = insertData(handleLen, handle, op_data, h, diagsArea);
-    }
-
-  return err;
-}
 
 //////////////////////////////////////////////////
 // ExpLOBfunction
diff --git a/core/sql/exp/ExpLOB.h b/core/sql/exp/ExpLOB.h
index f475461..f8a40cd 100644
--- a/core/sql/exp/ExpLOB.h
+++ b/core/sql/exp/ExpLOB.h
@@ -49,122 +49,6 @@
 
 
 class ExLobInMemoryDescChunksEntry;
-////////////////////////////////
-// class LOBglobals
-////////////////////////////////
-class LobLoadInfo
-{
- private:
-  class LobLoadEntry
-  {
-  public:
-    LobLoadEntry() 
-      { 
-	handle_ = NULL; 
-	handleLen_ = 0;
-      };
-    
-    char* &handle() { return handle_; }
-    Lng32 &handleLen() { return handleLen_; }
-
-  private:
-    char *handle_;
-    Lng32 handleLen_;
-  };
-
- public:
-  LobLoadInfo(CollHeap * heap)
-    : heap_(heap)
-    , lobEntryList_(heap)
-  {};
-  ~LobLoadInfo(){}
-  void setLobLoadEntries(Lng32 num)
-  {
-  }
-
-  void setLobHandle(Lng32 pos, Lng32 handleLen, char * handle) 
-  {
-    if (lobEntryList_.used(pos))
-      {
-	NADELETEBASIC(lobEntryList_[pos]->handle(), heap_);
-      }
-    else
-      {
-	LobLoadEntry* lle = new(heap_) LobLoadEntry();
-	lobEntryList_.insertAt(pos, lle);
-      }
-
-    lobEntryList_[pos]->handleLen() = handleLen;
-    lobEntryList_[pos]->handle() = new(heap_) char[handleLen];
-    str_cpy_all(lobEntryList_[pos]->handle(), handle, handleLen);
-  }
-
-  char * lobHandle(Lng32 pos) 
-  { 
-    if (lobEntryList_.used(pos))
-      return lobEntryList_[pos]->handle(); 
-    else
-      return NULL;
-  };
-
-  Lng32 lobHandleLen(Lng32 pos) 
-  { 
-    if (lobEntryList_.used(pos))
-      return lobEntryList_[pos]->handleLen(); 
-    else
-      return -1;
-  };
-
- private:
-  CollHeap * heap_;
-
-  NAArray<LobLoadEntry*> lobEntryList_;
-};
-  
-class LOBglobals : public NABasicObject {
- public:
- LOBglobals(CollHeap * heap) : heap_(heap),
-    lobAccessGlobals_(NULL),
-    xnId_(-1),
-    lobOperInProgressList_(heap)
-      {
-	lobLoadInfo_ = new(heap) LobLoadInfo(heap);
-      };
-  ~LOBglobals() { NADELETE(lobLoadInfo_,LobLoadInfo,heap_); lobLoadInfo_=NULL;}
-  ExLobGlobals* &lobAccessGlobals() { return lobAccessGlobals_; };
-  LobLoadInfo * lobLoadInfo() { return lobLoadInfo_; }
-
-  Int64 &xnId() { return xnId_; };
-
-  void setLobOperInProgress(Lng32 pos, NABoolean v) 
-  {
-    if (lobOperInProgressList_.used(pos))
-      lobOperInProgressList_[pos] = (v ? 1 : 0);
-    else
-      lobOperInProgressList_.insertAt(pos, v);
-  }
-
-  NABoolean getLobOperInProgress(Lng32 pos)
-  {
-    return (lobOperInProgressList_[pos] != 0);
-  }
-
-  void setCurrLobOperInProgress(NABoolean v) { currLobOperInProgress_ = v; }
-  NABoolean getCurrLobOperInProgress() { return currLobOperInProgress_; }
- private:
-  CollHeap * heap_;
-  ExLobGlobals * lobAccessGlobals_;
-  LobLoadInfo * lobLoadInfo_;
-
-  // transaction id of the current transaction in progress.
-  // -1, if no transaction is associated with the current request.
-  Int64 xnId_;
-
-  NAArray<Lng32> lobOperInProgressList_;
-
-  NABoolean currLobOperInProgress_;
- 
-};
 
 
 /////////////////////////////////////////
@@ -905,49 +789,7 @@
   char  filler1_[4];
 };
 
-class ExpLOBload : public ExpLOBinsert {
-public:
-  ExpLOBload(OperatorTypeEnum oper_type,
-	     Lng32 numAttrs,
-	     Attributes ** attr, 
-	     Int64 objectUID,
-	     short descSchNameLen,
-	     char * descSchName,
-	     Space * space);
-  ExpLOBload();
-
-  virtual ex_expr::exp_return_type eval(char *op_data[],
-					CollHeap*,
-					ComDiagsArea** diagsArea = 0);
-  
-  // Display
-  //
-  virtual void displayContents(Space * space, const char * displayStr, 
-			       Int32 clauseNum, char * constsArea);
-
-  // ---------------------------------------------------------------------
-  // Redefinition of methods inherited from NAVersionedObject.
-  // ---------------------------------------------------------------------
-  virtual unsigned char getClassVersionID()
-  {
-    return 1;
-  }
-
-  virtual void populateImageVersionIDArray()
-  {
-    setImageVersionID(2,getClassVersionID());
-    ExpLOBoper::populateImageVersionIDArray();
-  }
-
-  virtual short getClassSize() { return (short)sizeof(*this); }
-  // ---------------------------------------------------------------------
-
- private:
-
-  Lng32 llFlags_;
-  char  filler1_[4];
-};
-
+	   
 /////////////////////////////////////////
 // Class ExpLOBfunction                //
 /////////////////////////////////////////
diff --git a/core/sql/exp/exp_clause.cpp b/core/sql/exp/exp_clause.cpp
index 3de521b..796e2e5 100644
--- a/core/sql/exp/exp_clause.cpp
+++ b/core/sql/exp/exp_clause.cpp
@@ -574,9 +574,6 @@
 	case ITM_LOBCONVERTHANDLE:
 	  setClassID(LOB_CONVERTHANDLE);
 	  break;
-	case ITM_LOBLOAD:
-	  setClassID(LOB_LOAD);
-	  break;
 	case ITM_SUBSTR:
 	  setClassID(LOB_FUNC_SUBSTR);
 	  break;
@@ -972,9 +969,6 @@
     case ex_clause::LOB_CONVERTHANDLE:
       GetVTblPtr(vtblPtr, ExpLOBconvertHandle);
       break;
-    case ex_clause::LOB_LOAD:
-      GetVTblPtr(vtblPtr, ExpLOBload);
-      break;
     case ex_clause::LOB_FUNC_SUBSTR:
       GetVTblPtr(vtblPtr, ExpLOBfuncSubstring);
       break;
@@ -1405,7 +1399,6 @@
     case ITM_LOBUPDATE: return "ITM_LOBUPDATE";
     case ITM_LOBCONVERT: return "ITM_LOBCONVERT";
     case ITM_LOBCONVERTHANDLE: return "ITM_LOBCONVERTHANDLE";
-    case ITM_LOBLOAD: return "ITM_LOBLOAD";
 
     case ITM_UNIQUE_EXECUTE_ID: return "ITM_UNIQUE_EXECUTE_ID";
     case ITM_GET_TRIGGERS_STATUS: return "ITM_GET_TRIGGERS_STATUS";
diff --git a/core/sql/exp/exp_conv.cpp b/core/sql/exp/exp_conv.cpp
index df1e608..81e0db0 100644
--- a/core/sql/exp/exp_conv.cpp
+++ b/core/sql/exp/exp_conv.cpp
@@ -2058,7 +2058,7 @@
       return ex_expr::EXPR_OK;
     }
   /* remove below code according to discussion in github
-   * https://github.com/apache/incubator-trafodion/pull/706
+   * https://github.com/apache/trafodion/pull/706
    * with above validation, below checking is no longer needed
    * comment out
   
diff --git a/core/sql/generator/GenExplain.cpp b/core/sql/generator/GenExplain.cpp
index 4707dc8..81d1e75 100644
--- a/core/sql/generator/GenExplain.cpp
+++ b/core/sql/generator/GenExplain.cpp
@@ -457,14 +457,14 @@
      if ( reportMemoryEst == TRUE ) {
         if (nodeType == ComTdb::ex_HASH_GRBY || nodeType == ComTdb::ex_HASHJ
                || nodeType == ComTdb::ex_SORT) {
-           double memUsage = getEstimatedRunTimeMemoryUsage(FALSE).value()/1024;
+           double memUsage = tdb->getEstimatedMemoryUsage();
            if ( memUsage > 0 ) {
               sprintf(buf, "est_memory_per_instance: %.3f KB ", memUsage);
               explainTuple->setDescription(buf);
            }
         }
         else {
-           double memUsage = getEstimatedRunTimeMemoryUsage(TRUE).value()/1024;
+           double memUsage = getEstimatedRunTimeMemoryUsage(generator, TRUE).value()/1024;
            if ( memUsage > 0 ) {
               sprintf(buf, "est_memory_per_node: %.3f KB ", memUsage);
               explainTuple->setDescription(buf);
@@ -1440,14 +1440,6 @@
   statement += maxMaxCard;
   statement += " ";
 
-  double total_overflow_size = generator->getTotalOverflowMemory();
-  statement += "total_overflow_size: ";
-
-  char ovSizeVal[1024];
-  sprintf(ovSizeVal, "%.2lf", total_overflow_size/1024);
-  statement += ovSizeVal;
-  statement += " KB ";
-
   FragmentDir *fragDir = generator->getFragmentDir();
   for (CollIndex i = 0; i < fragDir->entries(); i++) {
     if (fragDir->getPartitioningFunction(i) != NULL &&
diff --git a/core/sql/generator/GenItemFunc.cpp b/core/sql/generator/GenItemFunc.cpp
index f1c4618..9821e4a 100644
--- a/core/sql/generator/GenItemFunc.cpp
+++ b/core/sql/generator/GenItemFunc.cpp
@@ -2824,43 +2824,6 @@
   return 0;
 }
 
-short LOBload::codeGen(Generator * generator)
-{
-  Attributes ** attr;
-  
-  Space * space = generator->getSpace();
-  
-  if (generator->getExpGenerator()->genItemExpr(this, &attr, (1 + getArity()), -1) == 1)
-    return 0;
-
-  ExpLOBload * ll =
-    new(generator->getSpace()) ExpLOBload
-    (getOperatorType(), 
-     getArity()+1,
-     attr, 
-     objectUID_,
-     (short)insertedTableSchemaName().length(),
-     (char*)insertedTableSchemaName().data(),
-     space);
-  
-  if (obj_ == LOBoper::STRING_)
-    ll->setFromString(TRUE);
-  else if (obj_ == LOBoper::FILE_)
-    ll->setFromFile(TRUE);
-  else if (obj_ == LOBoper::LOAD_)
-    ll->setFromLoad(TRUE);
-  else if (obj_ == LOBoper::LOB_)
-    ll->setFromLob(TRUE);
-  
-
-  ll->lobNum() = lobNum();
-  ll->setLobStorageType(lobStorageType());
-  ll->setLobStorageLocation((char*)lobStorageLocation().data());
-  
-  generator->getExpGenerator()->linkClause(this, ll);
- return 0;
-}
- 
 
 short SequenceValue::codeGen(Generator * generator)
 {
diff --git a/core/sql/generator/GenPreCode.cpp b/core/sql/generator/GenPreCode.cpp
index c016cd5..cc17512 100644
--- a/core/sql/generator/GenPreCode.cpp
+++ b/core/sql/generator/GenPreCode.cpp
@@ -1910,10 +1910,8 @@
       if (generator->getBindWA()->getUdrStoiList().entries () > 0)
         generator->setAqrEnabled(FALSE);
 
-      // Reset the accumulated # of BMOs and memory usages in 
-      // the generator 
+      // Reset the accumulated # of BMOs for the fragment
       prevNumBMOs = generator->replaceNumBMOs(0);
-      prevBMOsMemoryUsage = generator->replaceBMOsMemoryUsage(0);
 
     } // true root
 
@@ -2138,8 +2136,6 @@
       // Remember # of BMOs that children's preCodeGen found for my fragment.
       setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) );
 
-      setBMOsMemoryUsage( generator->replaceBMOsMemoryUsage(prevBMOsMemoryUsage) );
-
       // Compute the total available memory quota for BMOs
       NADefaults &defs               = ActiveSchemaDB()->getDefaults();
 
@@ -2234,6 +2230,8 @@
   
   setHdfsAccess(generator->hdfsAccess());
 
+  generator->finetuneBMOEstimates();
+
   markAsPreCodeGenned();
 
 #ifdef _DEBUG
@@ -2432,12 +2430,14 @@
       if (!(getEquiJoinPredicates().isEmpty() || getJoinPred().isEmpty() || 
 	    isAntiSemiJoin()))
       {
-	ValueIdSet dummy1, dummy2, dummy3, uncoveredPreds ;
+	ValueIdSet coveredPreds, dummy2, dummy3, uncoveredPreds ;
 	child(0)->getGroupAttr()->coverTest(getJoinPred(),
 					    getGroupAttr()->getCharacteristicInputs(),
-					    dummy1, dummy2, NULL,
+					    coveredPreds, dummy2, NULL,
 					    &uncoveredPreds);
-	if (uncoveredPreds.isEmpty())
+	// set the flag only if all the non-equi-join preds are covered
+	if  ((getJoinPred().entries() == coveredPreds.entries()) &&
+	      uncoveredPreds.isEmpty())
 	  setBeforeJoinPredOnOuterOnly();
       }
 
@@ -3729,7 +3729,7 @@
   generator->incrNumBMOs();
 
   if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
-    generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
+    generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE));
 
 
   // store the transformed predicates back into the hash join node
@@ -5985,7 +5985,7 @@
     generator->incrNumBMOs();
 
   if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
-      generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
+      generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE));
 
   }
 
@@ -6624,7 +6624,7 @@
         generator->incrNumBMOs();
 
         if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
-          generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
+          generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE));
       }
     }
 
@@ -6720,15 +6720,14 @@
                      (availableValues,
 		      getGroupAttr()->getCharacteristicInputs());
 
-  /*
+/*
   TBD - maybe ProbeCache as BMO memory participant??
   if(CmpCommon::getDefault(PROBE_CACHE_MEMORY_QUOTA_SYSTEM) != DF_OFF)
     generator->incrNumBMOs();
-  */
 
   if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
-    generator->incrNBMOsMemoryPerNode(getEstimatedRunTimeMemoryUsage(TRUE));
-
+    generator->incrNBMOsMemoryPerNode(getEstimatedRunTimeMemoryUsage(generator, TRUE));
+*/
   markAsPreCodeGenned();
   return this;
     
@@ -6906,7 +6905,6 @@
   NABoolean inputOltMsgOpt = generator->oltOptInfo()->oltMsgOpt();
 
   unsigned short prevNumBMOs = generator->replaceNumBMOs(0);
-  CostScalar prevBMOsMemoryUsage = generator->replaceBMOsMemoryUsage(0);
 
   // These are used to fix solution 10-071204-9253 and for 
   // solution 10-100310-8659.
@@ -7069,7 +7067,6 @@
     generator->setHalloweenESPonLHS(halloweenESPonLHS);
 
   setNumBMOs( generator->replaceNumBMOs(prevNumBMOs) );
-  setBMOsMemoryUsage( generator->replaceBMOsMemoryUsage(prevBMOsMemoryUsage) );
 
   if (! child(0).getPtr())
     return NULL;
@@ -7201,9 +7198,8 @@
   {
     result = child(0).getPtr();
 
-    // transfer the # of BMOs and their memory usages to generator as
+    // transfer the # of BMOs to generator as
     // this exchange node is to be discarded.
-    generator->incrBMOsMemoryPerFrag(getBMOsMemoryUsage());
     generator->incrNumBMOsPerFrag(getNumBMOs());
   }
 
@@ -7231,8 +7227,7 @@
       
     } // isEspExchange() && !eliminateThisExchange
   
-  if ((ActiveSchemaDB()->getDefaults()).getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
-    generator->incrNBMOsMemoryPerNode(getEstimatedRunTimeMemoryUsage(TRUE));
+
   
   return result;
   
diff --git a/core/sql/generator/GenProbeCache.cpp b/core/sql/generator/GenProbeCache.cpp
index e6c6d41..17a6dce 100644
--- a/core/sql/generator/GenProbeCache.cpp
+++ b/core/sql/generator/GenProbeCache.cpp
@@ -428,9 +428,9 @@
                                         probeCacheTdb->getQueueResizeFactor());
   }
 
-  double probeCacheMemEst = getEstimatedRunTimeMemoryUsage(probeCacheTdb);
+  double probeCacheMemEst = getEstimatedRunTimeMemoryUsage(generator, probeCacheTdb);
   generator->addToTotalEstimatedMemory(probeCacheMemEst);
-  Lng32 pcMemEstInKBPerNode = getEstimatedRunTimeMemoryUsage(TRUE).value() / 1024;
+  Lng32 pcMemEstInKBPerNode = getEstimatedRunTimeMemoryUsage(generator, TRUE).value() / 1024;
   if(!generator->explainDisabled()) {
     generator->setExplainTuple(
        addExplainInfo(probeCacheTdb, childExplainTuple, 0, generator));
@@ -443,7 +443,7 @@
   return 0;
 }
 
-CostScalar ProbeCache::getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams)
+CostScalar ProbeCache::getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams)
 {
   const Lng32 probeSize = 
       getGroupAttr()->getCharacteristicInputs().getRowLength();
@@ -488,12 +488,12 @@
   return totalMemory;
 }
 
-double ProbeCache::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
+double ProbeCache::getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb)
 {
   // tdb is ignored for ProbeCache because this operator
   // does not participate in the BMO quota system.
   Lng32 numOfStreams = 1;
-  CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(FALSE, &numOfStreams);
+  CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(generator, FALSE, &numOfStreams);
   totalMemory = totalMemory * numOfStreams ;
   return totalMemory.value();
 }
diff --git a/core/sql/generator/GenRelEnforcer.cpp b/core/sql/generator/GenRelEnforcer.cpp
index 197aadb..bb557a1 100644
--- a/core/sql/generator/GenRelEnforcer.cpp
+++ b/core/sql/generator/GenRelEnforcer.cpp
@@ -1310,7 +1310,7 @@
                                             considerBufferDefrag);
 
 }
-CostScalar Exchange::getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams)
+CostScalar Exchange::getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams)
 {
    //////////////////////////////////////
    // compute the buffer length (for both 
@@ -1384,10 +1384,10 @@
   return memoryRequired;
 }
 
-double Exchange::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
+double Exchange::getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb)
 {
   Lng32 numOfStreams = 1;
-  CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(FALSE, &numOfStreams);
+  CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(generator, FALSE, &numOfStreams);
   totalMemory = totalMemory * numOfStreams ;
   return totalMemory.value();
 }
diff --git a/core/sql/generator/GenRelGrby.cpp b/core/sql/generator/GenRelGrby.cpp
index 4920a35..0ab7e2e 100644
--- a/core/sql/generator/GenRelGrby.cpp
+++ b/core/sql/generator/GenRelGrby.cpp
@@ -1474,7 +1474,7 @@
   double memQuota = 0;
   Lng32 numStreams;
   double memQuotaRatio;
-  double bmoMemoryUsagePerNode = getEstimatedRunTimeMemoryUsage(TRUE, &numStreams).value();
+  double bmoMemoryUsagePerNode = generator->getEstMemPerNode(getKey() ,numStreams);
 
   if(isPartialGroupBy) {
     // The Quota system does not apply to Partial GroupBy
@@ -1536,16 +1536,12 @@
 
   }
 
-  generator->addToTotalOverflowMemory(
-           getEstimatedRunTimeOverflowSize(memQuota)
-                                     );
-
   // For debugging overflow only (default is zero == not used).
   hashGrbyTdb->
     setForceOverflowEvery((UInt16)(ActiveSchemaDB()->getDefaults()).
 			  getAsULong(EXE_TEST_HASH_FORCE_OVERFLOW_EVERY));
 
-  double hashGBMemEst = getEstimatedRunTimeMemoryUsage(hashGrbyTdb);
+  double hashGBMemEst = generator->getEstMemPerInst(getKey());
   hashGrbyTdb->setEstimatedMemoryUsage(hashGBMemEst / 1024);
   generator->addToTotalEstimatedMemory(hashGBMemEst);
 
@@ -1620,22 +1616,26 @@
 
 }
 
-CostScalar HashGroupBy::getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams)
+CostScalar HashGroupBy::getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams)
 {
   GroupAttributes * childGroupAttr = child(0).getGroupAttr();
   const CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength();
   const CostScalar childRowCount = getEstRowsUsed(); // the number of 
+  //TODO: Line below dumps core at times 
+  //const CostScalar maxCard = childGroupAttr->getResultMaxCardinalityForEmptyInput();
+  const CostScalar maxCard = 0;
+
                                                      // distinct rows groupped
   // Each record also uses a header (HashRow) in memory (8 bytes for 32bit).
   // Hash tables also take memory -- they are about %50 longer than the 
   // number of entries.
   const ULng32 
     memOverheadPerRecord = sizeof(HashRow) + sizeof(HashTableHeader) * 3 / 2 ;
-
+  CostScalar estMemPerNode;
+  CostScalar estMemPerInst;
   // totalHashTableMemory is for all CPUs at this point of time.
   CostScalar totalHashTableMemory = 
     childRowCount * (childRecordSize + memOverheadPerRecord);
-
   Lng32 numOfStreams = 1;
   const PhysicalProperty* const phyProp = getPhysicalProperty();
   if (phyProp)
@@ -1647,51 +1647,22 @@
   }
   if (numStreams != NULL)
      *numStreams = numOfStreams;
-  if (perNode) 
-     totalHashTableMemory /= MINOF(MAXOF(((NAClusterInfoLinux*)gpClusterInfo)->getTotalNumberOfCPUs(), 1), numOfStreams);
-  else 
-     totalHashTableMemory /= numOfStreams;
-  return totalHashTableMemory;
-}
-
-double HashGroupBy::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
-{
-  Lng32 numOfStreams = 1;
-  CostScalar totalHashTableMemory = getEstimatedRunTimeMemoryUsage(FALSE, &numOfStreams);
-  totalHashTableMemory *= numOfStreams ;
-  return totalHashTableMemory.value();
-}
-
-double HashGroupBy::getEstimatedRunTimeOverflowSize(double memoryQuotaMB)
-{
-
-  if ( memoryQuotaMB > 0 ) {
-
-     CostScalar memoryUsage =
-        getEstimatedRunTimeMemoryUsage(TRUE /*per CPU*/);
-
-     double delta = memoryUsage.getValue() - memoryQuotaMB * COM_ONE_MEG ;
-
-     if ( delta > 0 ) {
-        const PhysicalProperty* const phyProp = getPhysicalProperty();
-        Lng32 pipelines = 1;
-   
-        if (phyProp)
-        {
-          PartitioningFunction * partFunc = 
-                   phyProp -> getPartitioningFunction() ;
-   
-          if ( partFunc )
-             pipelines = partFunc -> getCountOfPartitions();
-        }
-   
-   
-        return delta * pipelines;
-     } 
-  } 
-
-  return 0;
-
+  estMemPerNode =  totalHashTableMemory / MINOF(MAXOF(gpClusterInfo->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerInst =  totalHashTableMemory / numOfStreams;
+  NABoolean isPartialGroupBy = (isAPartialGroupByNonLeaf() ||
+                                isAPartialGroupByLeaf());
+  if (isPartialGroupBy)
+  {
+     estMemPerNode = 1024;
+     estMemPerInst = 1024;
+  }
+  OperBMOQuota *operBMOQuota = new (generator->wHeap()) OperBMOQuota(getKey(), numOfStreams, 
+                                                  estMemPerNode, estMemPerInst, childRowCount, maxCard);
+  generator->getBMOQuotaMap()->insert(operBMOQuota);
+  if (perNode)
+     return estMemPerNode;
+  else
+     return estMemPerInst; 
 }
 
 /////////////////////////////////////////////////////////
diff --git a/core/sql/generator/GenRelJoin.cpp b/core/sql/generator/GenRelJoin.cpp
index 245b838..cc97b1d 100644
--- a/core/sql/generator/GenRelJoin.cpp
+++ b/core/sql/generator/GenRelJoin.cpp
@@ -1784,7 +1784,7 @@
   double memQuota = 0;
   double memQuotaRatio;
   Lng32 numStreams;
-  double bmoMemoryUsagePerNode = getEstimatedRunTimeMemoryUsage(TRUE, &numStreams).value();
+  double bmoMemoryUsagePerNode = generator->getEstMemPerNode(getKey(), numStreams);
   if (mmu != 0) {
     memQuota = mmu;
     hashj_tdb->setMemoryQuotaMB(mmu);
@@ -1825,11 +1825,7 @@
   if (beforeJoinPredOnOuterOnly())
     hashj_tdb->setBeforePredOnOuterOnly();
 
-  generator->addToTotalOverflowMemory(
-                      getEstimatedRunTimeOverflowSize(memQuota)
-                                     );
-
-  double hjMemEst = getEstimatedRunTimeMemoryUsage(hashj_tdb);
+  double hjMemEst = generator->getEstMemPerInst(getKey());
   hashj_tdb->setEstimatedMemoryUsage(hjMemEst / 1024);
   generator->addToTotalEstimatedMemory(hjMemEst);
 
@@ -2059,17 +2055,23 @@
 }
 
 
-CostScalar HashJoin::getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams)
+CostScalar HashJoin::getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams)
 {
   GroupAttributes * childGroupAttr = child(1).getGroupAttr();
   const CostScalar childRecordSize = childGroupAttr->getCharacteristicOutputs().getRowLength();
   const CostScalar childRowCount = child(1).getPtr()->getEstRowsUsed();
+  //TODO: Line below dumps core at times 
+  //const CostScalar maxCard = childGroupAttr->getResultMaxCardinalityForEmptyInput();
+  const CostScalar maxCard = 0;
   // Each record also uses a header (HashRow) in memory (8 bytes for 32bit).
   // Hash tables also take memory -- they are about %50 longer than the 
   // number of entries.
   const ULng32 
     memOverheadPerRecord = sizeof(HashRow) + sizeof(HashTableHeader) * 3 / 2 ;
 
+  CostScalar estMemPerNode;
+  CostScalar estMemPerInst;
+
   CostScalar totalHashTableMemory = 
     childRowCount * (childRecordSize + memOverheadPerRecord);
   // one buffer for the outer table
@@ -2089,57 +2091,15 @@
   }
   if (numStreams != NULL)
      *numStreams = numOfStreams;
-  if (perNode) 
-     totalHashTableMemory /= MINOF(MAXOF(((NAClusterInfoLinux*)gpClusterInfo)->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerNode = totalHashTableMemory / MINOF(MAXOF(gpClusterInfo->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerInst = totalHashTableMemory / numOfStreams;
+  OperBMOQuota *operBMOQuota = new (generator->wHeap()) OperBMOQuota(getKey(), numOfStreams,         
+                                                  estMemPerNode, estMemPerInst, childRowCount, maxCard);
+  generator->getBMOQuotaMap()->insert(operBMOQuota);
+  if (perNode)
+     return estMemPerNode;
   else
-     totalHashTableMemory /= numOfStreams;
-  return totalHashTableMemory;
-}
-
-double HashJoin::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
-{
-  Lng32 numOfStreams = 1;
-  CostScalar totalHashTableMemory = getEstimatedRunTimeMemoryUsage(FALSE, &numOfStreams);
-  totalHashTableMemory *= numOfStreams ;
-  return totalHashTableMemory.value();
-}
-
-double HashJoin::getEstimatedRunTimeOverflowSize(double memoryQuotaMB)
-{
-  // Setup overflow size for join with formula ov = ((s0-m)/s0)*(s0+s1), where
-  // s0 = size of child0, s1 = size of child1 and m the memory quota for NJ
-  //
-  if ( memoryQuotaMB > 0 ) {
-
-     GroupAttributes * c0 = child(0).getGroupAttr();
-     double c0RLen = c0->getCharacteristicOutputs().getRowLength();
-     double c0Rows = (child(0).getPtr()->getEstRowsUsed()).getValue();
-
-     GroupAttributes * c1 = child(1).getGroupAttr();
-     double c1RLen = c1->getCharacteristicOutputs().getRowLength();
-     double c1Rows = (child(1).getPtr()->getEstRowsUsed()).getValue();
-
-     double s0 = c0RLen * c0Rows;
-     double s1 = c1RLen * c1Rows;
-
-     Lng32 pipelines = 1;
-     const PhysicalProperty* const phyProp = getPhysicalProperty() ;
-     if (phyProp)
-     {
-       PartitioningFunction * partFunc = phyProp -> getPartitioningFunction() ;
-       if ( partFunc )
-          pipelines = partFunc->getCountOfPartitions();
-     }
-
-     double delta = s1 / pipelines - memoryQuotaMB * COM_ONE_MEG ;
-     if ( delta > 0 ) {
-       double ov = ((delta / s1) * (s0 + s1)) * pipelines;
-       return ov;
-     }
-
-  } 
-   
-  return 0;
+     return estMemPerInst; 
 }
 
 // NABoolean HashJoin::canUseUniqueHashJoin()
@@ -3072,7 +3032,7 @@
   UInt16 quotaMB = 0;
   Lng32 numStreams;
   double memQuotaRatio;
-  double bmoMemoryUsage = getEstimatedRunTimeMemoryUsage(TRUE, &numStreams).value();
+  double bmoMemoryUsage = generator->getEstMemPerNode(getKey(), numStreams);
 
   NADefaults &defs = ActiveSchemaDB()->getDefaults();
   if ( CmpCommon::getDefaultLong(MJ_BMO_QUOTA_PERCENT) != 0) 
diff --git a/core/sql/generator/GenRelMisc.cpp b/core/sql/generator/GenRelMisc.cpp
index 160ff0f..737bf4e 100644
--- a/core/sql/generator/GenRelMisc.cpp
+++ b/core/sql/generator/GenRelMisc.cpp
@@ -2912,7 +2912,7 @@
   compilerStatsInfo->collectStatsType() = generator->collectStatsType();
   compilerStatsInfo->udr() = noOfUdrs;
   compilerStatsInfo->ofMode() = generator->getOverflowMode();
-  compilerStatsInfo->ofSize() = generator->getTotalOverflowMemory();
+  compilerStatsInfo->ofSize() = 0;
   compilerStatsInfo->bmo() = generator->getTotalNumBMOs();
   compilerStatsInfo->queryType() = (Int16)root_tdb->getQueryType();
   compilerStatsInfo->subqueryType() = (Int16)root_tdb->getSubqueryType();
@@ -3061,7 +3061,7 @@
   short memoryQuotaMB = 0;
   double memoryQuotaRatio;
   Lng32 numStreams;
-  double bmoMemoryUsagePerNode = getEstimatedRunTimeMemoryUsage(TRUE, &numStreams).value();
+  double bmoMemoryUsagePerNode = generator->getEstMemPerNode(getKey(), numStreams);
 
   if (CmpCommon::getDefault(SORT_MEMORY_QUOTA_SYSTEM) != DF_OFF)
   {
@@ -3172,13 +3172,10 @@
 
   generator->initTdbFields(sort_tdb);
 
-  double sortMemEst = getEstimatedRunTimeMemoryUsage(sort_tdb);
+  double sortMemEst = generator->getEstMemPerInst(getKey());
   sort_tdb->setEstimatedMemoryUsage(sortMemEst / 1024);
   generator->addToTotalEstimatedMemory(sortMemEst);
 
-  generator->addToTotalOverflowMemory(
-         getEstimatedRunTimeOverflowSize(memoryQuotaMB));
-
   if (sortPrefixKeyLen > 0)
     ((ComTdbSort *)sort_tdb)->setPartialSort(TRUE);  // do partial sort
 
@@ -3853,35 +3850,7 @@
   return rc;
 }
 
-double Sort::getEstimatedRunTimeOverflowSize(double memoryQuotaMB)
-{
-   if ( memoryQuotaMB > 0 ) {
-     CostScalar memoryUsage = getEstimatedRunTimeMemoryUsage(TRUE /*per CPU*/);
-
-     double delta = memoryUsage.getValue() - memoryQuotaMB * COM_ONE_MEG ;
-
-     if ( delta > 0 )  {
-
-        const PhysicalProperty* const phyProp = getPhysicalProperty();
-        Lng32 pipelines = 1;
-   
-        if (phyProp != NULL)
-        {
-          PartitioningFunction * partFunc = 
-                 phyProp -> getPartitioningFunction() ;
-   
-          if ( partFunc )
-             pipelines = partFunc->getCountOfPartitions();
-        }
-   
-        return delta * pipelines;
-     }
-  }
-     
-  return 0;
-}
-
-CostScalar Sort::getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams)
+CostScalar Sort::getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams)
 {
   GroupAttributes * childGroupAttr = child(0).getGroupAttr();
   Lng32 childRecordSize = 
@@ -3893,6 +3862,12 @@
   else
      rowsUsed = getEstRowsUsed();
   CostScalar totalMemory = rowsUsed * childRecordSize;
+  CostScalar estMemPerNode;
+  CostScalar estMemPerInst;
+ 
+  //TODO: Line below dumps core at times 
+  //const CostScalar maxCard = childGroupAttr->getResultMaxCardinalityForEmptyInput();
+  const CostScalar maxCard = 0;
 
   Lng32 numOfStreams = 1;
   const PhysicalProperty* const phyProp = getPhysicalProperty();
@@ -3905,22 +3880,17 @@
   }
   if (numStreams != NULL)
      *numStreams = numOfStreams;
-  if (perNode) 
-      totalMemory /= MINOF(MAXOF(((NAClusterInfoLinux*)gpClusterInfo)->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerNode = totalMemory / MINOF(MAXOF(gpClusterInfo->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerInst = totalMemory / numOfStreams;
+  OperBMOQuota *operBMOQuota = new (generator->wHeap()) OperBMOQuota(getKey(), numOfStreams,
+                                                  estMemPerNode, estMemPerInst, rowsUsed, maxCard);
+  generator->getBMOQuotaMap()->insert(operBMOQuota);
+  if (perNode)
+     return estMemPerNode;
   else
-      totalMemory /= numOfStreams;
-  return totalMemory;
+     return estMemPerInst; 
 }
 
-double Sort::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
-{
-  Lng32 numOfStreams = 1;
-  CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(FALSE, &numOfStreams);
-  totalMemory = totalMemory * numOfStreams ;
-  return totalMemory.value();
-}
-
-
 /////////////////////////////////////////////////////////
 //
 // Tuple::codeGen()
@@ -4044,7 +4014,16 @@
 	    tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
             setInUpdateOrInsert(bindWA, NULL);
 	    childNode = tmpAssign->getSource().getItemExpr();
-
+            //don't allow LOB insert in a tuple list
+            if (childNode->getOperatorType() == ITM_LOBINSERT)
+              {                                                          
+                // cannot have this function in a values list with
+                // multiple tuples. Use a single tuple.
+                *CmpCommon::diags() << DgSqlCode(-4483);
+                GenExit();
+                return -1;
+                        
+              }
             castNode->child(0) = childNode;
           }
           else
diff --git a/core/sql/generator/GenRelSequence.cpp b/core/sql/generator/GenRelSequence.cpp
index 39a713e..41bfc20 100644
--- a/core/sql/generator/GenRelSequence.cpp
+++ b/core/sql/generator/GenRelSequence.cpp
@@ -778,10 +778,10 @@
     
     if ((ActiveSchemaDB()->getDefaults()).
 	getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB) > 0)
-      generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(TRUE));
+      generator->incrBMOsMemory(getEstimatedRunTimeMemoryUsage(generator, TRUE));
   }
   else
-    generator->incrNBMOsMemoryPerNode(getEstimatedRunTimeMemoryUsage(TRUE));
+    generator->incrNBMOsMemoryPerNode(getEstimatedRunTimeMemoryUsage(generator, TRUE));
 
   markAsPreCodeGenned();
 
@@ -1074,7 +1074,7 @@
   // update the estimated value of HistoryRowLength with actual value
   //setEstHistoryRowLength(historyIds.getRowLength());
 
-  double sequenceMemEst = getEstimatedRunTimeMemoryUsage(sequenceTdb);
+  double sequenceMemEst = generator->getEstMemPerInst(getKey());
   generator->addToTotalEstimatedMemory(sequenceMemEst);
 
   if(!generator->explainDisabled()) {
@@ -1104,7 +1104,7 @@
   UInt16 mmu = (UInt16)(defs.getAsDouble(EXE_MEM_LIMIT_PER_BMO_IN_MB));
   UInt16 numBMOsInFrag = (UInt16)generator->getFragmentDir()->getNumBMOs();
   Lng32 numStreams;
-  double bmoMemoryUsagePerNode = getEstimatedRunTimeMemoryUsage(TRUE, &numStreams).value();
+  double bmoMemoryUsagePerNode = generator->getEstMemPerNode(getKey(), numStreams);
   double memQuota = 0;
   double memQuotaRatio;
   if (mmu != 0)
@@ -1282,22 +1282,28 @@
 }
 
 
-CostScalar PhysSequence::getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams)
+CostScalar PhysSequence::getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams)
 {
   // input param is not used as this operator does not participate in the
   // quota system.
 
   ValueIdSet outputFromChild = child(0).getGroupAttr()->getCharacteristicOutputs();
+  //TODO: Line below dumps core at times 
+  //const CostScalar maxCard = child(0).getGroupAttr()->getResultMaxCardinalityForEmptyInput();
+  const CostScalar maxCard = 0;
+  const CostScalar rowCount = numHistoryRows();
 
   //ValueIdSet historyIds;
   //getHistoryAttributes(sequenceFunctions(),outputFromChild, historyIds);
   //historyIds += sequenceFunctions();
   const Lng32 historyBufferWidthInBytes = getEstHistoryRowLength(); //historyIds.getRowLength();
-  const double historyBufferSizeInBytes = numHistoryRows() * 
+  const double historyBufferSizeInBytes = rowCount.value() * 
                                             historyBufferWidthInBytes;
 
   // totalMemory is per CPU at this point of time.
   double totalMemory = historyBufferSizeInBytes;
+  CostScalar estMemPerNode;
+  CostScalar estMemPerInst;
 
   const PhysicalProperty* const phyProp = getPhysicalProperty();
   Lng32 numOfStreams = 1;
@@ -1312,19 +1318,15 @@
   }
   if (numStreams != NULL)
      *numStreams = numOfStreams;
-  if (perNode) 
-     totalMemory /= MINOF(MAXOF(((NAClusterInfoLinux*)gpClusterInfo)->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerNode = totalMemory / MINOF(MAXOF(gpClusterInfo->getTotalNumberOfCPUs(), 1), numOfStreams);
+  estMemPerInst = totalMemory / numOfStreams;
+  OperBMOQuota *operBMOQuota = new (generator->wHeap()) OperBMOQuota(getKey(), numOfStreams,         
+                                                  estMemPerNode, estMemPerInst, rowCount, maxCard);
+  generator->getBMOQuotaMap()->insert(operBMOQuota);
+  if (perNode)
+     return estMemPerNode;
   else
-     totalMemory /= numOfStreams;
-  return totalMemory;
-}
-
-double PhysSequence::getEstimatedRunTimeMemoryUsage(ComTdb * tdb)
-{
-  Lng32 numOfStreams = 1;
-  CostScalar totalMemory = getEstimatedRunTimeMemoryUsage(FALSE, &numOfStreams);
-  totalMemory = totalMemory * numOfStreams ;
-  return totalMemory.value();
+     return estMemPerInst; 
 }
 
 ExplainTuple*
diff --git a/core/sql/generator/GenRelUpdate.cpp b/core/sql/generator/GenRelUpdate.cpp
index 422e532..e0a8d38 100644
--- a/core/sql/generator/GenRelUpdate.cpp
+++ b/core/sql/generator/GenRelUpdate.cpp
@@ -2515,8 +2515,10 @@
       FALSE,                                 // [IN] add convert nodes?
       1,                                     // [IN] target atp number (work atp 1)
       loggingTuppIndex,                      // [IN] target tupp index
-      tupleFormat,                           // [IN] target tuple data format
-      loggingRowLen,                          // [OUT] target tuple length
+      // The target format should be exploded format always because the column delimiter
+      // added during execution assumes exploded format
+      ExpTupleDesc::SQLARK_EXPLODED_FORMAT,  // [IN] target tuple data format 
+      loggingRowLen,                         // [OUT] target tuple length
       &loggingDataExpr,                      // [OUT] move expression
       &loggingDataTupleDesc,                 // [optional OUT] target tuple desc
       ExpTupleDesc::LONG_FORMAT              // [optional IN] target desc format
diff --git a/core/sql/generator/Generator.cpp b/core/sql/generator/Generator.cpp
index 3f061b8..20f799a 100644
--- a/core/sql/generator/Generator.cpp
+++ b/core/sql/generator/Generator.cpp
@@ -104,6 +104,7 @@
     ,insertNodesList_(wHeap())  
     ,avgVarCharSizeList_(wHeap())  
     ,trafSimTableInfoList_(wHeap())
+    ,bmoQuotaMap_(wHeap())
 {
   // nothing generated yet.
   genObj = 0;
@@ -163,24 +164,11 @@
   tempSpace_ = NULL;
 
   numBMOs_ = 0;
-  totalNumBMOsPerNode_ = 0;
 
-  BMOsMemoryPerFrag_ = 0;
   totalBMOsMemoryPerNode_ = 0;
 
-  nBMOsMemoryPerNode_ = 0;
-
   BMOsMemoryLimitPerNode_ = 0;
 
-  totalNumBMOsPerNode_ = 0;
-
-  BMOsMemoryPerFrag_ = 0;
-  totalBMOsMemoryPerNode_ = 0;
-
-  nBMOsMemoryPerNode_ = 0;
-
-  BMOsMemoryLimitPerNode_ = 0;
-  
   totalNumBMOs_ = 0;
 
   numESPs_ = 1;
@@ -208,7 +196,6 @@
   largeQueueSize_ = 0;
 
   totalEstimatedMemory_ = 0.0;
-  totalOverflowMemory_ = 0.0;
   operEstimatedMemory_ = 0;
 
   maxCpuUsage_ = 0;
@@ -564,7 +551,7 @@
     disableExplain();
 
   foundAnUpdate_ = FALSE;
-
+  
   // walk through the tree of RelExpr and ItemExpr objects, generating
   // ComTdb, ex_expr and their relatives
   expr_node->codeGen(this);
@@ -3386,3 +3373,90 @@
     hbpa->setDopParallelScanner(CmpCommon::getDefaultNumeric(HBASE_DOP_PARALLEL_SCANNER));
 }
 
+double Generator::getEstMemPerNode(NAString *key, Lng32 &numStreams)
+{
+  OperBMOQuota *operBMOQuota = bmoQuotaMap_.get(key); 
+  if (operBMOQuota != NULL) {
+     numStreams = operBMOQuota->getNumStreams();
+     return operBMOQuota->getEstMemPerNode();
+  } else {
+     numStreams = 0;
+     return 0;
+  }
+}
+
+double Generator::getEstMemForTdb(NAString *key)
+{
+  OperBMOQuota *operBMOQuota = bmoQuotaMap_.get(key); 
+  if (operBMOQuota != NULL) 
+     return operBMOQuota->getEstMemForTdb();
+  else
+     return 0;
+}
+
+double Generator::getEstMemPerInst(NAString *key)
+{
+  OperBMOQuota *operBMOQuota = bmoQuotaMap_.get(key); 
+  if (operBMOQuota != NULL) 
+     return operBMOQuota->getEstMemPerInst();
+  else
+     return 0;
+}
+
+void Generator::finetuneBMOEstimates()
+{
+   if (bmoQuotaMap_.entries() == 1)
+      return;
+   double bmoMemoryLimitPerNode = ActiveSchemaDB()->getDefaults().getAsDouble(BMO_MEMORY_LIMIT_PER_NODE_IN_MB);
+   if (bmoMemoryLimitPerNode == 0)
+      return;
+   NAHashDictionaryIterator<NAString, OperBMOQuota> iter (bmoQuotaMap_) ;
+
+   double capMemoryRatio = ActiveSchemaDB()->getDefaults().getAsDouble(BMO_MEMORY_ESTIMATE_RATIO_CAP);
+   double bmoMemoryEstOutlier = 
+      ActiveSchemaDB()->getDefaults().getAsDouble(BMO_MEMORY_ESTIMATE_OUTLIER_FACTOR) * bmoMemoryLimitPerNode * 1024 * 1024;
+
+   double totalEstMemPerNode = totalBMOsMemoryPerNode_.value();
+   double bmoMemoryRatio;
+   double calcTotalEstMemPerNode = 0;
+   double calcOperEstMemPerNode;
+
+   NAString* key;
+   OperBMOQuota *operBMOQuota;
+   // Find the outliers and set it to the tolerable value first
+   iter.reset(); 
+   iter.getNext(key,operBMOQuota);
+   while(key) {
+     calcOperEstMemPerNode = operBMOQuota->getEstMemPerNode();
+     if (calcOperEstMemPerNode > bmoMemoryEstOutlier) {
+        operBMOQuota->setEstMemPerNode(bmoMemoryEstOutlier);
+        calcTotalEstMemPerNode += bmoMemoryEstOutlier;
+     }
+     else 
+       calcTotalEstMemPerNode += calcOperEstMemPerNode;
+     iter.getNext(key,operBMOQuota);
+   }
+   totalBMOsMemoryPerNode_ = calcTotalEstMemPerNode;   
+   
+   // Then check for the CAP to adjust it again
+   totalEstMemPerNode = totalBMOsMemoryPerNode_.value();
+   calcTotalEstMemPerNode = 0;
+   iter.reset();
+   iter.getNext(key,operBMOQuota);
+   while(key) {
+     calcOperEstMemPerNode = operBMOQuota->getEstMemPerNode();
+     bmoMemoryRatio = calcOperEstMemPerNode / totalEstMemPerNode;
+     if (capMemoryRatio > 0 && capMemoryRatio <=1 && bmoMemoryRatio > capMemoryRatio) {
+        bmoMemoryRatio = capMemoryRatio;
+        calcOperEstMemPerNode = bmoMemoryRatio * totalEstMemPerNode;
+        operBMOQuota->setEstMemPerNode(calcOperEstMemPerNode);
+        calcTotalEstMemPerNode += calcOperEstMemPerNode;
+     }
+     else
+        calcTotalEstMemPerNode += calcOperEstMemPerNode;
+     iter.getNext(key,operBMOQuota);
+   }
+   totalBMOsMemoryPerNode_ = calcTotalEstMemPerNode;   
+}
+
+
diff --git a/core/sql/generator/Generator.h b/core/sql/generator/Generator.h
index b398703..e4524a8 100644
--- a/core/sql/generator/Generator.h
+++ b/core/sql/generator/Generator.h
@@ -72,6 +72,7 @@
 class Attributes;
 class DP2Insert;
 class TrafSimilarityTableInfo;
+class OperBMOQuota;
 
 // this define is used to raise assertion in generator.
 // Calls GeneratorAbort which does a longjmp out of the calling scope.
@@ -86,6 +87,15 @@
     double avgSize;
   };
 
+
+class XBMOQuotaMap : public NAKeyLookup<NAString, OperBMOQuota>
+{
+public:
+   XBMOQuotaMap(CollHeap *heap)
+    : NAKeyLookup<NAString, OperBMOQuota>(10, NAKeyLookupEnums::KEY_INSIDE_VALUE, heap)
+   {}
+};
+
 //////////////////////////////////////////////////////////////////////////
 // class Generator
 //////////////////////////////////////////////////////////////////////////
@@ -425,9 +435,7 @@
 
   // temporary value holder (during pre code gen) for #BMOs in this fragment
   unsigned short numBMOs_;
-  unsigned short totalNumBMOsPerNode_; // accumulated # of BMO, per Node
 
-  CostScalar BMOsMemoryPerFrag_; // accumulated BMO memory, per fragment 
   CostScalar totalBMOsMemoryPerNode_; // accumulated BMO memory, per Node
 
   CostScalar nBMOsMemoryPerNode_; // accumulated nBMO memory, per Node
@@ -555,9 +563,6 @@
   // total estimated memory used by BMOs and certain other operators in bytes
   double totalEstimatedMemory_ ;
 
-   // total overflowed memory used by Sort, HashGroupBy and HashJoin in bytes
-  double totalOverflowMemory_ ;
-
   // estimated memory for an individual operator. Used by Explain
   // set to 0 after Explain has been called so that next operator
   // can used this field. In KB and on a per Node basis.
@@ -628,6 +633,7 @@
     return FALSE;
   };
 
+  XBMOQuotaMap bmoQuotaMap_;
 public:
   enum cri_desc_type {
     UP, DOWN
@@ -1460,25 +1466,13 @@
 			      ItemExpr * childNode0, ItemExpr * childNode1,
 			      ComDiagsArea * diagsArea);
 
-  inline CostScalar getBMOsMemory() { return BMOsMemoryPerFrag_; }
-
   inline void incrBMOsMemory(CostScalar x) 
-     { incrBMOsMemoryPerFrag(x); totalBMOsMemoryPerNode_ += x; }
+     { totalBMOsMemoryPerNode_ += x; }
 
-  inline void incrBMOsMemoryPerFrag(CostScalar x) 
-     { BMOsMemoryPerFrag_ += x;  }
-
-  inline CostScalar replaceBMOsMemoryUsage(CostScalar newVal)
-  {
-    CostScalar retVal = BMOsMemoryPerFrag_;
-    BMOsMemoryPerFrag_ = newVal;
-    return retVal;
-  }
   inline CostScalar getTotalBMOsMemoryPerNode() 
-                 { return totalBMOsMemoryPerNode_; }
-
+           { return totalBMOsMemoryPerNode_; }
   inline void incrNumBMOs() 
-     {  incrNumBMOsPerFrag(1);  totalNumBMOsPerNode_++; totalNumBMOs_++;}
+     {  incrNumBMOsPerFrag(1);  totalNumBMOs_++;}
 
   inline void incrNumBMOsPerFrag(UInt32 x) { numBMOs_ += x; }
 
@@ -1488,11 +1482,10 @@
     numBMOs_ = newVal;
     return retVal;
   }
-  inline unsigned short getTotalNumBMOsPerNode() { return totalNumBMOsPerNode_; }
-  
+ 
   inline CostScalar getTotalNBMOsMemoryPerNode() { return nBMOsMemoryPerNode_; }
   inline void incrNBMOsMemoryPerNode(CostScalar x) { nBMOsMemoryPerNode_ += x; }
-
+ 
   inline void setBMOsMemoryLimitPerNode(CostScalar x) 
             { BMOsMemoryLimitPerNode_ = x; }
 
@@ -1605,10 +1598,6 @@
   inline short getMaxCpuUsage(){return maxCpuUsage_;}
   inline void setMaxCpuUsage(short val){maxCpuUsage_ = val;}
 
-  inline double getTotalOverflowMemory(){return totalOverflowMemory_;}
-  inline void addToTotalOverflowMemory(double val)
-        {totalOverflowMemory_ += val;}
-
   inline ComTdb::OverflowModeType getOverflowMode() {return overflowMode_; }
 
   // Each of these two mutators return the old value
@@ -1692,7 +1681,11 @@
   inline void setTopNRows(ULng32 topNRows) 
      { topNRows_ = topNRows; }
   inline ULng32 getTopNRows() { return topNRows_; }
-        
+  inline XBMOQuotaMap *getBMOQuotaMap() { return &bmoQuotaMap_; }      
+  double getEstMemPerNode(NAString *key, Lng32 &numStreams);
+  double getEstMemForTdb(NAString *key);
+  double getEstMemPerInst(NAString *key);
+  void finetuneBMOEstimates();
 }; // class Generator
 
 class GenOperSimilarityInfo : public NABasicObject
@@ -1731,6 +1724,46 @@
   UInt32 flags_;
 };
 
+
+class OperBMOQuota : public NABasicObject
+{
+public: 
+   OperBMOQuota(NAString *operAddr, Int32 numStreams, CostScalar estMemPerNode, CostScalar estMemPerInst,
+                CostScalar estRowsUsed, CostScalar maxCard) :
+     operAddr_(operAddr) 
+   , numStreams_(numStreams)
+   , estMemPerNode_(estMemPerNode)
+   , estMemPerInst_(estMemPerInst)
+   , estRowsUsed_(estRowsUsed)
+   , maxCard_(maxCard) 
+   , origEstMemPerNode_(estMemPerNode)
+   , ignoreEstimate_(FALSE)
+   { 
+     //weight_ = (estRowsUsed_ / maxCard_).value();
+     weight_ = 0;
+   }
+   const NAString *getKey() const {return operAddr_; }
+   inline Int32 getNumStreams() { return numStreams_; }
+   inline double getEstMemPerNode() { return estMemPerNode_.value(); }
+   inline double getEstMemPerInst() { return estMemPerInst_.value(); }
+   inline double getEstMemForTdb() { return estMemPerInst_.value() * numStreams_; }
+   inline void setIgnoreEstimate() { ignoreEstimate_ = TRUE; } 
+   NABoolean operator==(const OperBMOQuota &other) const
+                                        { return this == &other; }
+   inline void setEstMemPerNode(double estMemPerNode) { estMemPerNode_ = estMemPerNode; }
+
+private:
+   const NAString *operAddr_;
+   Int32 numStreams_;
+   CostScalar estMemPerNode_; 
+   CostScalar estMemPerInst_;
+   CostScalar estRowsUsed_;
+   CostScalar maxCard_;
+   CostScalar origEstMemPerNode_;
+   double weight_;
+   NABoolean ignoreEstimate_; 
+};
+
 // Get table and index filename
 extern const NAString GenGetQualifiedName(const CorrName&,
 					  NABoolean formatForDisplay = FALSE);
@@ -1784,5 +1817,6 @@
   return explainTuple_;
 }
 
+
 #endif
 
diff --git a/core/sql/langman/Measure.cpp b/core/sql/langman/Measure.cpp
index 9dfe206..3d57f31 100644
--- a/core/sql/langman/Measure.cpp
+++ b/core/sql/langman/Measure.cpp
@@ -57,7 +57,7 @@
   char fileName[256];
   char *logFileEnv = 0;
   if((logFileEnv = getenv("MEASURE_LOG_FILE")) != NULL){
-    sprintf(fileName, "%s.%d", pid);
+    sprintf(fileName, "%s.%d", logFileEnv, pid);
     logFile = fopen(fileName, "w+t");
     if(logFile == 0){
       logFile = stderr;
diff --git a/core/sql/nskgmake/executor/Makefile b/core/sql/nskgmake/executor/Makefile
index 1b85950..ca487fb 100755
--- a/core/sql/nskgmake/executor/Makefile
+++ b/core/sql/nskgmake/executor/Makefile
@@ -130,8 +130,5 @@
 	ExSMReadyList.cpp \
 	ExFastTransport.cpp
 
-ifneq ($(SP_DIS),)
-EXTERN_LIBS := $(SP_EXPORT_LIB)/libwrappersq.so
-endif
 SYS_LIBS := -lrt -lpthread
 SRCPATH := bin executor runtimestats porting_layer qmscommon
diff --git a/core/sql/nskgmake/tdm_sqlmxevents/Makefile b/core/sql/nskgmake/tdm_sqlmxevents/Makefile
index eb395d1..ca35880 100755
--- a/core/sql/nskgmake/tdm_sqlmxevents/Makefile
+++ b/core/sql/nskgmake/tdm_sqlmxevents/Makefile
@@ -29,6 +29,5 @@
 
 ifeq ($(TARGTYPE),linux)
 EXTERN_LIBS := $(XMPIROOT)/libevlsq.so
-#EXTERN_LIBS += $(SP_EXPORT_LIB)/libwrappersq.so
 CPPSRC += vers_libtdm_sqlmxevents.cpp
 endif
diff --git a/core/sql/optimizer/BindItemExpr.cpp b/core/sql/optimizer/BindItemExpr.cpp
index dcd6897..3b71061 100644
--- a/core/sql/optimizer/BindItemExpr.cpp
+++ b/core/sql/optimizer/BindItemExpr.cpp
@@ -3654,12 +3654,10 @@
         else if (convType == 2)
           {
             Parser parser(bindWA->currentCmpContext());
-            char buf[1000];
+            char buf[128];
             
-            // right justify the string representation of numeric operand 
-            // and then do the concat
-            sprintf(buf, "CAST(SPACE(%d - CHAR_LENGTH(CAST(@A1 AS VARCHAR(%d)))) || CAST(@A1 AS VARCHAR(%d)) AS VARCHAR(%d))",
-                    dLen, dLen, dLen, dLen);
+            sprintf(buf, "CAST(CAST(@A1 AS VARCHAR(%d)) AS VARCHAR(%d))",
+                    dLen, dLen);
             newChild = 
               parser.getItemExprTree(buf, strlen(buf), BINDITEMEXPR_STMTCHARSET, 1, child(srcChildIndex));
             
@@ -9496,7 +9494,6 @@
       // track the size of this object.  Otherwise we might use the context heap.
       const Lng32 size = 16 * 1024;  // The initial size
       routineHeap = new CTXTHEAP NAHeap("NARoutine Heap", (NAHeap *)CTXTHEAP, size);
-      routineHeap->setJmpBuf(CmpInternalErrorJmpBufPtr);
     }
     // If not caching, put NARoutine on statement heap.
     else routineHeap=CmpCommon::statementHeap(); 
diff --git a/core/sql/optimizer/BindRelExpr.cpp b/core/sql/optimizer/BindRelExpr.cpp
index 527c029..53abc71 100644
--- a/core/sql/optimizer/BindRelExpr.cpp
+++ b/core/sql/optimizer/BindRelExpr.cpp
@@ -1354,7 +1354,6 @@
        const Lng32 size = 16 * 1024;  // The initial size
        routineHeap = new CTXTHEAP NAHeap("NARoutine Heap", (NAHeap *)CTXTHEAP, 
                                          size);
-       routineHeap->setJmpBuf(CmpInternalErrorJmpBufPtr);
      }
      else 
        routineHeap=CmpCommon::statementHeap(); 
diff --git a/core/sql/optimizer/ItemExpr.cpp b/core/sql/optimizer/ItemExpr.cpp
index 732dfde..c791a92 100644
--- a/core/sql/optimizer/ItemExpr.cpp
+++ b/core/sql/optimizer/ItemExpr.cpp
@@ -12828,18 +12828,6 @@
   return LOBoper::copyTopNode(result, outHeap);
 }
 
-ItemExpr * LOBload::copyTopNode(ItemExpr *derivedNode, CollHeap* outHeap)
-{
-  ItemExpr *result;
-
-  if (derivedNode == NULL)
-    result = new (outHeap) LOBload(NULL, obj_);
-  else
-    result = derivedNode;
-
-  return LOBinsert::copyTopNode(result, outHeap);
-}
-
 ItemExpr * LOBextract::copyTopNode(ItemExpr *derivedNode, CollHeap* outHeap)
 {
   ItemExpr *result;
diff --git a/core/sql/optimizer/ItemFunc.h b/core/sql/optimizer/ItemFunc.h
index c60447d..0435521 100644
--- a/core/sql/optimizer/ItemFunc.h
+++ b/core/sql/optimizer/ItemFunc.h
@@ -3137,23 +3137,6 @@
  private:
 }; // class LOBconvertHandle
 
-class LOBload : public LOBinsert
-{
- public:
-  
- LOBload(ItemExpr *val1Ptr, ObjectType fromObj)
-   : LOBinsert(val1Ptr, NULL, fromObj, FALSE, ITM_LOBLOAD)
-    {};
-  
-  // copyTopNode method
-  virtual ItemExpr * copyTopNode(ItemExpr *derivedNode = NULL,
-				 CollHeap* outHeap = 0);
-  
-  // method to do code generation
-  virtual short codeGen(Generator*);
-
- private:
-}; // class LOBload
 
 class LOBextract : public LOBoper
 {
diff --git a/core/sql/optimizer/MJVIndexBuilder.cpp b/core/sql/optimizer/MJVIndexBuilder.cpp
index 3fe08fc..99ba778 100644
--- a/core/sql/optimizer/MJVIndexBuilder.cpp
+++ b/core/sql/optimizer/MJVIndexBuilder.cpp
@@ -534,7 +534,7 @@
   for (size_t i = 0; i < size; i++) {
     at(i)->out();
     if (i < size-1) {
-      printf(" --> ", i);
+      printf(" --> ");
     }
   }
 };
@@ -853,7 +853,7 @@
 void PrintColIndList (const IndexList& toPrint, char* name)
 {
   printf ("\n%s:\n",name);
-  printf ("--------------------------\n",name);
+  printf ("--------------------------\n");
   for (size_t i = 0; i < toPrint.entries(); i++) 
   {
     ColIndList currList = toPrint.at(i);	
diff --git a/core/sql/optimizer/OptimizerSimulator.cpp b/core/sql/optimizer/OptimizerSimulator.cpp
index 7b7fa38..53af84d 100644
--- a/core/sql/optimizer/OptimizerSimulator.cpp
+++ b/core/sql/optimizer/OptimizerSimulator.cpp
@@ -947,6 +947,18 @@
               CmpCommon::diags()->mergeAfter(*(cliInterface_->getDiagsArea()));
               raiseOsimException("drop external table: %d", retcode);
           }
+          //unregister hive table
+          NAString unregisterStmt = "UNREGISTER HIVE TABLE IF EXISTS ";
+          unregisterStmt += name;
+          debugMessage("%s\n", unregisterStmt.data());
+          retcode = executeFromMetaContext(unregisterStmt.data());
+          if(retcode < 0)
+          {
+              //suppress errors for now, even with IF EXISTS this will
+              //give an error if the Hive table does not exist
+              //CmpCommon::diags()->mergeAfter(*(cliInterface_->getDiagsArea()));
+              //raiseOsimException("unregister hive table: %d", retcode);
+          }
           //drop hive table
           NAString hiveSchemaName;
           qualName->getHiveSchemaName(hiveSchemaName);
@@ -1048,10 +1060,12 @@
     {
         int end = stmt.index('\n', begin);
         if(end > begin)
-        {
-            stmt.extract(begin, end-1, tmp);
-            return tmp.data();
-        }
+          end -= 1;
+        else
+          end = stmt.length()-1;
+
+        stmt.extract(begin, end, tmp);
+        return tmp.data();
     }
     return NULL;
 }
@@ -1156,7 +1170,14 @@
     while(readHiveStmt(hiveCreateExternalTableSql, statement, comment))
    {
         if(statement.length() > 0) {
-            debugMessage("%s\n", extractAsComment("CREATE EXTERNAL TABLE", statement));
+            // this could be a create external table or just a register table
+            // if this Hive table just has stats but no external table
+            const char *stmtText = extractAsComment("CREATE EXTERNAL TABLE", statement);
+
+            if (!stmtText)
+              stmtText = extractAsComment("REGISTER  HIVE TABLE", statement);
+            debugMessage("%s\n", stmtText);
+
             retcode = executeFromMetaContext(statement.data()); //create hive external table
             if(retcode < 0)
             {
@@ -2943,8 +2964,9 @@
             for (int i = 0; i < outQueue->numEntries(); i++) {
                 OutputInfo * vi = (OutputInfo*)outQueue->getNext();
                 char * ptr = vi->get(0);
-                //write "CREATE EXTERNAL TABLE" DDL to another file.
-                if(strstr(ptr, "CREATE EXTERNAL TABLE"))
+                //write "CREATE EXTERNAL TABLE" and "REGISTER" DDL to another file.
+                if(strstr(ptr, "CREATE EXTERNAL TABLE") ||
+                   strstr(ptr, "REGISTER /*INTERNAL*/ HIVE TABLE"))
                     inExtDDL = TRUE;
                 if(inExtDDL){
                     (*writeLogStreams_[HIVE_CREATE_EXTERNAL_TABLE]) << ptr << endl;
diff --git a/core/sql/optimizer/RelEnforcer.h b/core/sql/optimizer/RelEnforcer.h
index e6ebc12..752d1ec 100644
--- a/core/sql/optimizer/RelEnforcer.h
+++ b/core/sql/optimizer/RelEnforcer.h
@@ -188,10 +188,7 @@
   virtual NABoolean isBigMemoryOperator(const PlanWorkSpace* pws,
                                         const Lng32 planNumber);
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL);
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb);
-
-  virtual double getEstimatedRunTimeOverflowSize(double memoryQuotaMB);
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL);
 
   virtual PlanPriority computeOperatorPriority
     (const Context* context,
@@ -585,8 +582,8 @@
   inline void setBMOsMemoryUsage(CostScalar x) { BMOsMemoryUsage_ = x; }
   inline CostScalar getBMOsMemoryUsage() { return BMOsMemoryUsage_ ; }
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL);
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb);
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL);
+  virtual double getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb);
 
   void setExtractProducerFlag() { isExtractProducer_ = TRUE; }
   NABoolean getExtractProducerFlag() { return isExtractProducer_; }
diff --git a/core/sql/optimizer/RelExpr.cpp b/core/sql/optimizer/RelExpr.cpp
index 5a841c7..c9d3f83 100644
--- a/core/sql/optimizer/RelExpr.cpp
+++ b/core/sql/optimizer/RelExpr.cpp
@@ -288,6 +288,7 @@
   ,cachedResizeCIFRecord_(FALSE)
   ,dopReduced_(FALSE)
   ,originalExpr_(NULL)
+  ,operKey_(outHeap)
 {
 
   child_[0] = leftChild;
@@ -2042,12 +2043,6 @@
         variableMemLimit = (1-equalQuotaShareRatio) * exeMem;
      }
      double bmoMemoryRatio = bmoMemoryUsage / totalBMOsMemoryUsage;
-     double capMemoryRatio = 1; 
-     if (totalNumBMOs > 1) {
-        capMemoryRatio = ActiveSchemaDB()->getDefaults().getAsDouble(BMO_MEMORY_ESTIMATE_RATIO_CAP);
-        if (capMemoryRatio > 0 && capMemoryRatio <=1 && bmoMemoryRatio > capMemoryRatio)
-           bmoMemoryRatio = capMemoryRatio;
-     }
      bmoQuotaRatio = bmoMemoryRatio;
      double bmoMemoryQuotaPerNode = constMemQuota + (variableMemLimit * bmoMemoryRatio);
      double numInstancesPerNode = numStreams / MINOF(MAXOF(((NAClusterInfoLinux*)gpClusterInfo)->getTotalNumberOfCPUs(), 1), numStreams);
@@ -16321,3 +16316,14 @@
    return ch0RowCount;
 }
 
+NAString *RelExpr::getKey()
+{
+
+   if (operKey_.length() == 0)
+   {
+     char keyBuffer[30];
+     snprintf(keyBuffer, sizeof(keyBuffer), "%ld", (Int64)this);
+     operKey_ = keyBuffer;
+   }
+   return &operKey_;
+}
diff --git a/core/sql/optimizer/RelExpr.h b/core/sql/optimizer/RelExpr.h
index 90be900..44ed086 100644
--- a/core/sql/optimizer/RelExpr.h
+++ b/core/sql/optimizer/RelExpr.h
@@ -39,6 +39,7 @@
 
 #include "ObjectNames.h"
 #include "CmpContext.h"
+#include "CmpStatement.h"
 #include "RETDesc.h"
 #include "ValueDesc.h"
 #include "Rule.h"
@@ -1291,13 +1292,9 @@
   // ---------------------------------------------------------------------
   virtual NABoolean isBigMemoryOperator(const PlanWorkSpace* pws,
                                         const Lng32 planNumber);
-/*
-  virtual CostScalar getEstimatedRunTimeMemoryUsageInMB(NABoolean perNode) 
-      { return getEstimatedRunTimeMemoryUsage(perNode) / (1024*1024); }
-*/
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL) {return 0;}
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb) {return 0;}
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL) {return 0;}
+  virtual double getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb) {return 0;}
 
   inline NABoolean isinBlockStmt() const
                           { return isinBlockStmt_; }
@@ -1598,6 +1595,8 @@
   // remember the original here, e.g. to find VEG regions
   RelExpr *originalExpr_;
 
+  NAString operKey_;
+
 public:
 
   // begin: accessors & mutators for relexpr tracking info
@@ -1640,7 +1639,7 @@
 
   CostScalar getChild0Cardinality(const Context*);
 
-
+  NAString *getKey();
 }; // class RelExpr
 
 // -----------------------------------------------------------------------
diff --git a/core/sql/optimizer/RelFastTransport.cpp b/core/sql/optimizer/RelFastTransport.cpp
index 8773b5c..9981b6c 100644
--- a/core/sql/optimizer/RelFastTransport.cpp
+++ b/core/sql/optimizer/RelFastTransport.cpp
@@ -457,7 +457,7 @@
   return sppForMe ;
 };
 
-double PhysicalFastExtract::getEstimatedRunTimeMemoryUsage(ComTdb * tdb) 
+double PhysicalFastExtract::getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb) 
 {
 
 // The executor attempts to get buffers, each of size 1 MB. This memory
diff --git a/core/sql/optimizer/RelFastTransport.h b/core/sql/optimizer/RelFastTransport.h
index 4a4acb7..8edfb61 100644
--- a/core/sql/optimizer/RelFastTransport.h
+++ b/core/sql/optimizer/RelFastTransport.h
@@ -408,7 +408,7 @@
                                const ValueIdSet & externalInputs,
                                ValueIdSet &pulledNewInputs);
 
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb) ;
+  virtual double getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb) ;
 
   virtual short codeGen(Generator *);
   static NABoolean isSpecialChar(char * str , char & chr);
diff --git a/core/sql/optimizer/RelGrby.h b/core/sql/optimizer/RelGrby.h
index 28d0fbc..fc3e486 100644
--- a/core/sql/optimizer/RelGrby.h
+++ b/core/sql/optimizer/RelGrby.h
@@ -821,11 +821,8 @@
   virtual NABoolean isBigMemoryOperator(const PlanWorkSpace* pws,
                                         const Lng32 planNumber);
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL);
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb);
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL);
 
-  virtual double getEstimatedRunTimeOverflowSize(double memoryQuotaMB);
-    
   virtual PlanPriority computeOperatorPriority
     (const Context* context,
      PlanWorkSpace *pws=NULL,
diff --git a/core/sql/optimizer/RelJoin.h b/core/sql/optimizer/RelJoin.h
index 3643210..f2eda71 100644
--- a/core/sql/optimizer/RelJoin.h
+++ b/core/sql/optimizer/RelJoin.h
@@ -1631,10 +1631,7 @@
   virtual NABoolean isBigMemoryOperator(const PlanWorkSpace* pws,
                                         const Lng32 planNumber);
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL);
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb);
-
-  virtual double getEstimatedRunTimeOverflowSize(double memoryQuotaMB);
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL);
 
   inline ValueIdSet & checkInputValues() { return checkInputValues_;}
   inline ValueIdSet & moveInputValues()  { return moveInputValues_;}
diff --git a/core/sql/optimizer/RelProbeCache.h b/core/sql/optimizer/RelProbeCache.h
index bc79a3d..ac2f0f7 100644
--- a/core/sql/optimizer/RelProbeCache.h
+++ b/core/sql/optimizer/RelProbeCache.h
@@ -74,8 +74,8 @@
 
   virtual short codeGen(Generator *g);
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL);
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb);
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL);
+  virtual double getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb);
 
   virtual const NAString getText() const;
 
diff --git a/core/sql/optimizer/RelRoutine.cpp b/core/sql/optimizer/RelRoutine.cpp
index da660ab..7853c79 100644
--- a/core/sql/optimizer/RelRoutine.cpp
+++ b/core/sql/optimizer/RelRoutine.cpp
@@ -1186,7 +1186,7 @@
 // -----------------------------------------------------------------------
 // methods for class PhysicalTableMappingUDF
 // -----------------------------------------------------------------------
-double PhysicalTableMappingUDF::getEstimatedRunTimeMemoryUsage(ComTdb * tdb) {return 0;}
+double PhysicalTableMappingUDF::getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb) {return 0;}
 
 RelExpr * PhysicalTableMappingUDF::copyTopNode(RelExpr *derivedNode,
                                                CollHeap* outHeap)
diff --git a/core/sql/optimizer/RelRoutine.h b/core/sql/optimizer/RelRoutine.h
index ca63321..05a74e1 100644
--- a/core/sql/optimizer/RelRoutine.h
+++ b/core/sql/optimizer/RelRoutine.h
@@ -1105,7 +1105,7 @@
                                const ValueIdSet & externalInputs,
                                ValueIdSet &pulledNewInputs);
 
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb) ;
+  virtual double getEstimatedRunTimeMemoryUsage(Generator *generator, ComTdb * tdb) ;
 
   virtual short codeGen(Generator *);
 
diff --git a/core/sql/optimizer/RelSequence.h b/core/sql/optimizer/RelSequence.h
index b5069d1..189fdbe 100644
--- a/core/sql/optimizer/RelSequence.h
+++ b/core/sql/optimizer/RelSequence.h
@@ -430,9 +430,7 @@
   virtual NABoolean isBigMemoryOperator(const Context* context,
                                         const Lng32 planNumber);
 
-  virtual CostScalar getEstimatedRunTimeMemoryUsage(NABoolean perNode, Lng32 *numStreams = NULL);
-  virtual double getEstimatedRunTimeMemoryUsage(ComTdb * tdb);
-
+  virtual CostScalar getEstimatedRunTimeMemoryUsage(Generator *generator, NABoolean perNode, Lng32 *numStreams = NULL);
 
   // Redefine these virtual methods to declare this node as a
   // physical node.
diff --git a/core/sql/parser/sqlparser.y b/core/sql/parser/sqlparser.y
index b839b87..2c805da 100755
--- a/core/sql/parser/sqlparser.y
+++ b/core/sql/parser/sqlparser.y
@@ -25789,11 +25789,6 @@
                                 column_constraint
                                 optional_constraint_attributes
                                 {
-//                Commented out to allow non iso88591 constraint text
-//				  if (NonISO88591LiteralEncountered) {
-//				    *SqlParser_Diags << DgSqlCode(-1242);
-//				    YYABORT;
-//				  }
                                   $$ = $2 /*column_constraint*/;
 				  if ($2)
 				    {
@@ -25807,11 +25802,6 @@
                                 }
                       | column_constraint optional_constraint_attributes
                                 {
-//                Commented out to allow non iso88591 constraint text
-//				  if (NonISO88591LiteralEncountered) {
-//				    *SqlParser_Diags << DgSqlCode(-1242);
-//				    YYABORT;
-//				  }
                                   $$ = $1 /*column_constraint*/;
 				  if ($1)
 				    $1->setConstraintAttributes(
@@ -26257,11 +26247,6 @@
 table_constraint_definition : { NonISO88591LiteralEncountered = FALSE; } constraint_name_definition table_constraint
                                 optional_constraint_attributes
                                 {
-//                Commented out to allow non iso88591 constraint text
-//			 	  if (NonISO88591LiteralEncountered) {
-//				    *SqlParser_Diags << DgSqlCode(-1242);
-//				    YYABORT;
-//				  }
                                   $3->setConstraintName(
                                       *$2 /*constraint_name_definition*/);
                                   $3->setConstraintKind(ElemDDLConstraint::
@@ -26274,11 +26259,6 @@
 
                       | { NonISO88591LiteralEncountered = FALSE; } table_constraint optional_constraint_attributes
                                 {
-//                Commented out to allow non iso88591 constraint text
-//				  if (NonISO88591LiteralEncountered) {
-//				    *SqlParser_Diags << DgSqlCode(-1242);
-//				    YYABORT;
-//				  }
                                   $2->setConstraintKind(ElemDDLConstraint::
                                                         TABLE_CONSTRAINT_DEF);
                                   $2->setConstraintAttributes(
@@ -27710,11 +27690,6 @@
                                 order_by_clause
                                 optional_with_check_option
                                 {
-				//  if (NonISO88591LiteralEncountered) {
-				//    *SqlParser_Diags << DgSqlCode(-1239);
-				//    YYABORT;
-				//  }
-
 				  RelRoot *top = finalize($8);
 				  if (($9) &&
 				      (CmpCommon::getDefault(ALLOW_ORDER_BY_IN_CREATE_VIEW) == DF_OFF))
@@ -29215,11 +29190,6 @@
 before_trigger_definition: before_trigger_prefix triggerred_when_clause
                                                     triggered_before_action
             {
-	  //    if (NonISO88591LiteralEncountered) {
-	  //	*SqlParser_Diags << DgSqlCode(-1238);
-	  //	YYABORT;
-	  //    }
-
 	      $$ = $1;  // the CreateTriggerStmt object to return
 
 	      StmtDDLCreateTrigger *triggerObject = $1;
@@ -29256,12 +29226,7 @@
 // returns pStmtDDL
 after_trigger_definition: after_trigger_prefix triggerred_when_clause
                                               triggered_after_action
-            {
-	  //    if (NonISO88591LiteralEncountered) {
-	  //       *SqlParser_Diags << DgSqlCode(-1238);
-	  //       YYABORT;
-	  //    }
-	    
+            {	    
 	      $$ = $1;   // the CreateTriggerStmt object to return
 
 	      StmtDDLCreateTrigger *triggerObject = $1;
@@ -29325,10 +29290,6 @@
 
 		InsideTriggerDefinition = TRUE;
 
-	    //   if (NonISO88591LiteralEncountered) {
-	    //     *SqlParser_Diags << DgSqlCode(-1238);
-	    //     YYABORT;
-	    //   }
 	       // 
                // Initialize names for REFERENCING 
 	       //
@@ -29401,11 +29362,6 @@
 
 		InsideTriggerDefinition = TRUE;
 
-	     //  if (NonISO88591LiteralEncountered) {
-	     //    *SqlParser_Diags << DgSqlCode(-1238);
-	     //    YYABORT;
-	     //  }
-
 	       // 
                // Initialize names for REFERENCING 
 	       //
diff --git a/core/sql/regress/charsets/DIFF312.KNOWN.SB.OS b/core/sql/regress/charsets/DIFF312.KNOWN.SB.OS
index fbefff4..6f7d002 100644
--- a/core/sql/regress/charsets/DIFF312.KNOWN.SB.OS
+++ b/core/sql/regress/charsets/DIFF312.KNOWN.SB.OS
@@ -1,7 +1,7 @@
 5618c5618,5621
 < --- SQL operation complete.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 > 
@@ -44,14 +44,14 @@
 5721c5724,5727
 < --- SQL operation complete.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 > 
 5725c5731,5734
 < --- SQL operation complete.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 > 
@@ -130,7 +130,7 @@
 5878c5887,5890
 < --- SQL operation complete.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 > 
@@ -185,14 +185,14 @@
 5981c5993,5996
 < --- SQL operation complete.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 > 
 5985c6000,6003
 < --- SQL operation complete.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 > 
diff --git a/core/sql/regress/compGeneral/DIFF005.KNOWN.SB.OS b/core/sql/regress/compGeneral/DIFF005.KNOWN.SB.OS
index 1bf18e2..25c9a80 100644
--- a/core/sql/regress/compGeneral/DIFF005.KNOWN.SB.OS
+++ b/core/sql/regress/compGeneral/DIFF005.KNOWN.SB.OS
@@ -1,7 +1,7 @@
 1004c1004,1006
 < *** ERROR[12001] Creation failed. The materialized view cannot be maintained incrementally.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 1006d1007
@@ -9,7 +9,7 @@
 1015c1016,1018
 < *** ERROR[12001] Creation failed. The materialized view cannot be maintained incrementally.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 > 
 > *** ERROR[8822] The statement was not prepared.
 1017d1019
diff --git a/core/sql/regress/compGeneral/EXPECTED023 b/core/sql/regress/compGeneral/EXPECTED023
index 46352b8..03beacb 100644
--- a/core/sql/regress/compGeneral/EXPECTED023
+++ b/core/sql/regress/compGeneral/EXPECTED023
@@ -125,7 +125,7 @@
 OBJECT_NAME                                                                                                                                                                                                                                                       SAMPLE_NAME                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           REASON  LAST_WHERE_PREDICATE
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  ------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240681236_1508882731_922052                                                                                                                                                                                                                                                                                                                                                                                                                                      M                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
+STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648412060_1513280032_959042                                                                                                                                                                                                                                                                                                                                                                                                                                      M                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
 
 --- 1 row(s) selected.
 >>-- should see one row
@@ -141,7 +141,7 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882731_922052
+TRAF_SAMPLE_05715215834648412060_1513280032_959042
 
 --- SQL operation complete.
 >> -- should be stest, stest_empty, stestc, sb_* tables + a sample table
@@ -165,7 +165,7 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882785_656325
+TRAF_SAMPLE_05715215834648412060_1513280079_749188
 
 --- SQL operation complete.
 >> -- should be stest, stest_empty, stestc, sb_* tables + a different sample table
@@ -175,7 +175,7 @@
 OBJECT_NAME                                                                                                                                                                                                                                                       SAMPLE_NAME                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           REASON  LAST_WHERE_PREDICATE
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  ------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240681236_1508882785_656325                                                                                                                                                                                                                                                                                                                                                                                                                                      M                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
+STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648412060_1513280079_749188                                                                                                                                                                                                                                                                                                                                                                                                                                      M                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
 
 --- 1 row(s) selected.
 >>-- should see one row
@@ -222,7 +222,7 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882823_902847
+TRAF_SAMPLE_05715215834648412060_1513280122_597819
 
 --- SQL operation complete.
 >> -- should be stest, stest_empty, stestc, sb_* tables + another sample table
@@ -232,7 +232,7 @@
 OBJECT_NAME                                                                                                                                                                                                                                                       SAMPLE_NAME                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           REASON  LAST_WHERE_PREDICATE
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  ------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240681236_1508882823_902847                                                                                                                                                                                                                                                                                                                                                                                                                                      I                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
+STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648412060_1513280122_597819                                                                                                                                                                                                                                                                                                                                                                                                                                      I                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
 
 --- 1 row(s) selected.
 >>-- should see one row
@@ -256,7 +256,7 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
 
 --- SQL operation complete.
 >> -- should be stest, stest_empty, stestc, sb_* tables + another sample table
@@ -266,7 +266,7 @@
 OBJECT_NAME                                                                                                                                                                                                                                                       SAMPLE_NAME                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           REASON  LAST_WHERE_PREDICATE
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  ------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240681236_1508882873_484291                                                                                                                                                                                                                                                                                                                                                                                                                                      I                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
+STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648412060_1513280167_871663                                                                                                                                                                                                                                                                                                                                                                                                                                      I                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
 
 --- 1 row(s) selected.
 >>-- should see one row
@@ -327,7 +327,7 @@
 OBJECT_NAME                                                                                                                                                                                                                                                       SAMPLE_NAME                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           REASON  LAST_WHERE_PREDICATE
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  ------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240681236_1508882873_484291                                                                                                                                                                                                                                                                                                                                                                                                                                      I        c1 >= 100000                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       
+STEST                                                                                                                                                                                                                                                             TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648412060_1513280167_871663                                                                                                                                                                                                                                                                                                                                                                                                                                      I        c1 >= 100000                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       
 
 --- 1 row(s) selected.
 >>
@@ -384,8 +384,8 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
-TRAF_SAMPLE_04737367368240683297_1508882959_040281
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
+TRAF_SAMPLE_05715215834648413689_1513280221_712692
 
 --- SQL operation complete.
 >>
@@ -394,7 +394,7 @@
 OBJECT_NAME                                                                                                                                                                                                                                                       SAMPLE_NAME                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           REASON  LAST_WHERE_PREDICATE
 ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------  ------  --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-STESTC                                                                                                                                                                                                                                                            TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240683297_1508882959_040281                                                                                                                                                                                                                                                                                                                                                                                                                                      I        c1 >= 'naaaa'                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                      
+STESTC                                                                                                                                                                                                                                                            TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648413689_1513280221_712692                                                                                                                                                                                                                                                                                                                                                                                                                                      I        c1 >= 'naaaa'                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                      
 
 --- 1 row(s) selected.
 >>
@@ -419,8 +419,8 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
-TRAF_SAMPLE_04737367368240683297_1508882959_040281
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
+TRAF_SAMPLE_05715215834648413689_1513280221_712692
 
 --- SQL operation complete.
 >> -- should be the same as previous "get tables"
@@ -447,8 +447,8 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
-TRAF_SAMPLE_04737367368240683297_1508882959_040281
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
+TRAF_SAMPLE_05715215834648413689_1513280221_712692
 
 --- SQL operation complete.
 >> -- should be the same as previous "get tables"
@@ -476,8 +476,8 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
-TRAF_SAMPLE_04737367368240683297_1508882959_040281
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
+TRAF_SAMPLE_05715215834648413689_1513280221_712692
 
 --- SQL operation complete.
 >> -- should be the same as previous "get tables"
@@ -488,10 +488,9 @@
 *** ERROR[9219] Incremental UPDATE STATISTICS: An operation failed, possibly due to an invalid WHERE clause.
 
 *** ERROR[15001] A syntax error occurred at or before: 
-UPSERT USING LOAD INTO TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_04737367368240
-681236_1508882873_484291_I (SELECT "C1", "C2", "C3" FROM TRAFODION.COMPGENERAL_
-TEST023.STEST WHERE  1  SAMPLE RANDOM 9.943000 PERCENT );
-                                    ^ (195 characters from start of SQL statement)
+DELETE WITH NO ROLLBACK FROM TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215
+834648412060_1513280167_871663 WHERE  1;
+                                       ^ (119 characters from start of SQL statement)
 
 *** ERROR[8822] The statement was not prepared.
 
@@ -508,8 +507,8 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
-TRAF_SAMPLE_04737367368240683297_1508882959_040281
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
+TRAF_SAMPLE_05715215834648413689_1513280221_712692
 
 --- SQL operation complete.
 >> -- should be the same as previous "get tables"
@@ -519,7 +518,7 @@
 
 *** ERROR[9219] Incremental UPDATE STATISTICS: An operation failed, possibly due to an invalid WHERE clause.
 
-*** ERROR[4001] Column BADCOL is not found.  Tables in scope: TRAFODION.COMPGENERAL_TEST023.STEST.  Default schema: TRAFODION.SCH.
+*** ERROR[4001] Column BADCOL is not found.  Tables in scope: TRAFODION.COMPGENERAL_TEST023.TRAF_SAMPLE_05715215834648412060_1513280167_871663.  Default schema: TRAFODION.SCH.
 
 *** ERROR[8822] The statement was not prepared.
 
@@ -560,7 +559,7 @@
 STEST
 STESTC
 STEST_EMPTY
-TRAF_SAMPLE_04737367368240681236_1508882873_484291
+TRAF_SAMPLE_05715215834648412060_1513280167_871663
 
 --- SQL operation complete.
 >> -- should be the same as previous "get tables" except only one sample table
diff --git a/core/sql/regress/compGeneral/FILTER023 b/core/sql/regress/compGeneral/FILTER023
index 35672c1..c6f63fe 100755
--- a/core/sql/regress/compGeneral/FILTER023
+++ b/core/sql/regress/compGeneral/FILTER023
@@ -35,7 +35,7 @@
 s/TRAF_SAMPLE_[0-9]*_[0-9]*_[0-9]*/TRAF_SAMPLE_nn_nnnnnnnnnn_nnnnnn/g
 s/TRAF_SAMPLE_[0-9]*_[0-9]*/TRAF_SAMPLE_nn_nnnnnnnnnn/g
 s/TRAF_SAMPLE_[0-9]*/TRAF_SAMPLE_nn/g
-s/E_[0-9]*_[0-9]*_[0-9]* WHERE/E_nn_nnnnnnnnnn_nnnnnn WHERE/g
+s/[0-9]*_[0-9]*_[0-9]* WHERE/nn_nnnnnnnnnn_nnnnnn WHERE/g
 s/[0-9]*\.[0-9]* PERCENT/nn.nn PERCENT/g
 s/[0-9]*_[0-9]*_[0-9]*_I/nn_nnnnnnnnnn_nnnnnn_I/g
 s/[0-9]*_[0-9]*_I/nnnnnnnnnn_nnnnnn_I/g
diff --git a/core/sql/regress/core/DIFF024.KNOWN.SB.OS b/core/sql/regress/core/DIFF024.KNOWN.SB.OS
index 97db8b6..7dedc85 100644
--- a/core/sql/regress/core/DIFF024.KNOWN.SB.OS
+++ b/core/sql/regress/core/DIFF024.KNOWN.SB.OS
@@ -1,7 +1,7 @@
 1535c1535
 < *** ERROR[1002] Catalog SEABASE does not exist or has not been registered on node @system@.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 1537c1537
 < *** ERROR[4082] Object #CAT.#SCH.T024MDAM does not exist or is inaccessible.
 ---
@@ -11,7 +11,7 @@
 1803c1802
 < *** ERROR[1002] Catalog SEABASE does not exist or has not been registered on node @system@.
 ---
-> *** ERROR[4222] The DDL feature is not supported in this software version.
+> *** ERROR[4222] The DDL feature is not supported in this software version or edition.
 1805c1804
 < *** ERROR[4082] Object #CAT.#SCH.T024MDAM does not exist or is inaccessible.
 ---
diff --git a/core/sql/regress/executor/EXPECTED002.SB b/core/sql/regress/executor/EXPECTED002.SB
index bc4ce8a..c6fe1bf 100644
--- a/core/sql/regress/executor/EXPECTED002.SB
+++ b/core/sql/regress/executor/EXPECTED002.SB
@@ -38,6 +38,41 @@
 
 --- SQL operation complete.
 >>
+>>-- added for JIRA TRAFODION-2843
+>>Create table D03s
++>                (
++>                pk int not null not droppable primary key
++>                , val01 int
++>                , val02 int
++>                );
+
+--- SQL operation complete.
+>>
+>>Create table F01s
++>                (
++>                  pk int not null not droppable primary key
++>                , fk_d01 int not null
++>                , fk_d02 int not null
++>                , fk_d03 int not null
++>                , fk_d04 int not null
++>                , fk_d05 int not null
++>                , fk_d06 int not null
++>                , fk_d07 int not null
++>                , fk_d08 int not null
++>                , fk_d09 int not null
++>                , fk_d10 int not null
++>                , val01 int
++>                , val02 int
++>                , val01_d01 int
++>                , val02_d01 int
++>                , val01_d02 int
++>                , val02_d02 int
++>                , val01_d03 int
++>                , val02_d03 int
++>                ) salt using 8 partitions;
+
+--- SQL operation complete.
+>>
 >>?section Genesis_10_970911_6859
 >>?ifMX
 >>create view t002v(w,x) as values(1,11);
@@ -6645,9 +6680,9 @@
 STUDENT_NAME  (EXPR)
 ------------  ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-s1            40-50-60-79-88-100-101                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
-s2            18-58-88-188                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                     
-s3            40-90-100                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                        
+s1            40-50-60-79-88-100-101                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                          
+s2            18-58-88-188                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                    
+s3            40-90-100                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       
 
 --- 3 row(s) selected.
 >>
@@ -6660,9 +6695,9 @@
 STUDENT_NAME  (EXPR)
 ------------  ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-s1            40-50-60-79-88-88-100-101                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                        
-s2            18-58-88-88-188                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                  
-s3            40-40-90-100                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                     
+s1            40-50-60-79-88-88-100-101                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       
+s2            18-58-88-88-188                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                 
+s3            40-40-90-100                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                    
 
 --- 3 row(s) selected.
 >>
@@ -6699,9 +6734,9 @@
 STUDENT_NAME  (EXPR)
 ------------  ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 
-s1            101-100-88-79-60-50-40                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                           
-s2            188-88-58-18                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                     
-s3            100-90-40                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                        
+s1            101-100-88-79-60-50-40                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                          
+s2            188-88-58-18                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                    
+s3            100-90-40                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                       
 
 --- 3 row(s) selected.
 >>control query shape cut;
@@ -6728,7 +6763,7 @@
 >>insert into regexp_test values( 'english' );
 
 --- 1 row(s) inserted.
->>insert into regexp_test values( 'dev@TRAFODION.org' );
+>>insert into regexp_test values( 'dev@trafodion.org' );
 
 --- 1 row(s) inserted.
 >>insert into regexp_test values( '127.0.0.1' );
@@ -6744,54 +6779,54 @@
 >>-- only number
 >>select * from regexp_test where c1 regexp '^[0-9]*\s*$';
 
-C1
-----------
+C1                              
+--------------------------------
 
-123
+123                             
 
 --- 1 row(s) selected.
 >>select * from regexp_test where c1 regexp '^[[:digit:]]*\s*$';
 
-C1
-----------
+C1                              
+--------------------------------
 
-123
+123                             
 
 --- 1 row(s) selected.
 >>-- only english
 >>select * from regexp_test where c1 regexp '^.[A-Za-z]+\s*$';
 
-C1
-----------
+C1                              
+--------------------------------
 
-english
+english                         
 
 --- 1 row(s) selected.
 >>-- valid email address
 >>select * from regexp_test where c1 regexp '\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*\s*';
 
-C1
-----------
+C1                              
+--------------------------------
 
-dev@TRAFODION.org
+dev@trafodion.org               
 
 --- 1 row(s) selected.
 >>-- valid ip address
 >>select * from regexp_test where c1 regexp '^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])\s*$';
 
-C1
-----------
+C1                              
+--------------------------------
 
-127.0.0.1
+127.0.0.1                       
 
 --- 1 row(s) selected.
 >>-- utf-8 code
 >>select * from regexp_test where c1 regexp '(中文测试)';
 
-C1
-----------
+C1                              
+--------------------------------
 
-中文测试
+中文测试                    
 
 --- 1 row(s) selected.
 >>select * from regexp_test where c1 regexp '[^\';
@@ -6803,6 +6838,122 @@
 
 --- SQL operation complete.
 >>------------------------------------------------------------------------
+>>-- added for JIRA TRAFODION-2843
+>>
+>>insert into D03s
++>                select c1+c2*10+c3*100, c1, c1+c2*10
++>                from (values(1)) T
++>                transpose 0,1 as c1
++>                transpose 0,1 as c2
++>                transpose 0,1 as c3;
+
+--- 8 row(s) inserted.
+>>
+>>insert with no rollback into F01s
++>                select c1+c2*10+c3*100+c4*1000+c5*10000+c6*100000
++>                      ,c1
++>                      ,c1+c2*10
++>                      ,c1+c2*10+c3*100
++>                      ,c1
++>                      ,c1+c2*10
++>                      ,c1+c2*10+c3*100
++>                      ,c1
++>                      ,c1+c2*10
++>                      ,c1+c2*10+c3*100
++>                      ,c1
++>                      ,c1+c2*10
++>                      ,mod(c1+c2*100+c3*100,200)
++>                      ,mod(c1,3)
++>                      ,mod(c1,6)
++>                      ,mod(c1+c2*10,5)
++>                      ,c1
++>                      ,c1
++>                      ,c1+c2*10
++>                from (values(1)) T
++>                transpose 0,1 as c1
++>                transpose 0,1 as c2
++>                transpose 0,1 as c3
++>                transpose 0,1 as c4
++>                transpose 0 as c5
++>                transpose 0 as c6
++>                ;
+
+--- 16 row(s) inserted.
+>>
+>>prepare x1 from
++>                        select F01s.val01, TD03.val01
++>                        From F01s 
++>                        full outer join
++>                          (select D03s.val01,count(D03s.pk)
++>                          from D03s
++>                          group by D03s.val01) as TD03(val01,pk) 
++>                        on (TD03.pk=F01s.fk_d03
++>                          AND TD03.pk>0 );
+
+--- SQL command prepared.
+>>
+>>execute x1;
+
+VAL01        VAL01      
+-----------  -----------
+
+          0            ?
+         10            ?
+         10            ?
+          1            ?
+          0            ?
+          1            ?
+          0            ?
+          1            ?
+         11            ?
+         11            ?
+         10            ?
+          1            ?
+          0            ?
+         11            ?
+         10            ?
+         11            ?
+          ?            1
+          ?            0
+
+--- 18 row(s) selected.
+>>
+>>prepare x1v from
++>                        select F01s.val01, F01s.fk_d03, TD03.pk, TD03.val01
++>                        From F01s 
++>                        full outer join
++>                          (select D03s.val01,count(D03s.pk)
++>                          from D03s
++>                          group by D03s.val01) as TD03(val01,pk) 
++>                        on (TD03.pk=F01s.fk_d03);
+
+--- SQL command prepared.
+>>
+>>execute x1v;
+
+VAL01        FK_D03       PK                    VAL01      
+-----------  -----------  --------------------  -----------
+
+          0          100                     ?            ?
+         10          110                     ?            ?
+         10           10                     ?            ?
+          1          101                     ?            ?
+          0          100                     ?            ?
+          1          101                     ?            ?
+          0            0                     ?            ?
+          1            1                     ?            ?
+         11          111                     ?            ?
+         11          111                     ?            ?
+         10          110                     ?            ?
+          1            1                     ?            ?
+          0            0                     ?            ?
+         11           11                     ?            ?
+         10           10                     ?            ?
+         11           11                     ?            ?
+          ?            ?                     4            1
+          ?            ?                     4            0
+
+--- 18 row(s) selected.
 >>
 >>obey TEST002(BR0198_MULTI);
 >>select count(*) from T002T1;
diff --git a/core/sql/regress/executor/TEST002 b/core/sql/regress/executor/TEST002
index a043b82..07a4122 100755
--- a/core/sql/regress/executor/TEST002
+++ b/core/sql/regress/executor/TEST002
@@ -65,6 +65,37 @@
 create table t002tab2 (char_1 CHAR(1),
                        numeric_1 NUMERIC(4, 0));
 
+-- added for JIRA TRAFODION-2843
+Create table D03s
+                (
+                pk int not null not droppable primary key
+                , val01 int
+                , val02 int
+                );
+
+Create table F01s
+                (
+                  pk int not null not droppable primary key
+                , fk_d01 int not null
+                , fk_d02 int not null
+                , fk_d03 int not null
+                , fk_d04 int not null
+                , fk_d05 int not null
+                , fk_d06 int not null
+                , fk_d07 int not null
+                , fk_d08 int not null
+                , fk_d09 int not null
+                , fk_d10 int not null
+                , val01 int
+                , val02 int
+                , val01_d01 int
+                , val02_d01 int
+                , val01_d02 int
+                , val02_d02 int
+                , val01_d03 int
+                , val02_d03 int
+                ) salt using 8 partitions;
+
 ?section Genesis_10_970911_6859
 ?ifMX
 create view t002v(w,x) as values(1,11);	-- should work
@@ -1155,6 +1186,66 @@
 select * from regexp_test where c1 regexp '[^\';
 drop table regexp_test;
 ------------------------------------------------------------------------
+-- added for JIRA TRAFODION-2843
+
+insert into D03s
+                select c1+c2*10+c3*100, c1, c1+c2*10
+                from (values(1)) T
+                transpose 0,1 as c1
+                transpose 0,1 as c2
+                transpose 0,1 as c3;
+
+insert with no rollback into F01s
+                select c1+c2*10+c3*100+c4*1000+c5*10000+c6*100000
+                      ,c1
+                      ,c1+c2*10
+                      ,c1+c2*10+c3*100
+                      ,c1
+                      ,c1+c2*10
+                      ,c1+c2*10+c3*100
+                      ,c1
+                      ,c1+c2*10
+                      ,c1+c2*10+c3*100
+                      ,c1
+                      ,c1+c2*10
+                      ,mod(c1+c2*100+c3*100,200)
+                      ,mod(c1,3)
+                      ,mod(c1,6)
+                      ,mod(c1+c2*10,5)
+                      ,c1
+                      ,c1
+                      ,c1+c2*10
+                from (values(1)) T
+                transpose 0,1 as c1
+                transpose 0,1 as c2
+                transpose 0,1 as c3
+                transpose 0,1 as c4
+                transpose 0 as c5
+                transpose 0 as c6
+                ;
+
+prepare x1 from
+                        select F01s.val01, TD03.val01
+                        From F01s 
+                        full outer join
+                          (select D03s.val01,count(D03s.pk)
+                          from D03s
+                          group by D03s.val01) as TD03(val01,pk) 
+                        on (TD03.pk=F01s.fk_d03
+                          AND TD03.pk>0 );
+
+execute x1;
+
+prepare x1v from
+                        select F01s.val01, F01s.fk_d03, TD03.pk, TD03.val01
+                        From F01s 
+                        full outer join
+                          (select D03s.val01,count(D03s.pk)
+                          from D03s
+                          group by D03s.val01) as TD03(val01,pk) 
+                        on (TD03.pk=F01s.fk_d03);
+
+execute x1v;
 
 obey TEST002(BR0198_MULTI);
 obey TEST002(BR0198_EMPTY);
@@ -1212,6 +1303,9 @@
 drop table t002_inner_lower_hj ;
 drop table t002_inner_upper_hj ;
 
+drop table F01s;
+drop table D03s;
+
 ?section clnup_end
 
 
diff --git a/core/sql/regress/hive/EXPECTED005 b/core/sql/regress/hive/EXPECTED005
index f91b49a..493fe4e 100644
--- a/core/sql/regress/hive/EXPECTED005
+++ b/core/sql/regress/hive/EXPECTED005
@@ -1002,6 +1002,8 @@
   stored as textfile
 ;
 
+/* Trafodion DDL */
+
 --- SQL operation complete.
 >>
 >>truncate hive.hive.thive_insert_smallint;
@@ -1111,6 +1113,8 @@
   stored as textfile
 ;
 
+/* Trafodion DDL */
+
 --- SQL operation complete.
 >>cqd hive_insert_error_mode '1';
 
diff --git a/core/sql/regress/hive/EXPECTED007 b/core/sql/regress/hive/EXPECTED007
index 5bc2c3a..06d035b 100644
--- a/core/sql/regress/hive/EXPECTED007
+++ b/core/sql/regress/hive/EXPECTED007
@@ -47,6 +47,8 @@
   stored as textfile
 ;
 
+/* Trafodion DDL */
+
 REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.THIVE1;
 /* ObjectUID = 7080927501909560103 */
 
@@ -257,6 +259,8 @@
   stored as textfile
 ;
 
+/* Trafodion DDL */
+
 REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.THIVE2;
 /* ObjectUID = 1928809434068290686 */
 
@@ -1057,11 +1061,11 @@
   stored as textfile
 ;
 
+/* Trafodion DDL */
+
 REGISTER HIVE TABLE HIVE.HIVESCH007.THIVE9;
 /* ObjectUID = 6918234965366838662 */
 
-/* Trafodion DDL */
-
 CREATE EXTERNAL TABLE THIVE9
   FOR HIVE.HIVESCH007.THIVE9
 ;
@@ -1126,6 +1130,8 @@
   stored as textfile
 ;
 
+/* Trafodion DDL */
+
 --- SQL operation complete.
 >>showstats for table hive.hivesch007.thive9 on every column;
 
diff --git a/core/sql/regress/hive/EXPECTED009 b/core/sql/regress/hive/EXPECTED009
index 92af6e4..a59e5dd 100644
--- a/core/sql/regress/hive/EXPECTED009
+++ b/core/sql/regress/hive/EXPECTED009
@@ -636,7 +636,6 @@
   fragment_type .......... master
   affinity_value ....... ###
   max_max_cardinality    ###
-  total_overflow_size    ###
   xn_access_mode ......... read_only
   xn_autoabort_interval  ###
   auto_query_retry ....... enabled
@@ -1000,11 +999,11 @@
   stored as textfile
 ;
 
-REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.DATE_DIM;
-/* ObjectUID = 1585406280815125826 */
-
 /* Trafodion DDL */
 
+REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.DATE_DIM;
+/* ObjectUID = 4145713645956211204 */
+
 CREATE EXTERNAL TABLE DATE_DIM
   (
     D_DATE_SK                        INT DEFAULT NULL
@@ -1083,7 +1082,6 @@
   fragment_type .......... master
   affinity_value ....... ###
   max_max_cardinality    ###
-  total_overflow_size    ###
   xn_access_mode ......... read_only
   xn_autoabort_interval  ###
   auto_query_retry ....... enabled
@@ -1235,11 +1233,11 @@
   stored as textfile
 ;
 
-REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.DATE_DIM;
-/* ObjectUID = 1585406280815125826 */
-
 /* Trafodion DDL */
 
+REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.DATE_DIM;
+/* ObjectUID = 4145713645956211204 */
+
 CREATE EXTERNAL TABLE DATE_DIM
   (
     D_DATE_SK                        INT DEFAULT NULL
diff --git a/core/sql/regress/hive/EXPECTED030 b/core/sql/regress/hive/EXPECTED030
index e0b1132..d40e041 100644
--- a/core/sql/regress/hive/EXPECTED030
+++ b/core/sql/regress/hive/EXPECTED030
@@ -803,7 +803,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinal  1,440,202
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (-1(4 times))
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -923,7 +922,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -1287,7 +1285,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinality  1,698
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (-1(4 times))
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -1434,7 +1431,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinal  2,880,404
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -1830,7 +1826,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinality    271
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2084,7 +2079,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinality    271
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2378,7 +2372,6 @@
   statement_index ........ 0
   affinity_value ......... 0
   max_max_cardinal  2,880,404
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (-1(4 times))
   esp_3_node_map ......... (-1(4 times))
   xn_access_mode ......... read_only
diff --git a/core/sql/regress/privs1/EXPECTED136 b/core/sql/regress/privs1/EXPECTED136
index 7d6b3ef..ccd9fb2 100755
--- a/core/sql/regress/privs1/EXPECTED136
+++ b/core/sql/regress/privs1/EXPECTED136
@@ -600,7 +600,7 @@
 >>
 >>grant create on schema $$TEST_CATALOG$$.t136sch to test136_user1;
 
-*** ERROR[4222] The DDL feature is not supported in this software version.
+*** ERROR[4222] The DDL feature is not supported in this software version or edition.
 
 *** ERROR[8822] The statement was not prepared.
 
@@ -668,7 +668,7 @@
 --- SQL operation complete.
 >>grant create on schema $$TEST_CATALOG$$.t136sch to test136_user1;
 
-*** ERROR[4222] The DDL feature is not supported in this software version.
+*** ERROR[4222] The DDL feature is not supported in this software version or edition.
 
 *** ERROR[8822] The statement was not prepared.
 
@@ -679,7 +679,7 @@
 --- SQL operation complete.
 >>revoke create on schema $$TEST_CATALOG$$.t136sch from test136_user1;
 
-*** ERROR[4222] The DDL feature is not supported in this software version.
+*** ERROR[4222] The DDL feature is not supported in this software version or edition.
 
 *** ERROR[8822] The statement was not prepared.
 
diff --git a/core/sql/regress/seabase/EXPECTED010 b/core/sql/regress/seabase/EXPECTED010
index a708caf..ab7f2f3 100644
--- a/core/sql/regress/seabase/EXPECTED010
+++ b/core/sql/regress/seabase/EXPECTED010
@@ -241,7 +241,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -325,7 +324,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -407,7 +405,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 10
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -491,7 +488,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -838,7 +834,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -918,7 +913,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -998,7 +992,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -1077,7 +1070,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -1185,7 +1177,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -1296,7 +1287,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -1377,7 +1367,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -1458,7 +1447,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -1538,7 +1526,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -1647,7 +1634,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -2069,7 +2055,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2153,7 +2138,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2234,7 +2218,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 10
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2317,7 +2300,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2624,7 +2606,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2704,7 +2685,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2796,7 +2776,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -2878,7 +2857,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -2991,7 +2969,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -3104,7 +3081,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -3186,7 +3162,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -3268,7 +3243,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -3349,7 +3323,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -3460,7 +3433,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -3883,7 +3855,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -3967,7 +3938,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4049,7 +4019,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 10
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4133,7 +4102,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4441,7 +4409,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4522,7 +4489,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4603,7 +4569,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4683,7 +4648,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -4793,7 +4757,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -4903,7 +4866,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -4985,7 +4947,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -5067,7 +5028,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -5148,7 +5108,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -5259,7 +5218,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -5681,7 +5639,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -5765,7 +5722,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -5846,7 +5802,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 10
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -5929,7 +5884,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6275,7 +6229,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6354,7 +6307,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6433,7 +6385,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6511,7 +6462,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -6618,7 +6568,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -6730,7 +6679,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6810,7 +6758,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6890,7 +6837,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -6969,7 +6915,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -7077,7 +7022,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -7523,7 +7467,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -7607,7 +7550,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -7689,7 +7631,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 10
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -7773,7 +7714,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8120,7 +8060,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8200,7 +8139,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8280,7 +8218,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8359,7 +8296,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -8467,7 +8403,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -8578,7 +8513,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8659,7 +8593,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8740,7 +8673,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -8820,7 +8752,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -8929,7 +8860,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -9351,7 +9281,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -9435,7 +9364,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -9516,7 +9444,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 10
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -9599,7 +9526,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 1
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -9945,7 +9871,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -10024,7 +9949,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -10103,7 +10027,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -10181,7 +10104,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -10288,7 +10210,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -10400,7 +10321,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -10480,7 +10400,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 5
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -10560,7 +10479,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
@@ -10639,7 +10557,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 4
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -10747,7 +10664,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality .... 2
-  total_overflow_size .... 0.00 KB
   upd_action_on_error .... xn_rollback
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
diff --git a/core/sql/regress/seabase/EXPECTED011 b/core/sql/regress/seabase/EXPECTED011
index a35bcfd..56c32a0 100644
--- a/core/sql/regress/seabase/EXPECTED011
+++ b/core/sql/regress/seabase/EXPECTED011
@@ -163,7 +163,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinality ... 11
-  total_overflow_size .... 0.00 KB
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
   auto_query_retry ....... enabled
diff --git a/core/sql/regress/seabase/EXPECTED016 b/core/sql/regress/seabase/EXPECTED016
index 4be8bfc..05ce633 100644
--- a/core/sql/regress/seabase/EXPECTED016
+++ b/core/sql/regress/seabase/EXPECTED016
@@ -201,7 +201,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -349,7 +348,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -497,7 +495,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -645,7 +642,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -793,7 +789,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -941,7 +936,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
@@ -1089,7 +1083,6 @@
   fragment_type .......... master
   affinity_value ......... 0
   max_max_cardinal  1,000,000,000
-  total_overflow_size .... 0.00 KB
   esp_2_node_map ......... (\NSK:-1:-1:-1:-1)
   xn_access_mode ......... read_only
   xn_autoabort_interval    0
diff --git a/core/sql/regress/seabase/EXPECTED022 b/core/sql/regress/seabase/EXPECTED022
index 879a079..2a0251d 100644
--- a/core/sql/regress/seabase/EXPECTED022
+++ b/core/sql/regress/seabase/EXPECTED022
@@ -411,13 +411,13 @@
 --- 0 row(s) inserted.
 >>insert into hbase."_ROW_".t022hbt1 values ('2', '100');
 
-*** ERROR[1429] Inserts into _ROW_ format external hbase tables can only use the VALUES clause and must use the column_create function to create values.
+*** ERROR[1429] Inserts into _ROW_ format external HBase tables can only use the VALUES clause and must use the column_create function to create values.
 
 *** ERROR[8822] The statement was not prepared.
 
 >>insert into hbase."_ROW_".t022hbt1 select * from hbase."_ROW_".t022hbt2;
 
-*** ERROR[1429] Inserts into _ROW_ format external hbase tables can only use the VALUES clause and must use the column_create function to create values.
+*** ERROR[1429] Inserts into _ROW_ format external HBase tables can only use the VALUES clause and must use the column_create function to create values.
 
 *** ERROR[8822] The statement was not prepared.
 
diff --git a/core/sql/regress/seabase/EXPECTED025 b/core/sql/regress/seabase/EXPECTED025
index df98a2e..677cafa 100644
--- a/core/sql/regress/seabase/EXPECTED025
+++ b/core/sql/regress/seabase/EXPECTED025
@@ -328,7 +328,7 @@
 +>                      primary key(a) )
 +>;
 
-*** ERROR[1510] IDENTITY column  can be of the following data types only: LARGEINT, unsigned INTEGER and unsigned SMALL INT.
+*** ERROR[1510] IDENTITY column  can be of the following data types only: LARGEINT, INTEGER UNSIGNED and SMALLINT UNSIGNED.
 
 --- SQL operation failed with errors.
 >>
@@ -348,7 +348,7 @@
 +>--HASH2 PARTITION BY(a)
 +>                    ;
 
-*** ERROR[1510] IDENTITY column  can be of the following data types only: LARGEINT, unsigned INTEGER and unsigned SMALL INT.
+*** ERROR[1510] IDENTITY column  can be of the following data types only: LARGEINT, INTEGER UNSIGNED and SMALLINT UNSIGNED.
 
 --- SQL operation failed with errors.
 >>-- (ADD LOCATION $$partition$$);
@@ -369,7 +369,7 @@
 +>--HASH2 PARTITION BY(a)
 +>                    ;
 
-*** ERROR[1510] IDENTITY column  can be of the following data types only: LARGEINT, unsigned INTEGER and unsigned SMALL INT.
+*** ERROR[1510] IDENTITY column  can be of the following data types only: LARGEINT, INTEGER UNSIGNED and SMALLINT UNSIGNED.
 
 --- SQL operation failed with errors.
 >>-- (ADD LOCATION $$partition$$);
@@ -1896,7 +1896,7 @@
 >>insert into T025T003 values  	
 +>(DEFAULT,-3,0,1,'dasf',2.1,'dfaf','dfa','dfa','d');
 
-*** ERROR[8101] The operation is prevented by check constraint TRAFODION.S025.T025T003_662223558_9798 on table TRAFODION.S025.T025T003.
+*** ERROR[8101] The operation is prevented by check constraint TRAFODION.S025.T025T003_918519353_6685 on table TRAFODION.S025.T025T003.
 
 --- 0 row(s) inserted.
 >>
@@ -1906,7 +1906,7 @@
 +>(DEFAULT,-2,1,1,'dasf',2.1,'dfaf','dfa','dfa','d'),	
 +>(DEFAULT,-1,2,1,'dasf',2.1,'dfaf','dfa','dfa','d');
 
-*** ERROR[8101] The operation is prevented by check constraint TRAFODION.S025.T025T003_662223558_9798 on table TRAFODION.S025.T025T003.
+*** ERROR[8101] The operation is prevented by check constraint TRAFODION.S025.T025T003_918519353_6685 on table TRAFODION.S025.T025T003.
 
 --- 0 row(s) inserted.
 >>
@@ -2628,7 +2628,7 @@
 >>invoke T025T00V2;
 
 -- Definition of Trafodion volatile table T025T00V2
--- Definition current  Wed Apr 19 21:44:27 2017
+-- Definition current  Sun Jan  7 23:18:49 2018
 
   (
     SURROGATE_KEY                    SMALLINT UNSIGNED GENERATED BY DEFAULT AS
@@ -2816,7 +2816,7 @@
 >>invoke t025t010;
 
 -- Definition of Trafodion table TRAFODION.S025.T025T010
--- Definition current  Wed Apr 19 21:46:42 2017
+-- Definition current  Sun Jan  7 23:20:51 2018
 
   (
     A                                LARGEINT GENERATED ALWAYS AS IDENTITY NOT
diff --git a/core/sql/regress/seabase/EXPECTED027 b/core/sql/regress/seabase/EXPECTED027
index 21b74c9..67bcfd9 100644
--- a/core/sql/regress/seabase/EXPECTED027
+++ b/core/sql/regress/seabase/EXPECTED027
@@ -714,7 +714,7 @@
 >>-- cannot have col fam for aligned format tables
 >>create table t027t02 ("cf".a int) attribute aligned format;
 
-*** ERROR[4223] Column Family specification on columns of an aligned format table is not supported in this software version.
+*** ERROR[4223] Column Family specification on columns of an aligned format table is not supported in this software version or edition.
 
 --- SQL operation failed with errors.
 >>
diff --git a/core/sql/regress/seabase/EXPECTED031 b/core/sql/regress/seabase/EXPECTED031
index 2619e1a..43a3d2d 100644
--- a/core/sql/regress/seabase/EXPECTED031
+++ b/core/sql/regress/seabase/EXPECTED031
@@ -1089,11 +1089,11 @@
   stored as textfile
 ;
 
-REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.T031HIVET1;
-/* ObjectUID = 6327691840910526408 */
-
 /* Trafodion DDL */
 
+REGISTER /*INTERNAL*/ HIVE TABLE HIVE.HIVE.T031HIVET1;
+/* ObjectUID = 3677902230547698142 */
+
 CREATE EXTERNAL TABLE T031HIVET1
   FOR HIVE.HIVE.T031HIVET1
 ;
diff --git a/core/sql/regress/seabase/EXPECTED032 b/core/sql/regress/seabase/EXPECTED032
index 5a4aa97..b975865 100644
--- a/core/sql/regress/seabase/EXPECTED032
+++ b/core/sql/regress/seabase/EXPECTED032
@@ -333,8 +333,8 @@
 (EXPR)      
 ------------
 
-          11
-          21
+11
+21
 
 --- 2 row(s) selected.
 >>insert into t032t1 values ('3', 3, 3, date '2016-08-15',  time '10:11:12',
diff --git a/core/sql/runtimestats/SqlStats.cpp b/core/sql/runtimestats/SqlStats.cpp
index 4e5a4a7..0b64e02 100644
--- a/core/sql/runtimestats/SqlStats.cpp
+++ b/core/sql/runtimestats/SqlStats.cpp
@@ -64,10 +64,7 @@
       getStatsSegmentId(),
       baseAddr,
       ((sizeof(StatsGlobals)+16-1)/16)*16,
-      maxSegSize,
-      maxSegSize,
-      &segGlobals_,
-      0), // Zero secondary segments
+      maxSegSize),
       recentSikeys_(NULL),
       newestRevokeTimestamp_(0), // all new compilers are current.
       statsArray_(NULL)
diff --git a/core/sql/runtimestats/SqlStats.h b/core/sql/runtimestats/SqlStats.h
index acb75fc..0377c3d 100644
--- a/core/sql/runtimestats/SqlStats.h
+++ b/core/sql/runtimestats/SqlStats.h
@@ -507,7 +507,6 @@
   short cpu_;
   pid_t semPid_;    // Pid of the process that holds semaphore lock - This element is used for debugging purpose only
   Int64 semPidCreateTime_; // Creation timestamp - pid recycle workaround. 
-  NASegGlobals segGlobals_;
   NAHeap statsHeap_;
   NABoolean isSscpInitialized_;
   short rtsEnvType_; // 1 - Global Environment
diff --git a/core/sql/sort/Qsort.cpp b/core/sql/sort/Qsort.cpp
index a98dd14..d35cd7c 100644
--- a/core/sql/sort/Qsort.cpp
+++ b/core/sql/sort/Qsort.cpp
@@ -83,7 +83,6 @@
   rootRecord_ = (Record *)heap_->allocateMemory(sizeof(Record) * allocRunSize_);  
   recKeys_    = (RecKeyBuffer *)heap_->allocateMemory(sizeof(RecKeyBuffer) * allocRunSize_);  
  
-  // Below asserts useful in debug mode. Also asserts if longjmp did not happen.
   ex_assert(rootRecord_!= NULL, "Sort: Initial rootRecord_ allocation failed"); 
   ex_assert(recKeys_  != NULL, "Sort: Initial recKeys_ allocation failed");  
 
diff --git a/core/sql/sort/Record.cpp b/core/sql/sort/Record.cpp
index cf4bf07..7d3e1a9 100644
--- a/core/sql/sort/Record.cpp
+++ b/core/sql/sort/Record.cpp
@@ -48,7 +48,6 @@
     allocatedRec_ = FALSE_L;  
   }
   else{
-    //Allocation failure will cause longjmp to jmp handler in ex_sort.
     rec_  = new (heap_) char[recSize_+1];
     ex_assert(rec_ != NULL, "Record::Record: rec_ is NULL");
     allocatedRec_ = TRUE_L;
@@ -93,7 +92,6 @@
     allocatedRec_ = FALSE_L; 
  }
  else {
-    //Allocation failure will cause longjmp to jmp handler in ex_sort.
     rec_  = new (heap_) char[recsize+1];
     ex_assert(rec_ != NULL, "Record::initialize: rec_ is NULL");
     allocatedRec_ = TRUE_L;
diff --git a/core/sql/sort/TourTree.cpp b/core/sql/sort/TourTree.cpp
index 9b523d4..071da8d 100644
--- a/core/sql/sort/TourTree.cpp
+++ b/core/sql/sort/TourTree.cpp
@@ -85,7 +85,6 @@
       assert(scratch_ != NULL);
     }
 
-   //Allocation failure will cause longjmp to jmp handler in ex_sort.
    rootNode_ = (TreeNode*)heap_->allocateMemory(numRuns_ * sizeof(TreeNode));
    rootRecord_ = (Record*)heap_->allocateMemory(numRuns_ * sizeof(Record));
    keyOfLastWinner_ = (char*)heap_->allocateMemory(sizeof(char) * keysize);
diff --git a/core/sql/sqlcomp/CmpDDLCatErrorCodes.h b/core/sql/sqlcomp/CmpDDLCatErrorCodes.h
index 52eca31..e00eb91 100644
--- a/core/sql/sqlcomp/CmpDDLCatErrorCodes.h
+++ b/core/sql/sqlcomp/CmpDDLCatErrorCodes.h
@@ -179,52 +179,49 @@
                   , CAT_CANNOT_ALTER_WRONG_TYPE                   = 1146
                   , CAT_SYSTEM_COL_NOT_ALLOWED_IN_UNIQUE_CNSTRNT  = 1147
                   , CAT_SYSTEM_COL_NOT_ALLOWED_IN_RI_CNSTRNT      = 1148
-                  , CAT_COLUMN_DOES_NOT_EXIST_IN_TABLE            = 1149
-                  , CAT_COULD_NOT_GET_VOLUME_NAMES_FOR_POS        = 1150
-                  , CAT_POS_WAS_NOT_APPLIED                       = 1151
-                  , CAT_PARTITION_OFFLINE                         = 1152
-                  , CAT_COLSZ_NOTSET_IN_KEY                       = 1153
-                  , CAT_POS_TABLE_SIZE_TOO_BIG                    = 1154
+                  // unused                                       = 1149
+                  // unused                                       = 1150
+                  // unused                                       = 1151
+                  // unused                                       = 1152
+                  // unused                                       = 1153
+                  // unused                                       = 1154
                   , CAT_NOT_A_SYNONYM                             = 1155
                   , CAT_INCORRECT_OBJECT_TYPE                     = 1156
                   , CAT_ALTERING_TO_SAME_VALUE                    = 1157
-                  , CAT_OBJECT_ALREADY_EXISTS                     = 1158
-                  , CAT_SYNONYM_DOES_NOT_EXIST                    = 1159
-                  , CAT_INVALID_STORE_BY                          = 1160
-                  , CAT_SYS_COL_NOT_ALLOWED_IN_PARTITIONBY_CLAUSE = 1161
-                  , CAT_ID_ALREADY_OWNS_OBJECT                    = 1162
-                  , CAT_NOT_ALLOWED_TO_CHANGE_OWNER               = 1163
-                  , CAT_SEND_CONTROLS_FAILED                      = 1164
-                  , CAT_PARALLEL_OP_FAILED                        = 1165
-                  , CAT_DROP_FAILED_WITH_CLEANUP                  = 1166
-                  , CAT_OWNER_MUST_BE_SUPER_OR_SERVICES           = 1167
-                  , CAT_INVALID_OBJECT_UID                        = 1168
-                  , CAT_MISSING_SCHEMA_SECURITY                   = 1169
-                  , CAT_POS_INVALID_NUM_DISK_POOLS                = 1170
-                  , CAT_FETCH_DISK_SIZE_ERROR                     = 1171
-                  , CAT_CANNOT_SPECIFY_DDL_AT_OBJECT_LEVEL        = 1172
-                  , CAT_NO_VOLUMES_FOR_VOLATILE_TABLES            = 1173
+                  // unused                                       = 1158
+                  // unused                                       = 1159
+                  // unused                                       = 1160
+                  // unused                                       = 1161
+                  // unused                                       = 1162
+                  // unused                                       = 1163
+                  // unused                                       = 1164
+                  // unused                                       = 1165
+                  // unused                                       = 1166
+                  // unused                                       = 1167
+                  // unused                                       = 1168
+                  // unused                                       = 1169
+                  // unused                                       = 1170
+                  // unused                                       = 1171
+                  // unused                                       = 1172
+                  // unused                                       = 1173
                   , CAT_INVALID_COLUMN_DATATYPE                   = 1174
-                  , CAT_NO_IDENTITY_COLUMN_FOR_TABLE              = 1175
-                  , CAT_POS_MTS_SIZE_BIGGER_THAN_AMTS_SIZE        = 1176
-                  , CAT_UNKNOWN_TABLE_TYPE                        = 1177
-                  , CAT_INVALID_CATALOG_UID                       = 1178
-                  , CAT_INVALID_OBJECT_INFO                       = 1179
-                  //
-                  // 1150-1180  Queuing and publish/subscribe errors
-                  //
+                  // unused                                       = 1175
+                  // unused                                       = 1176
+                  // unused                                       = 1177
+                  // unused                                       = 1178
+                  // unused                                       = 1179
                   , CAT_EXTERNAL_NAME_MISMATCH                    = 1180
                   , CAT_EXTERNAL_SCHEMA_NAME_TOO_LONG             = 1181
-                  , CAT_METADATA_SQL_ERROR                        = 1183
-                  , CAT_INSUFFICIENT_PRIV_ON_COLUMN               = 1184
-                  , CAT_LOCATION_INVALID_OR_MISSING               = 1185
+                  // unused                                       = 1182
+                  // unused                                       = 1183
+                  // unused                                       = 1184
+                  // unused                                       = 1185
                   , CAT_INCOMPATIBLE_DATA_TYPE_IN_DEFAULT_CLAUSE  = 1186
                   , CAT_RESERVED_METADATA_SCHEMA_NAME             = 1187
                   , CAT_RI_CIRCULAR_DEPENDENCY                    = 1188
-                  , CAT_VIEW_NAME_VALID                           = 1189
+                  // unused                                       = 1189
 
-                  , CAT_DROP_LABEL_ERROR_FELABELBAD               = 1194
-                  , CAT_INVALID_SYSTEM_NAME                       = 1196
+                  , CAT_INVALID_NUM_OF_SALT_PARTNS                = 1196
                   , CAT_INVALID_SALTED_UNIQUE_IDX                 = 1201
                   , CAT_INVALID_SALT_LIKE_CLAUSE                  = 1202
                   , CAT_INVALID_HBASE_OPTIONS_CLAUSE              = 1203
@@ -232,96 +229,91 @@
                   , CAT_COMPONENT_NOT_SYSTEM                      = 1221
                   , CAT_AUTHORIZATION_NOT_ENABLED                 = 1222
                   , CAT_CANT_GRANT_TO_SELF_OR_ROOT                = 1223
-                  , CAT_INVALID_TYPE_FOR_PARAM                    = 1224
-                  , CAT_MIXED_PRIVILEGES                          = 1225
-                  , CAT_NO_PRIVILEGES_SPECIFIED                   = 1226
+                  // unused                                       = 1224
+                  // unused                                       = 1225
+                  // unused                                       = 1226
                   , CAT_NO_UNREG_USER_HAS_PRIVS                   = 1227
                   , CAT_ROLE_HAS_PRIVS_NO_DROP                    = 1228
                   , CAT_OPTION_NOT_SUPPORTED                      = 1229
                   , CAT_BY_CLAUSE_IN_PRIVATE_SCHEMA               = 1230
                   , CAT_UNABLE_TO_CREATE_ROUTINE                  = 1231
-                  , CAT_UNABLE_TO_SAVE_DDL                        = 1232
-                  , CAT_CREATE_SCHEMA_IN_SYSCAT_IS_PROHIBITED     = 1233
-                  , CAT_LABEL_FAILED_DUE_TO_EXTENT_OR_MAXEXTENT   = 1235
-                  , CAT_IMPROPER_SCHEMA_NAME                      = 1236
+                  // unused                                       = 1232
+                  // unused                                       = 1233
+                  // unused                                       = 1235
+                  // unused                                       = 1236
                   , CAT_NON_ISO88591_RANGE_PARTITION_COLUMN       = 1240
-                  , CAT_INIT_AUTHORIZATION_FAILED                 = 1244
-                  , CAT_FIRST_KEY_VALUE_INVALID                   = 1245
-                  , CAT_FIRST_KEY_VALUE_INCONSISTENT              = 1246
-                  , CAT_PARTITION_NAME_ALREADY_EXISTS             = 1248
-                  , CAT_DDL_OPERATION_IN_PROGRESS                 = 1250
+                  // unused                                       = 1244
+                  // unused                                       = 1245
+                  // unused                                       = 1246
+                  // unused                                       = 1248
+                  // unused                                       = 1249
+                  // unused                                       = 1250
                   , CAT_SCALE_OF_DEFAULT_VALUE_ADJUSTED           = 1251
-                  , CAT_INVALID_INDEX_DATA                        = 1252
+                  // unused                                       = 1252
                   , CAT_DUPLICATE_UNIQUE_CONSTRAINT_ON_SAME_COL   = 1254
-                  , CAT_UNABLE_TO_SET_UDR_OPTIONS                 = 1261
-                  , CAT_SCHEMA_IN_TRANSITION                      = 1262
-                  , CAT_RESERVED_UMD_PREFIX                       = 1263
+                  // unused                                       = 1261
+                  // unused                                       = 1262
+                  // unused                                       = 1263
                   , CAT_DUPLICATE_PRIVILEGES                      = 1264
-                  , CAT_DUPLICATE_GRANTEES                        = 1265
+                  // unused                                       = 1265
                   , CAT_ONLY_EXECUTE_PRIVILEGE_IS_ALLOWED_FOR_ROUTINE = 1266
                   , CAT_PRIVILEGE_NOT_ALLOWED_FOR_THIS_OBJECT_TYPE = 1267
                   , CAT_DUPLICATE_COLUMN_NAMES                    = 1268
-                  , CAT_LABEL_ALLOCATE_FAILED_DUE_TO_FS_ERROR     = 1270
-                  , CAT_LABEL_ALLOCATE_FAILED_GREATER_THAN_MAXEXTENTS = 1271
-                  , CAT_NOT_LICENSED_FOR_SQLMX_DDL                = 1272
-                  , CAT_MAXEXT_LESS_THAN_ALLOCATED_EXTENTS        = 1273
-                  , CAT_WARNING_MAXEXT_RESET                      = 1274
-                  , CAT_CANNOT_DROP_DUE_TO_UNIQUE_CONSTRAINT      = 1275
-                  , CAT_UNRECOGNIZED_PARTITIONING_SCHEME          = 1277
-                  , CAT_ALL_SCHEMAS_OP_IN_PROGRESS                = 1278
+                  , CAT_RESERVED_COLUMN_NAME                      = 1269
+                  // unused                                       = 1270
+                  // unused                                       = 1271
+                  // unused                                       = 1272
+                  // unused                                       = 1273
+                  // unused                                       = 1274
+                  // unused                                       = 1275
+                  // unused                                       = 1277
+                  // unused                                       = 1278
                   , CAT_VOLATILE_OPERATION_ON_REGULAR_OBJECT      = 1279
                   , CAT_REGULAR_OPERATION_ON_VOLATILE_OBJECT      = 1280
 		  , CAT_LOB_COLUMN_IN_VOLATILE_TABLE              = 1282
-                  , CAT_NOT_DROPPABLE_TABLE                       = 1284
-                  , CAT_NOT_DROPPABLE_SCHEMA                      = 1285
-                  , CAT_ONLY_SUPER_ID_CAN_INITIALIZE_SECURITY     = 1287
-                  , CAT_UNABLE_TO_INITIALIZE_SECURITY             = 1288
+                  // unused                                       = 1284
+                  // unused                                       = 1285
+                  // unused                                       = 1287
+                  // unused                                       = 1288
                   , CAT_CANNOT_ALTER_DEFINITION_METADATA_SCHEMA   = 1289
-                  , CAT_DELETE_FROM_TRANSLATION_TABLE_FAILED      = 1292
-                  , CAT_WRONG_SCHEMA_VERSION                      = 1293
-                  , CAT_WRONG_ISO_MAPPING                         = 1294
-                  , CAT_COLUMN_WRONG_DEFAULT_TYPE                 = 1295
-                  , CAT_COLUMN_MISMATCHED_DEFAULT_TYPES           = 1296
-                  , CAT_VOLATILE_SCHEMA_PRESENT                   = 1297
+                  // unused                                       = 1292
+                  // unused                                       = 1293
+                  // unused                                       = 1294
+                  // unused                                       = 1295
+                  // unused                                       = 1296
+                  // unused                                       = 1297
                   , CAT_UNABLE_TO_ALTER_SCHEMA                    = 1298
 
-                  // Restrict and No Action referential action Messages.
-                  , CAT_REF_CONSTRAINT_NO_ACTION_NOT_SUPPORTED    = 1301
-                  , CAT_REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT    = 1302
+                  // unused                                       = 1301
+                  // unused                                       = 1302
 
-                  // Anchor file access error
-                  , CAT_ANCHOR_FILE_ERROR                         = 1304
+                  // unused                                       = 1304
+                  // unused                                       = 1305
+                  // unused                                       = 1306
+                  // unused                                       = 1307
+                  // unused                                       = 1309
+                  // unused                                       = 1310
 
-                  // Schema subvol error/warnings
-                  , CAT_DUP_SCHEMA_SUBVOL_SPECIFIED               = 1305
-                  , CAT_DUP_SCHEMA_SUBVOL_GENERATED               = 1306
-                  , CAT_METADATA_SCHEMA_SUBVOL                    = 1307
-                  , CAT_INVALID_OBJECT_TYPE                       = 1309
-                  , CAT_TRIGGER_UNSUPPORTED_IN_COMPOUNDCREATE     = 1310
-
-                  , CAT_UNABLE_TO_XXX_CONSTRAINT_DUE_TO_ERRORS    = 1311
-                  , CAT_UNABLE_TO_XXX_DUE_TO_ERRORS               = 1312
+                  // unused                                       = 1311
+                  // unused                                       = 1312
                   , CAT_NOT_ENFORCED_RI_CONSTRAINT_WARNING        = 1313
 
-                  // Alter catalog, disable creates
-                  , CAT_CREATE_OPERATION_DISABLED                 = 1314
-                  , CAT_NO_DEFINITION_SCHEMA                      = 1315
-
-                  // Publish/unpublish errors
-                  , CAT_PUBLISH_NOT_SYSTEM_VIEW                   = 1316
-                  , CAT_PUBLISH_NO_DBA_USER_DEFINED               = 1317
-                  , CAT_PUBLISH_SYNONYM_NAME_TOO_LONG             = 1318
-                  , CAT_PUBLISH_MISMATCH_COLUMN_TABLE_PRIVS       = 1319
-                  , CAT_PUBLISH_MISMATCH_COLUMN_COLUMN_PRIVS      = 1320
-                  , CAT_PUBLISH_NO_PUBLIC_SCHEMA                  = 1321
-                  , CAT_PUBLISH_VIEW_NOT_REFERENCING_OBJECT       = 1322
+                  // unused                                       = 1314
+                  // unused                                       = 1315
+                  // unused                                       = 1316
+                  // unused                                       = 1317
+                  // unused                                       = 1318
+                  // unused                                       = 1319
+                  // unused                                       = 1320
+                  // unused                                       = 1321
+                  // unused                                       = 1322
                   , CAT_UNABLE_TO_GRANT_PRIVILEGES                = 1323
-                  , CAT_NO_SCHEMA_WGO_ALLOWED                     = 1325
-
-                  , CAT_REGULAR_OPERATION_ON_INMEMORY_OBJECT      = 1326
-                  , CAT_INMEMORY_OPERATION_ON_REGULAR_OBJECT      = 1327
+                  // unused                                       = 1324
+                  // unused                                       = 1325
+                  // unused                                       = 1326
+                  // unused                                       = 1327
                   , CAT_INVALID_PRIV_FOR_OBJECT                   = 1328
-                  , CAT_UNABLE_TO_CREATE_METADATA_VIEWS           = 1329
+                  // unused                                       = 1329
                   , CAT_ROLE_IS_GRANTED_NO_REVOKE                 = 1330
                   , CAT_LDAP_USER_NOT_FOUND                       = 1331
                   , CAT_LDAP_COMM_ERROR                           = 1332
@@ -333,89 +325,88 @@
                   , CAT_ROLE_NOT_EXIST                            = 1338
                   , CAT_IS_NOT_A_ROLE                             = 1339
                   , CAT_IS_NOT_A_USER                             = 1340
-                  , CAT_NO_UNREG_USER_OWNS_CATALOG                = 1341
-                  , CAT_NO_UNREG_USER_OWNS_SCHEMA                 = 1342
+                  // unused                                       = 1341
+                  // unused                                       = 1342
                   , CAT_NO_UNREG_USER_OWNS_OBJECT                 = 1343
-                  , CAT_NO_UNREG_USER_HAS_SCHEMA_PRIVS            = 1344
-                  , CAT_NO_UNREG_USER_HAS_TABLE_PRIVS             = 1345
-                  , CAT_NO_UNREG_USER_HAS_COLUMN_PRIVS            = 1346
+                  // unused                                       = 1344
+                  // unused                                       = 1345
+                  // unused                                       = 1346
                   , CAT_NO_UNREG_USER_OWNS_ROLES                  = 1347
                   , CAT_ROLE_IS_GRANTED_NO_DROP                   = 1348
                   , CAT_NO_UNREG_USER_GRANTED_ROLES               = 1349
-                  , CAT_ROLE_NOT_GRANTED_TO_USER                  = 1350
+                  // unused                                       = 1350
                   , CAT_DUPLICATE_ROLES_IN_LIST                   = 1351
                   , CAT_DUPLICATE_USERS_IN_LIST                   = 1352
-                  , CAT_NO_ROLE_WGO_ALLOWED                       = 1353
-                  , CAT_NO_ROLE_SCHEMA_GRANT_ALLOWED              = 1354
+                  // unused                                       = 1353
+                  // unused                                       = 1354
                   , CAT_NO_GRANT_ROLE_TO_PUBLIC_OR_SYSTEM         = 1355
                   , CAT_COMPONENT_PRIVILEGE_CODE_EXISTS           = 1356
                   , CAT_COMPONENT_PRIVILEGE_NAME_EXISTS           = 1357
-                  , CAT_COMPONENT_PRIVILEGE_NOT_FOUND             = 1358
+                  // unused                                       = 1358
                   , CAT_INVALID_PRIVILEGE_FOR_GRANT_OR_REVOKE     = 1359
-                  , CAT_DEPENDENT_COMPONENT_PRIVILEGES_EXIST      = 1360
+                  // unused                                       = 1360
                   , CAT_LIBRARY_DOES_NOT_EXIST                    = 1361
-                  , CAT_NOT_A_LIBRARY                             = 1362
-                  , CAT_LIBRARY_EXISTS                            = 1363
+                  // unused                                       = 1362
+                  // unused                                       = 1363
                   , CAT_DEPENDENT_ROLE_PRIVILEGES_EXIST           = 1364
-                  , CAT_COULDNT_LOCK_PARTICIPATING_AUTH_ID        = 1365
+                  // unused                                       = 1365
                   , CAT_DEPENDENT_ROUTINES_EXIST                  = 1366
-                  , CAT_ROUTINE_USES_LIBRARY                      = 1367
-                  , CAT_LIBRARY_MUST_BE_IN_SAME_CATALOG           = 1368
-                  , CAT_ONLY_UPDATE_OR_USAGE_PRIV_FOR_LIBRARY     = 1369
+                  // unused                                       = 1367
+                  // unused                                       = 1368
+                  // unused                                       = 1369
                   , CAT_INVALID_CHARS_IN_AUTH_NAME                = 1370
-                  , CAT_ONLY_SELECT_OR_INSERT_FOR_TABLE           = 1371
-                  , CAT_ALTER_TABLE_INSERT_ONLY_FAILED            = 1372
-                  , CAT_AUDIT_NOT_A_VALID_BOOL_VALUE              = 1373
-                  , CAT_AUDIT_NOT_A_VALID_LOG_TYPE                = 1374
-                  , CAT_AUDIT_INPUT_TOO_LARGE                     = 1375
-                  , CAT_AUDIT_INVALID_COLUMN_NUMBER               = 1376
-                  , CAT_AUDIT_ALTER_CONFIG_FAILED                 = 1377
-                  , CAT_AUDIT_REFRESH_RANGE                       = 1378
-                  , CAT_AUDIT_AGING_RANGE                         = 1379
-                  , CAT_AUDIT_THRESHOLD_RANGE                     = 1380
-                  , CAT_AUDIT_COLUMN_VALUE_COUNT_MISMATCH         = 1381
+                  // unused                                       = 1371
+                  // unused                                       = 1372
+                  // unused                                       = 1373
+                  // unused                                       = 1374
+                  // unused                                       = 1375
+                  // unused                                       = 1376
+                  // unused                                       = 1377
+                  // unused                                       = 1378
+                  // unused                                       = 1379
+                  // unused                                       = 1380
+                  // unused                                       = 1381
                   , CAT_JAR_NOT_FOUND                             = 1382
-                  , CAT_DISABLE_AUTHNAME_CHANGES                  = 1383
-                  , CAT_POS_UNEQUABLE_DISK_POOL_DEFINED           = 1384
-                  , CAT_POS_DISK_POOL_MAPPING_FAILED              = 1385
+                  // unused                                       = 1383
+                  // unused                                       = 1384
+                  // unused                                       = 1385
                   , CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION        = 1389
                   , CAT_TRAFODION_OBJECT_EXISTS                   = 1390
 
-                  , CAT_SHOWDDL_UNABLE_TO_CONVERT_COLUMN_DEFAULT_VALUE  = 1400
+                  // unused                                       = 1400
                   , CAT_UNABLE_TO_CONVERT_COLUMN_DEFAULT_VALUE_TO_CHARSET = 1401
                   , CAT_UNIQUE_INDEX_COLS_NOT_IN_DIV_EXPRS        = 1402
                   , CAT_ALTER_NOT_ALLOWED_IN_SMD                  = 1403
 
-                  // Detectable metadata inconsistencies
-                  , CAT_CATSYS_CATREF_MISMATCH                    = 1500
-                  , CAT_OBJECTS_REPLICAS_MISMATCH                 = 1502
-                  , CAT_OBJECTS_PARTITIONS_MISMATCH               = 1503
-                  , CAT_NO_OBJECTS_ROW_FOR_SMD                    = 1504
-                  , CAT_CATSYS_MISMATCH_ON_AUTOREF_NODE           = 1505
-                  , CAT_SCHEMA_MISMATCH_ON_AUTOREF_NODE           = 1506
+                  // unused                                       = 1500
+                  // unused                                       = 1502
+                  // unused                                       = 1503
+                  // unused                                       = 1504
+                  // unused                                       = 1505
+                  // unused                                       = 1506
 
                   // IDENTITY Column (Surrogate Key) related Errors.
                   , CAT_IDENTITY_COLUMN_DATATYPE_MISMATCH         = 1510
                   , CAT_IDENTITY_COLUMN_ONE_PER_TABLE             = 1511
-                  , CAT_IDENTITY_COLUMN_NOT_NULL_NOT_DROPPABLE    = 1512
-                  , CAT_IDENTITY_COLUMN_HASH_PARTITIONED_ONLY     = 1513
+                  // unused                                       = 1512
+                  // unused                                       = 1513
                   , CAT_IDENTITY_COLUMN_NO_ALTER_TABLE            = 1514
-                  , CAT_IDENTITY_COLUMN_2                         = 1515
+                  // unused                                       = 1515
 
-                  , CAT_CANNOT_DISABLE_NOT_DROPPABLE_CONSTRAINT   = 1517
-                  , CAT_CANNOT_ENABLE_CONSTRAINT                  = 1518
-                  , CAT_CANNOT_DISABLE_FK_CONSTRAINT              = 1519
-                  , CAT_SCHEMA_MISMATCH_ON_EXCEPTION_TABLE        = 1521
-                  , CAT_NOT_AN_EXCEPTION_TABLE                    = 1522
-                  , CAT_EXCEPTION_TABLE_DOES_NOT_EXIST            = 1523
-                  , CAT_NO_EXCEPTIONS_ON_METADATA                 = 1524
-                  , CAT_NO_EXCEPTIONS_ON_EXCEPTIONS               = 1525
-                  , CAT_EXCEPTION_NOT_IN_METADATA_TABLE           = 1526
-                  , CAT_NO_KEY_ON_BASE_TABLE                      = 1527
-                  , CAT_NO_EXCEPTION_TABLE_FOR_BASE_TABLE         = 1528
+                  // unused                                       = 1517
+                  // unused                                       = 1518
+                  // unused                                       = 1519
+                  // unused                                       = 1521
+                  // unused                                       = 1522
+                  // unused                                       = 1523
+                  // unused                                       = 1524
+                  // unused                                       = 1525
+                  // unused                                       = 1526
+                  // unused                                       = 1527
+                  // unused                                       = 1528
 
-                  , CAT_SG_NOT_IN_METADATA                        = 1529
-                  , CAT_LDAP_DEFAULTCONFIG_INSERT_ERROR           = 1530
+                  // unused                                       = 1529
+                  // unused                                       = 1530
 
                   , CAT_NO_POPULATE_VOLATILE_INDEX                = 1540
                   , CAT_LOB_COL_CANNOT_BE_INDEX_OR_KEY            = 1541
@@ -425,48 +416,23 @@
                   , CAT_SG_INCREMENT_BY_ZERO_ERROR                = 1571
                   , CAT_SG_NEGATIVE_ERROR                         = 1572
                   , CAT_SG_STARTWITH_MINVALUE_MAXVALUE_ERROR      = 1573
-                  , CAT_SG_CYCLE_NOT_SUPPORTED_ERROR              = 1574
+                  // unused                                       = 1574
                   , CAT_SG_INCREMENT_BY_MAXVALUE_ERROR            = 1575
                   , CAT_SG_MAXIMUM_DATATYPE_ERROR                 = 1576
 
-                  // Collation for Catlog & Schema
-                  , CAT_CATALOG_COLLATION_NOT_SUPPORTED           = 1580
-                  , CAT_SCHEMA_COLLATION_NOT_SUPPORTED            = 1581
-
                   // Sequence Generator alter table errors
                   , CAT_SG_ALTER_NOT_IDENTITY_COLUMN_ERROR        = 1590
-                  , CAT_SG_ALTER_MAXVALUE_NOT_GT_ERROR            = 1591
+                  // unused                                       = 1591
                   , CAT_SG_ALTER_UNSUPPORTED_OPTION_ERROR         = 1592
-                  , CAT_SG_ALTER_TOO_MANY_OPTIONS_ERROR           = 1593
-                  , CAT_SG_ALTER_CURRENT_VALUE_ERROR              = 1594
-                  , CAT_SG_ALTER_NO_MAXVALUE_ERROR                = 1595
-                  , CAT_SG_ALTER_RECALIBRATION_ERROR              = 1596
-                  , CAT_SG_ALTER_RECALIBRATION_MAXIMUM_ERROR      = 1597
-                  , CAT_SG_ALTER_RECALIBRATION_CURRENT_ERROR      = 1598
-                  , CAT_SG_ALTER_RECALIBRATION_SPECIFIED_ERROR    = 1599
-                  , CAT_SG_ALTER_RECALIBRATION_LOCKING_ERROR      = 1600
-                  , CAT_SG_ALTER_RECALIBRATION_NO_SELECT_ERROR    = 1601
-
-                  // UDF related errors
-                  , CAT_NOT_UUDF_OBJECT                           = 1700
-                  , CAT_TOO_MANY_PASS_THRU_INPUTS                 = 1701
-                  , CAT_ONLY_STRING_LITERAL                       = 1702
-                  , CAT_BINARY_ONLY_OPTION_WITH_UCS2              = 1703
-                  , CAT_BINARY_ONLY_OPTION_WITHIN_VALUE_FROM_FILE_CLAUSE = 1704
-                  , CAT_UNABLE_TO_OPEN_FILE                       = 1705
-                  , CAT_UNABLE_TO_READ_FILE                       = 1706
-                  , CAT_RA_ALREADY_EXISTS_UUDF                    = 1707
-                  , CAT_EXCEEDS_NUMBER_OF_OUTPUT_VALUES           = 1708
-                  , CAT_UNABLE_TO_DROP_UUDF_BEING_USED_BY_RA      = 1709
-                  , CAT_PASS_THRU_INPUT_WRONG_POSITION_SPECIFIED  = 1710
-                  , CAT_POSITION_SPECIFIED_EXCEEDS_NUMBER_OF_PASS_THRU_INPUTS = 1711
-                  , CAT_SPECIFIED_POSITION_APPEARS_MULTIPLE_TIMES = 1712
-                  , CAT_MISSING_UUDF_FUNCTION_NAME_CLAUSE         = 1713
-                  , CAT_INVALID_ROUTINE_ACTION_NAME               = 1714
-                  , CAT_UNABLE_TO_START_TRANSACTION               = 1715
-                  , CAT_SQL_STYLE_PARAMETER_EXCEEDS_LIMIT         = 1716
-                  , CAT_PASS_THRU_BINARY_INPUT_CANNOT_BE_EMPTY    = 1717
-                  , CAT_BINARY_TYPE_FILE_EMPTY                    = 1718
+                  // unused                                       = 1593
+                  // unused                                       = 1594
+                  // unused                                       = 1595
+                  // unused                                       = 1596
+                  // unused                                       = 1597
+                  // unused                                       = 1598
+                  // unused                                       = 1599
+                  // unused                                       = 1600
+                  // unused                                       = 1601
 
                   // Method validation failures
                   , CAT_CLASS_NOT_FOUND                           = 11205
diff --git a/core/sql/sqlcomp/CmpDescribe.cpp b/core/sql/sqlcomp/CmpDescribe.cpp
index 28590f4..26b9b3c 100644
--- a/core/sql/sqlcomp/CmpDescribe.cpp
+++ b/core/sql/sqlcomp/CmpDescribe.cpp
@@ -2411,7 +2411,12 @@
     }
 
   if (type == 2)
-    outputShortLine(space, ";");
+    {
+      outputShortLine(space, ";");
+
+      outputShortLine(space," ");
+      outputShortLine(space,"/* Trafodion DDL */");
+    }
 
   // if this hive table is registered in traf metadata, show that.
   if ((type == 2) &&
@@ -2455,9 +2460,6 @@
       QualifiedName qn(extName, 3);
       CorrName cn(qn);
 
-      outputShortLine(space," ");
-      outputShortLine(space,"/* Trafodion DDL */");
- 
       short rc = CmpDescribeSeabaseTable(cn, 
                                          type,
                                          dummyBuf, dummyLen, heap, 
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp b/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp
index 2490e59..9bad515 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp
@@ -2973,7 +2973,7 @@
 
     default:
       {
-        *CmpCommon::diags() << DgSqlCode(-1174);
+        *CmpCommon::diags() << DgSqlCode(-CAT_INVALID_COLUMN_DATATYPE);
         
         return -1; 
       }
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp b/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp
index 124ec1b..de6f778 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp
@@ -2011,7 +2011,7 @@
       if ((CmpCommon::getDefault(TRAF_ALLOW_RESERVED_COLNAMES) == DF_OFF) &&
           (ComTrafReservedColName(colArray[i]->getColumnName())))
         {
-          *CmpCommon::diags() << DgSqlCode(-1269)
+          *CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_COLUMN_NAME)
                               << DgString0(colArray[i]->getColumnName());
           
           deallocEHI(ehi);
@@ -2181,7 +2181,7 @@
       if (numSaltPartns <= 1 || numSaltPartns > 1024)
         {
           // number of salt partitions is out of bounds
-          *CmpCommon::diags() << DgSqlCode(-1196) 
+          *CmpCommon::diags() << DgSqlCode(-CAT_INVALID_NUM_OF_SALT_PARTNS) 
                               << DgInt0(2)
                               << DgInt1(1024);
           deallocEHI(ehi); 
@@ -5491,7 +5491,7 @@
   if ((CmpCommon::getDefault(TRAF_ALLOW_RESERVED_COLNAMES) == DF_OFF) &&
       (ComTrafReservedColName(colName)))
     {
-      *CmpCommon::diags() << DgSqlCode(-1269)
+      *CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_COLUMN_NAME)
                           << DgString0(colName);
       
       deallocEHI(ehi);
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLview.cpp b/core/sql/sqlcomp/CmpSeabaseDDLview.cpp
index b0e04ee..62a2c6b 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLview.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLview.cpp
@@ -1060,11 +1060,7 @@
   NADELETEBASIC(query, STMTHEAP);
   if (cliRC < 0)
     {
-      if (cliRC == -8402)
-        // string overflow, view text does not fit into metadata table
-        *CmpCommon::diags() << DgSqlCode(-1198);
-      else
-        cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
+      cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
 
       deallocEHI(ehi); 
       processReturn();
diff --git a/core/sql/sqlcomp/DefaultConstants.h b/core/sql/sqlcomp/DefaultConstants.h
index 3f837c8..b7e4d0d 100644
--- a/core/sql/sqlcomp/DefaultConstants.h
+++ b/core/sql/sqlcomp/DefaultConstants.h
@@ -3304,7 +3304,8 @@
   MEMORY_LIMIT_ROWSET_IN_MB,
 
   SUPPRESS_CHAR_LIMIT_CHECK,
-
+ 
+  BMO_MEMORY_ESTIMATE_OUTLIER_FACTOR,
   // This enum constant must be the LAST one in the list; it's a count,
   // not an Attribute (it's not IN DefaultDefaults; it's the SIZE of it)!
   __NUM_DEFAULT_ATTRIBUTES
diff --git a/core/sql/sqlcomp/QCache.cpp b/core/sql/sqlcomp/QCache.cpp
index cf958cd..81c9869 100644
--- a/core/sql/sqlcomp/QCache.cpp
+++ b/core/sql/sqlcomp/QCache.cpp
@@ -1912,7 +1912,6 @@
   nOfCacheHits_[A_PREPARSE] =
   nOfCacheHits_[A_PARSE] =
   nOfCacheHits_[A_BIND] = 0;  
-  heap_->setJmpBuf(CmpInternalErrorJmpBufPtr);
   heap_->setErrorCallback(&CmpErrLog::CmpErrLogCallback);
 
 #ifdef DBG_QCACHE
diff --git a/core/sql/sqlcomp/nadefaults.cpp b/core/sql/sqlcomp/nadefaults.cpp
index f672de1..a75c20b 100644
--- a/core/sql/sqlcomp/nadefaults.cpp
+++ b/core/sql/sqlcomp/nadefaults.cpp
@@ -461,6 +461,7 @@
   DDflte_(BMO_CITIZENSHIP_FACTOR,             "1."),
 
   DDflte_(BMO_MEMORY_EQUAL_QUOTA_SHARE_RATIO,        "0.5"),
+  DDui___(BMO_MEMORY_ESTIMATE_OUTLIER_FACTOR,        "10"),
   DDflte_(BMO_MEMORY_ESTIMATE_RATIO_CAP,             "0.7"),
   DDui___(BMO_MEMORY_LIMIT_LOWER_BOUND_HASHGROUPBY , "25"),
   DDui___(BMO_MEMORY_LIMIT_LOWER_BOUND_HASHJOIN,     "25"),
@@ -3080,9 +3081,8 @@
   DDui___(USTAT_HBASE_SAMPLE_RETURN_INTERVAL,   "10000000"), // Avoid scanner timeout by including on average at
                                                              //   least one row per this many when sampling within HBase.
   DDflt0_(USTAT_INCREMENTAL_FALSE_PROBABILITY,   "0.01"),
-  DDkwd__(USTAT_INCREMENTAL_UPDATE_STATISTICS,   "ON"),
-                                                         // to the non-audited sample table must be done on same
-                                                         // process it was created on.  This CQD is NOT externalized.
+  DDkwd__(USTAT_INCREMENTAL_UPDATE_STATISTICS,   "SAMPLE"), // "SAMPLE" ==> don't use Counting Bloom Filters
+
   DDkwd__(USTAT_INTERNAL_SORT,                  "HYBRID"),
 
   DDkwd__(USTAT_IS_IGNORE_UEC_FOR_MC,           "OFF"),   // if MCIS is ON, use IS to compute SC stats
diff --git a/core/sql/sqlcomp/parser.cpp b/core/sql/sqlcomp/parser.cpp
index 1c84c84..0112901 100644
--- a/core/sql/sqlcomp/parser.cpp
+++ b/core/sql/sqlcomp/parser.cpp
@@ -132,7 +132,6 @@
                         NAMemory::DERIVED_FROM_SYS_HEAP,
                         524288,
                         memLimit);
-    wHeap_->setJmpBuf(CmpInternalErrorJmpBufPtr);
     wHeap_->setErrorCallback(&CmpErrLog::CmpErrLogCallback);
   }
 
diff --git a/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java b/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java
index 3cdd747..0c6936b 100644
--- a/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java
+++ b/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java
@@ -26,8 +26,8 @@
  * Invocation (all arguments are strings):
  *
  * select ... from udf(JDBC(
- *    <name of JDBC driver jar>, // Not really needed if the jar is stored in
- *                               // $TRAF_HOME/udr/public/external_libs
+ *    <name of JDBC driver jar>, // file name of the JDBC driver jar, stored
+ *                               // in $TRAF_HOME/udr/public/external_libs
  *    <name of JDBC driver class in the jar>,
  *    <connection string>,
  *    <user name>,
@@ -98,7 +98,7 @@
             Path driverJarPath = Paths.get(driverJar_);
 
             // for security reasons, we sandbox the allowed driver jars
-            // into $TRAF_HOME/export/lib/udr/external_libs
+            // into $TRAF_HOME/udr/public/external_libs
             driverJarPath = driverJarPath.normalize();
             if (driverJarPath.isAbsolute())
               {
@@ -107,7 +107,7 @@
                   throw new UDRException(
                     38010,
                     "The jar name of the JDBC driver must be a name relative to %s, got %s",
-                    System.getenv("TRAF_HOME")+"/udr/external_libs",
+                    LmUtility.getSandboxRootForUser(null).toString(),
                     driverJar_);
               }
             else
@@ -141,7 +141,7 @@
                 38020,
                 "JDBC driver class %s not found. Please make sure the JDBC driver jar is stored in %s. Message: %s",
                 driverClassName_,
-                System.getenv("TRAF_HOME") + "/udr/public/external_libs",
+                LmUtility.getSandboxRootForUser(null).toString(),
                 cnf.getMessage());
           }
           catch (SQLException se) {
diff --git a/core/sql/udrserv/UdrResultSet.cpp b/core/sql/udrserv/UdrResultSet.cpp
index 2c3388c..78beebc 100644
--- a/core/sql/udrserv/UdrResultSet.cpp
+++ b/core/sql/udrserv/UdrResultSet.cpp
@@ -545,7 +545,7 @@
   Int32 result = SQL_EXEC_SwitchContext((Lng32) getContextHandle(),
                                       &tmpCtx);
 
-  if (result != 0)
+  if (result < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_SwitchContext")
@@ -567,7 +567,7 @@
   SQLCTX_HANDLE tmpCtxHandle;
   Int32 result = SQL_EXEC_SwitchContext(ctxHandle, &tmpCtxHandle);
 
-  if (result != 0)
+  if (result < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_SwitchContext")
@@ -634,7 +634,7 @@
   outmodule->module_name =0;
 
   retcode = SQL_EXEC_AllocDesc(output_desc_, 500);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_AllocDesc")
@@ -645,7 +645,7 @@
 
   // Describe Statement.
   retcode = SQL_EXEC_DescribeStmt(stmt_id_, NULL, output_desc_);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_DescribeStmt")
@@ -657,7 +657,7 @@
   // Check how many columns there are in the output_desc
   ComUInt32 numColumns = 0;
   retcode = SQL_EXEC_GetDescEntryCount(output_desc_, (Lng32*) &numColumns);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_GetDescEntryCount")
@@ -751,7 +751,7 @@
   retcode = SQL_EXEC_GetDescItems2(output_desc_,
                                    (Lng32) numColumns * NUMDESC_ITEMS,
                                    desc_items);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_GetDescItems2")
@@ -1252,7 +1252,7 @@
                                                1,
                                                (Lng32) numColumns_,
                                                quad_fields_);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_SETROWSETDESCPOINTERS")
@@ -1286,7 +1286,7 @@
   if (stmt_id_->tag != 0)
   {
     retcode = SQL_EXEC_DisassocFileNumber(stmt_id_);
-    if (retcode != 0)
+    if (retcode < 0)
     {
       mainDiags << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
                 << DgString0("SQL_EXEC_DisassocFileNumber")
@@ -1321,7 +1321,7 @@
          ! tmpBuffer_->moreRowsToCopy())
   {
     retcode = SQL_EXEC_ClearDiagnostics(stmt_id_);
-    if (retcode != 0)
+    if (retcode < 0)
     {
       mainDiags << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
                 << DgString0("SQL_EXEC_ClearDiagnostics")
@@ -1416,7 +1416,7 @@
     }
 
     retcode = SQL_EXEC_ClearDiagnostics(stmt_id_);
-    if (retcode != 0)
+    if (retcode < 0)
     {
       mainDiags << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
                 << DgString0("SQL_EXEC_ClearDiagnostics")
@@ -1431,7 +1431,7 @@
     retcode = SQL_EXEC_GetDescItem(output_desc_, 1,
                                    SQLDESC_ROWSET_NUM_PROCESSED,
                                    &numRowsFetched, 0, 0, 0, 0);
-    if (retcode != 0)
+    if (retcode < 0)
     {
       mainDiags << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
                 << DgString0("SQL_EXEC_GetDescItem")
diff --git a/core/sql/udrserv/spinfo.cpp b/core/sql/udrserv/spinfo.cpp
index 224592e..5de87c5 100644
--- a/core/sql/udrserv/spinfo.cpp
+++ b/core/sql/udrserv/spinfo.cpp
@@ -893,7 +893,7 @@
   Lng32 retcode = 0;
 
   retcode = SQL_EXEC_ClearDiagnostics(NULL);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_ClearDiagnostics")
@@ -915,7 +915,7 @@
   stmt->handle = 0;
 
   retcode = SQL_EXEC_AllocStmt(stmt, 0);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_AllocStmt")
@@ -938,7 +938,7 @@
   sqlsrc_desc.identifier = 0;
   sqlsrc_desc.handle = 0;
   retcode = SQL_EXEC_AllocDesc(&sqlsrc_desc, 1);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_AllocDesc")
@@ -966,7 +966,7 @@
   desc_items[2].num_val_or_len = (Lng32) strlen(sql_str) + 1;
 
   retcode = SQL_EXEC_SetDescItems2(&sqlsrc_desc, 3, desc_items);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     d << DgSqlCode(-UDR_ERR_INTERNAL_CLI_ERROR)
       << DgString0("SQL_EXEC_SetDescItem2")
@@ -982,7 +982,7 @@
 
   // Prepare the statement; stmt has the prepared plan
   retcode = SQL_EXEC_Prepare(stmt, &sqlsrc_desc);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     SQL_EXEC_MergeDiagnostics_Internal(d);
 
@@ -1000,7 +1000,7 @@
 
   // Execute the statement
   retcode = SQL_EXEC_ExecClose(stmt, 0, 0, 0);
-  if (retcode != 0)
+  if (retcode < 0)
   {
     SQL_EXEC_MergeDiagnostics_Internal(d);
 
@@ -1285,7 +1285,7 @@
                     "Message carried a transaction. About to quiesce.");
       
       Lng32 sqlcode = SQL_EXEC_Xact(SQLTRANS_QUIESCE, NULL);
-      if (sqlcode != 0)
+      if (sqlcode < 0)
       {
         char msg[MAXERRTEXT];
         str_sprintf(msg, "SQL_EXEC_Xact returned error %d", (Int32) sqlcode);
diff --git a/core/sql/udrserv/udrserv.cpp b/core/sql/udrserv/udrserv.cpp
index a118b1c..9f0b18e 100644
--- a/core/sql/udrserv/udrserv.cpp
+++ b/core/sql/udrserv/udrserv.cpp
@@ -425,11 +425,6 @@
                                256 * 1024 // 256K block size
                                );
 
-  udrHeap->setJmpBuf(&UdrHeapLongJmpTgt);
-  Int32 udrJmpRc = setjmp(UdrHeapLongJmpTgt);
-  if (udrJmpRc)
-     UDR_ABORT("udrHeap allocation failed.");
-
   NAHeap *ipcHeap = new NAHeap("UDR IPC Heap",
                                NAMemory::DERIVED_FROM_SYS_HEAP,
                                256 * 1024 // 256K block size
@@ -440,11 +435,6 @@
     HeapLogRoot::control(LOG_START);
 #endif
 
-  ipcHeap->setJmpBuf(&IpcHeapLongJmpTgt);
-  Int32 ipcJmpRc = setjmp(IpcHeapLongJmpTgt);
-  if (ipcJmpRc)
-     UDR_ABORT("ipcHeap allocation failed.");
-
   UDR_GLOBALS = new (udrHeap) UdrGlobals(udrHeap, ipcHeap);
 
   // Move environment settings into the global LmJavaOptions object
@@ -2105,7 +2095,7 @@
     if (txRequired && result == LM_OK)
     {
       cliResult = SQL_EXEC_Xact(SQLTRANS_QUIESCE, NULL);
-      if (cliResult != 0)
+      if (cliResult < 0)
       {
         fprintf(f, "%s SQL_EXEC_Xact() returned %d\n",
                 prefix, cliResult);
diff --git a/core/sql/ustat/hs_globals.cpp b/core/sql/ustat/hs_globals.cpp
index 5fc1ce5..bb764b2 100644
--- a/core/sql/ustat/hs_globals.cpp
+++ b/core/sql/ustat/hs_globals.cpp
@@ -7093,16 +7093,20 @@
   }
 
   rowsAffected = 0;
+
+  // The most likely error on the DELETE would be due to a bad WHERE clause.
+  // (When CQD USTAT_INCREMENTAL_UPDATE_STATISTICS is set to 'SAMPLE', this is
+  // the first place that we attempt to use the user's WHERE clause.)
   if (transactional)
     {
-      retcode = HSFuncExecTransactionalQueryWithRetry(deleteQuery, -UERR_INTERNAL_ERROR,
+      retcode = HSFuncExecTransactionalQueryWithRetry(deleteQuery, -UERR_IUS_BAD_WHERE_CLAUSE,
                             &rowsAffected,
                             "IUS delete from PS where",
                             NULL, NULL);
     }
   else
     {
-      retcode = HSFuncExecQuery(deleteQuery, -UERR_INTERNAL_ERROR,
+      retcode = HSFuncExecQuery(deleteQuery, -UERR_IUS_BAD_WHERE_CLAUSE,
                             &rowsAffected,
                             "IUS delete from PS where",
                             NULL, NULL);
diff --git a/core/trafodion.spec b/core/trafodion.spec
index 34ceed2..a8db7fa 100644
--- a/core/trafodion.spec
+++ b/core/trafodion.spec
@@ -29,7 +29,7 @@
 Source0:        %{name}-%{version}.tar.gz
 BuildArch:	%{_arch}
 BuildRoot:	%{_tmppath}/%{name}-%{version}-%{release}
-URL:            http://trafodion.incubator.apache.org
+URL:            http://trafodion.apache.org
 
 
 %define _binary_filedigest_algorithm 1
diff --git a/dcs/src/main/java/org/trafodion/dcs/master/ServerManager.java b/dcs/src/main/java/org/trafodion/dcs/master/ServerManager.java
index 8594c36..522ee2d 100644
--- a/dcs/src/main/java/org/trafodion/dcs/master/ServerManager.java
+++ b/dcs/src/main/java/org/trafodion/dcs/master/ServerManager.java
@@ -23,14 +23,12 @@
 package org.trafodion.dcs.master;
 
 import java.net.InetAddress;
-
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.BufferedReader;
 import java.io.FileReader;
 import java.io.FileNotFoundException;
-
 import java.util.Scanner;
 import java.util.Collections;
 import java.util.Iterator;
@@ -47,17 +45,13 @@
 import java.util.Comparator;
 import java.util.Map;
 import java.util.HashMap;
-
 import java.text.DateFormat;
 
 import org.apache.zookeeper.*;
 import org.apache.zookeeper.data.Stat;
-
 import org.apache.hadoop.conf.Configuration;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
 import org.trafodion.dcs.master.RunningServer;
 import org.trafodion.dcs.master.RegisteredServer;
 import org.trafodion.dcs.master.Metrics;
@@ -66,7 +60,6 @@
 import org.trafodion.dcs.Constants;
 import org.trafodion.dcs.zookeeper.ZkClient;
 import org.trafodion.dcs.util.*;
-
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -176,8 +169,8 @@
                 // But, if we are DcsMaster follower that is taking over from
                 // failed one then ignore timestamp issues described above.
                 // See MasterLeaderElection.elect()
-                if ((master.isFollower() == false)
-                        && (serverStartTimestamp > startupTimestamp)) {
+                if ((master.isFollower() == false && serverStartTimestamp > startupTimestamp)
+                        || (master.isFollower() && runningServers.size() < configuredServers.size())) {
                     scriptContext.setHostName(hostName);
                     scriptContext
                             .setScriptName(Constants.SYS_SHELL_SCRIPT_NAME);
@@ -240,15 +233,17 @@
                         }
                     }
                 } else {
-                    if (LOG.isDebugEnabled())
-                        LOG.debug("No restart for "
-                                + znodePath
-                                + "\nbecause DcsServer start time ["
-                                + DateFormat.getDateTimeInstance().format(
-                                        new Date(serverStartTimestamp))
-                                + "] was before DcsMaster start time ["
-                                + DateFormat.getDateTimeInstance().format(
-                                        new Date(startupTimestamp)) + "]");
+                    StringBuffer sb = new StringBuffer();
+                    sb.append("No restart for ").append(znodePath).append(System.getProperty("line.separator"));
+                    sb.append("DCS Master isFollower [").append(master.isFollower()).append("], ");
+                    sb.append("DCS Master start time [")
+                            .append(DateFormat.getDateTimeInstance().format(new Date(startupTimestamp))).append("], ");
+                    sb.append("DCS Server start time [")
+                            .append(DateFormat.getDateTimeInstance().format(new Date(serverStartTimestamp))).append("], ");
+                    sb.append("running DCS Server num is [").append(runningServers.size())
+                            .append("], registered DCS Server num is [").append(registeredServers.size()).append("].");
+
+                    LOG.info(sb.toString());
                 }
             } catch (Exception e) {
                 e.printStackTrace();
@@ -314,6 +309,7 @@
             getServersFile();
             createServersPortMap();
             getZkRunning();
+            getUnwathedServers();
             getZkRegistered();
 
             while (true) {
@@ -506,6 +502,56 @@
         }
     }
 
+    private void getUnwathedServers() {
+        // In some situations, if a DCS Server does not have its znode info in
+        // ZooKeeper when the DCS Master is starting, that server will never be
+        // watched by ZooKeeper, and if it goes down it will never be restarted.
+
+        // configuredServers
+        // hostName + ":" + lineNum + ":" + serverCount
+        // runningServers
+        // hostName + ":" + instance + ":" + infoPort + ":" + serverStartTimestamp
+        // eg : gy26.esgyncn.local:3:24413:1515056285028
+        // RestartHandler needs to know hostName, instanceNum (lineNum), and serverStartTimestamp (for the if condition)
+        if (runningServers.size() == configuredServers.size()) {
+            if (LOG.isDebugEnabled()) {
+                LOG.debug("all dcs servers have started, no need to add watchers");
+            }
+            return;
+        }
+
+        boolean found = false;
+        for (String configured : configuredServers) {
+            Scanner configuredScn = new Scanner(configured);
+            configuredScn.useDelimiter(":");
+            String hostName = configuredScn.next();
+            int instance = Integer.parseInt(configuredScn.next());
+            int serverCount = Integer.parseInt(configuredScn.next());
+            configuredScn.close();
+            for (String running : runningServers) {
+                Scanner runningScn = new Scanner(running);
+                runningScn.useDelimiter(":");
+                String runningHostName = runningScn.next();
+
+                runningScn.close();
+                if (runningHostName.equals(hostName)) {
+                    found = true;
+                    break;
+                }
+            }
+            if (found) {
+                found = false;
+                continue;
+            } else {
+                LOG.error("DcsServer [" + hostName + ":" + instance + "] does not started when starting DcsMaster [" + master.getServerName() + "] add to restart queue.");
+                // add to the restart handler
+                String simulatePath = hostName + ":" + instance + ":0:" + System.currentTimeMillis();
+                RestartHandler handler = new RestartHandler(simulatePath, serverCount);
+                restartQueue.add(handler);
+            }
+        }
+    }
+
     private synchronized void restartServer(String znodePath) throws Exception {
         String child = znodePath.replace(parentZnode
                 + Constants.DEFAULT_ZOOKEEPER_ZNODE_SERVERS_RUNNING + "/", "");
diff --git a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/PropTest.java b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/PropTest.java
index a4a269e..e802949 100644
--- a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/PropTest.java
+++ b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/PropTest.java
@@ -61,8 +61,16 @@
             System.out.println("Catalog : " + conn.getCatalog());
             assertEquals("Catalog should be the same as the properties file defined",Utils.catalog, conn.getCatalog());
             System.out.println("testDefaultPropertiesConnection : PASS");
+            conn.close();
         }
         catch (Exception e) {
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException e1) {
+                    e1.printStackTrace();
+                }
+            }
         }
     }
 }
diff --git a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
index 1cab0b0..2a4163b 100644
--- a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
+++ b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
@@ -87,7 +87,7 @@
 		IndexInfo[] expIndexInfo = {

 				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, (String)null, (short)0, (short)0, "C1", 0, 3, (short)0, (String)null),

 				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, (String)null, (short)0, (short)0, "C2", 0, 3, (short)0, (String)null),

-				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, INDEX_C1_NAME, (short)3, (short)0, "", 0, 0, (short)0, (String)null)

+				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, true, (String)null, INDEX_C1_NAME, (short)3, (short)0, "", 0, 0, (short)0, (String)null)

 		};

 		

 		try {

diff --git a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestNetworkTimeout.java b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestNetworkTimeout.java
index 14f9f44..e8f6cb1 100644
--- a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestNetworkTimeout.java
+++ b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestNetworkTimeout.java
@@ -55,7 +55,15 @@
 			es.shutdown();
 			int result = conn.getNetworkTimeout();
 			assertTrue("this is networkTimeout", result == 100);
+			conn.close();
 		} catch (SQLException e) {
+		    	if (conn != null) {
+				try {
+					conn.close();
+				} catch (SQLException e1) {
+					e1.printStackTrace();
+				}
+			}
 			e.printStackTrace();
 		}
 	}
@@ -67,7 +75,16 @@
 			conn = Utils.getUserConnection();
 			int result = conn.getNetworkTimeout();
 			assertTrue("this is networkTimeout", result == 0);
+			conn.close();
 		} catch (SQLException e) {
+			if (conn != null) {
+				try {
+					conn.close();
+				} catch (SQLException e1) {
+					e1.printStackTrace();
+				}
+			}
+
 			e.printStackTrace();
 		}
 	}
diff --git a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestWrap.java b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestWrap.java
index 4716f97..fe10e1b 100644
--- a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestWrap.java
+++ b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestWrap.java
@@ -51,7 +51,15 @@
             conn = Utils.getUserConnection();
             boolean result = conn.isWrapperFor(Connection.class);
             assertTrue("It is wrapper for this interface", result);
+            conn.close();
         } catch (SQLException e) {
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException e1) {
+                    e1.printStackTrace();
+                }
+            }
             e.printStackTrace();
         }
     }
@@ -64,6 +72,13 @@
             conn = Utils.getUserConnection();
             conn.close();
         } catch (SQLException e) {
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException e1) {
+                    e1.printStackTrace();
+                }
+            }
             e.printStackTrace();
         }
         conn.isWrapperFor(Connection.class) ;
@@ -79,7 +94,15 @@
             assertTrue("It is unwrape for this interface", result);
             result = conn.unwrap(Connection.class) instanceof TestWrap;
             assertTrue("It is unwrape for this interface", !result);
+            conn.close();
         } catch (SQLException e) {
+            if (conn != null) {
+                try {
+                    conn.close();
+                } catch (SQLException e1) {
+                    e1.printStackTrace();
+                }
+            }
             e.printStackTrace();
         }
     }
diff --git a/doap.rdf b/doap.rdf
index a5383e9..a65e102 100644
--- a/doap.rdf
+++ b/doap.rdf
@@ -21,23 +21,23 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 -->
-  <Project rdf:about="http://trafodion.incubator.apache.org">
+  <Project rdf:about="http://trafodion.apache.org">
     <created>2017-12-07</created>
     <license rdf:resource="http://spdx.org/licenses/Apache-2.0" />
     <name>Apache Trafodion</name>
-    <homepage rdf:resource="http://trafodion.incubator.apache.org" />
-    <asfext:pmc rdf:resource="http://incubator.apache.org" />
+    <homepage rdf:resource="http://trafodion.apache.org" />
+    <asfext:pmc rdf:resource="http://trafodion.apache.org" />
     <shortdesc>Webscale SQL-on-Hadoop solution enabling transactional or operational workloads on Apache Hadoop.</shortdesc>
     <description>Trafodion builds on the scalability, elasticity, and flexibility of Hadoop. Trafodion extends Hadoop to provide guaranteed transactional integrity, enabling new kinds of big data applications to run on Hadoop.</description>
     <bug-database rdf:resource="http://issues.apache.org/jira/browse/TRAFODION" />
-    <mailing-list rdf:resource="http://trafodion.incubator.apache.org/mail-lists.html" />
-    <download-page rdf:resource="http://trafodion.incubator.apache.org/download.html" />
+    <mailing-list rdf:resource="http://trafodion.apache.org/mail-lists.html" />
+    <download-page rdf:resource="http://trafodion.apache.org/download.html" />
     <programming-language>C++</programming-language>
     <category rdf:resource="http://projects.apache.org/category/big-data" />
     <repository>
       <GitRepository>
-        <location rdf:resource="http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git"/>
-        <browse rdf:resource="https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git"/>
+        <location rdf:resource="http://git-wip-us.apache.org/repos/asf/trafodion.git"/>
+        <browse rdf:resource="https://git-wip-us.apache.org/repos/asf?p=trafodion.git"/>
       </GitRepository>
     </repository>
     <maintainer>
diff --git a/docs/client_install/pom.xml b/docs/client_install/pom.xml
index 7d6d122..48b4660 100644
--- a/docs/client_install/pom.xml
+++ b/docs/client_install/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>

   <name>Trafodion Client Installation Guide</name>

   <description>This guide describes how to install different Trafodion client applications.</description>

-  <url>http://trafodion.incubator.apache.org</url>

+  <url>http://trafodion.apache.org</url>

   <inceptionYear>2015</inceptionYear>

 

   <parent>

@@ -61,9 +61,9 @@
   </issueManagement>

 

   <scm>

-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>

-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>

-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>

+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>

+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>

+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>

     <tag>HEAD</tag>

   </scm>

 

@@ -187,9 +187,9 @@
                 <project-version>${env.TRAFODION_VER}</project-version>

                 <project-name>Trafodion</project-name>

                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>

-                <project-support>user@trafodion.incubator.apache.org</project-support>

-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>

-                <download-url>http://trafodion.incubator.apache.org/download.html</download-url>

+                <project-support>user@trafodion.apache.org</project-support>

+                <docs-url>http://trafodion.apache.org/docs</docs-url>

+                <download-url>http://trafodion.apache.org/download.html</download-url>

                 <build-date>${maven.build.timestamp}</build-date>

                 <google-analytics-account>UA-72491210-1</google-analytics-account>

               </attributes>

@@ -217,9 +217,9 @@
                 <project-version>${env.TRAFODION_VER}</project-version>

                 <project-name>Trafodion</project-name>

                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>

-                <project-support>user@trafodion.incubator.apache.org</project-support>

-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>

-                <download-url>http://http://trafodion.incubator.apache.org/download.html</download-url>

+                <project-support>user@trafodion.apache.org</project-support>

+                <docs-url>http://trafodion.apache.org/docs</docs-url>

+                <download-url>http://http://trafodion.apache.org/download.html</download-url>

                 <build-date>${maven.build.timestamp}</build-date>

               </attributes>

             </configuration>

@@ -289,9 +289,9 @@
 

   <distributionManagement>

     <site>

-      <id>trafodion.incubator.apache.org</id>

-      <name>Trafodion Website at incubator.apache.org</name>

-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see

+      <id>trafodion.apache.org</id>

+      <name>Trafodion Website at apache.org</name>

+      <!-- On why this is the tmp dir and not trafodion.apache.org, see

       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866

       -->

       <url>file:///tmp</url>

diff --git a/docs/client_install/src/asciidoc/_chapters/introduction.adoc b/docs/client_install/src/asciidoc/_chapters/introduction.adoc
index 4b9172e..4a3c654 100644
--- a/docs/client_install/src/asciidoc/_chapters/introduction.adoc
+++ b/docs/client_install/src/asciidoc/_chapters/introduction.adoc
@@ -38,7 +38,7 @@
 *NOTE:* The {project-name} Command Interface (trafci), DBVisualizer, and SQuirreL SQL Client require this driver to be installed on the client

 workstation.

 | *{project-name} Command Interface (trafci)* | A command-line interface that allows you to connect to a {project-name} database and run SQL statements and other commands interactively or from

-script files. For more information, see the http://trafodion.incubator.apache.org/docs/command_interface/index.html[{project-name} Command Interface Guide].

+script files. For more information, see the http://trafodion.apache.org/docs/command_interface/index.html[{project-name} Command Interface Guide].

 | *DBVisualizer* | A third-party database tool that allows you to connect to a {project-name} database. For more information, see the http://www.dbvis.com/[DbVisualizer website].

 | *SQuirreL SQL Client* | A third-party database tool that allows you to connect to a {project-name} database. For more information, see the 

 http://squirrel-sql.sourceforge.net/[SQuirreL SQL Client website].

diff --git a/docs/client_install/src/asciidoc/_chapters/jdbct4.adoc b/docs/client_install/src/asciidoc/_chapters/jdbct4.adoc
index 43dbf73..3b73d14 100644
--- a/docs/client_install/src/asciidoc/_chapters/jdbct4.adoc
+++ b/docs/client_install/src/asciidoc/_chapters/jdbct4.adoc
@@ -92,7 +92,7 @@
 `<port-number>` is the location where the 

 {project-name} Database Connectivity Service (DCS) is running. (Default: *23400*).

 

-See the http://trafodion.incubator.apache.org/docs/dcs_reference/index.html[{project-name} Database Connectivity Services Reference Guide]

+See the http://trafodion.apache.org/docs/dcs_reference/index.html[{project-name} Database Connectivity Services Reference Guide]

 for information about how to configure the DCS port.

 

 *Example*

diff --git a/docs/client_install/src/asciidoc/_chapters/preparation.adoc b/docs/client_install/src/asciidoc/_chapters/preparation.adoc
index 90b9ebb..9b635b3 100644
--- a/docs/client_install/src/asciidoc/_chapters/preparation.adoc
+++ b/docs/client_install/src/asciidoc/_chapters/preparation.adoc
@@ -103,7 +103,6 @@
 [cols="30%,70%", options="header"]

 |===

 | File                               | Usage

-| `DISCLAIMER`                       | {project-name} Apache incubation disclaimer.

 | `JDBCT4.zip`                       | {project-name} JDBC Type 4 Driver.

 | `LICENCE`                          | Apache license.

 | `NOTICE`                           | Apache notice.

@@ -137,7 +136,7 @@
 . Verify content of the `clients` directory:

 +

 ```

-DISCLAIMER JDBCT4.zip LICENSE NOTICE odb64_linux.tar.gz trafci.zip TRAF_ODBC_Linux_Driver_64.tar.gz

+JDBCT4.zip LICENSE NOTICE odb64_linux.tar.gz trafci.zip TRAF_ODBC_Linux_Driver_64.tar.gz

 ```

 +

 You use these files to install the different {project-name} clients.

@@ -166,10 +165,10 @@
 $ mkdir $HOME/trafodion

 $ cd $HOME/trafodion

 $ wget <link to package>

-$ tar -xzvf apache-trafodion_clients-*-incubating.tar.gz

+$ tar -xzvf apache-trafodion_clients-*.tar.gz

 $ cd clients

 $ ls

-DISCLAIMER  LICENSE  odb64_linux.tar.gz  TRAF_ODBC_Linux_Driver_64.tar.gz

+LICENSE  odb64_linux.tar.gz  TRAF_ODBC_Linux_Driver_64.tar.gz

 JDBCT4.zip  NOTICE   trafci.zip

 $

 ```

@@ -220,7 +219,7 @@
 .

 $ cd ..

 $ ls

-apache-trafodion_clients-2.0.1-incubating.tar.gz  clients  jdbct4  trafci

+apache-trafodion_clients-2.2.0.tar.gz  clients  jdbct4  trafci

 $

 ```

 

@@ -258,7 +257,7 @@
 .

 $ cd ..

 $ ls

-apache-trafodion_clients-2.0.1-incubating.tar.gz  clients  odb  odbc

+apache-trafodion_clients-2.2.0.tar.gz  clients  odb  odbc

 ```

 

 

diff --git a/docs/client_install/src/asciidoc/_chapters/sample_prog.adoc b/docs/client_install/src/asciidoc/_chapters/sample_prog.adoc
index 2562319..ab22ca8 100644
--- a/docs/client_install/src/asciidoc/_chapters/sample_prog.adoc
+++ b/docs/client_install/src/asciidoc/_chapters/sample_prog.adoc
@@ -34,7 +34,7 @@
 

 === `basicsql.cpp` Source Code

 You can download the `basicsql.cpp` example from

-http://trafodion.incubator.apache.org/docs/client_install/resources/source/basicsql.cpp.

+http://trafodion.apache.org/docs/client_install/resources/source/basicsql.cpp.

 

 Alternatively, copy and paste the following code into a file named `basicsql.cpp`:

 

@@ -52,7 +52,7 @@
 

 ==== `build.bat` (Build Script)

 You can download the `build.bat` example from

-http://trafodion.incubator.apache.org/docs/client_install/resources/source/build.bat.

+http://trafodion.apache.org/docs/client_install/resources/source/build.bat.

 

 Alternatively, copy and paste the following code into a file named `build.bat`, which is used to build the sample program on Windows:

 

@@ -64,7 +64,7 @@
 

 ==== Run `run.bat`

 You can download the `run.bat` example from

-http://trafodion.incubator.apache.org/docs/client_install/resources/source/run.bat.

+http://trafodion.apache.org/docs/client_install/resources/source/run.bat.

 

 Alternatively, copy and paste the following code into a file named `run.bat`, which is used to run the sample program on Windows:

 

diff --git a/docs/command_interface/pom.xml b/docs/command_interface/pom.xml
index 064ac1d..546216d 100644
--- a/docs/command_interface/pom.xml
+++ b/docs/command_interface/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion Command Interface Guide</name>
   <description>This guide describes how to use the Trafodion Command Interface (TrafCI).</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -61,9 +61,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -187,8 +187,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -209,8 +209,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>
                 <pdf-style>trafodion</pdf-style>
@@ -287,9 +287,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/cqd_reference/pom.xml b/docs/cqd_reference/pom.xml
index e84a495..609f657 100644
--- a/docs/cqd_reference/pom.xml
+++ b/docs/cqd_reference/pom.xml
@@ -32,7 +32,7 @@
   <description>
      This guide describes Trafodion Control Query Defaults (CQDs) that are used to override system-level default settings. 
   </description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -63,9 +63,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -189,8 +189,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -211,8 +211,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>
                 <pdf-style>trafodion</pdf-style>
@@ -289,9 +289,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/jdbct4ref_guide/pom.xml b/docs/jdbct4ref_guide/pom.xml
index b5fdcd7..18bc5b2 100644
--- a/docs/jdbct4ref_guide/pom.xml
+++ b/docs/jdbct4ref_guide/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion JDBC Type 4 Programmer's Reference Guide</name>
   <description>This document describes how to use the JDBC Type 4 Driver.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -61,9 +61,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -187,8 +187,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -216,8 +216,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
               </attributes>
             </configuration>
@@ -287,9 +287,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/jdbct4ref_guide/src/asciidoc/_chapters/introduction.adoc b/docs/jdbct4ref_guide/src/asciidoc/_chapters/introduction.adoc
index 21840be..1034def 100644
--- a/docs/jdbct4ref_guide/src/asciidoc/_chapters/introduction.adoc
+++ b/docs/jdbct4ref_guide/src/asciidoc/_chapters/introduction.adoc
@@ -29,7 +29,7 @@
 {project-name}.
 
 *Supported Java Releases:* The Type 4 driver requires Java enabled
-platforms that support JDK 1.4.1 or higher.
+platforms that support JDK 1.7 or higher.
 
 
 [[type-4-driver-api-package]]
diff --git a/docs/jdbct4ref_guide/src/asciidoc/_chapters/properties_detail.adoc b/docs/jdbct4ref_guide/src/asciidoc/_chapters/properties_detail.adoc
index be403de..5f51e47 100644
--- a/docs/jdbct4ref_guide/src/asciidoc/_chapters/properties_detail.adoc
+++ b/docs/jdbct4ref_guide/src/asciidoc/_chapters/properties_detail.adoc
@@ -79,13 +79,13 @@
 <<how-to-specify-jdbc-type-4-properties, How to Specify JDBC Type 4 Properties>>.
 
 ```
-Data type: int
+Data type: short
 
 Units: seconds
 
 Default: -1 (Use the ConnTimeout value set on the server-side data source.)
 
-Range: -1, 0 to 2147483647
+Range: -1, 0 to 32767
 ```
 
 * Zero (0) specifies infinity as the timeout value.
diff --git a/docs/load_transform/pom.xml b/docs/load_transform/pom.xml
index 26eaa5c..ab05add 100644
--- a/docs/load_transform/pom.xml
+++ b/docs/load_transform/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion Load and Transform Guide</name>
   <description>This guide describes how to load and transform data into Trafodion.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -61,9 +61,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -187,8 +187,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -209,8 +209,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>
                 <pdf-style>trafodion</pdf-style>
@@ -287,9 +287,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/lob_guide/pom.xml b/docs/lob_guide/pom.xml
index 033fb24..9a7d7ce 100644
--- a/docs/lob_guide/pom.xml
+++ b/docs/lob_guide/pom.xml
@@ -31,7 +31,7 @@
   <name>Trafodion SQL Large Objects Guide</name>

   <description>This guide describes how to use Large Object (LOB) datatypes in Trafodion SQL.

   </description>

-  <url>http://trafodion.incubator.apache.org</url>

+  <url>http://trafodion.apache.org</url>

   <inceptionYear>2015</inceptionYear>

   <parent>

     <groupId>org.apache.trafodion</groupId>

@@ -61,9 +61,9 @@
   </issueManagement>

 

   <scm>

-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>

-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>

-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>

+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>

+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>

+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>

     <tag>HEAD</tag>

   </scm>

 

@@ -188,8 +188,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>

                 <project-name>Trafodion</project-name>

                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>

-                <project-support>user@trafodion.incubator.apache.org</project-support>

-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>

+                <project-support>user@trafodion.apache.org</project-support>

+                <docs-url>http://trafodion.apache.org/docs</docs-url>

                 <build-date>${maven.build.timestamp}</build-date>

                 <google-analytics-account>UA-72491210-1</google-analytics-account>

               </attributes>

@@ -210,8 +210,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>

                 <project-name>Trafodion</project-name>

                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>

-                <project-support>user@trafodion.incubator.apache.org</project-support>

-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>

+                <project-support>user@trafodion.apache.org</project-support>

+                <docs-url>http://trafodion.apache.org/docs</docs-url>

                 <build-date>${maven.build.timestamp}</build-date>

                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>

                 <pdf-style>trafodion</pdf-style>

@@ -288,9 +288,9 @@
 

   <distributionManagement>

     <site>

-      <id>trafodion.incubator.apache.org</id>

-      <name>Trafodion Website at incubator.apache.org</name>

-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see

+      <id>trafodion.apache.org</id>

+      <name>Trafodion Website at apache.org</name>

+      <!-- On why this is the tmp dir and not trafodion.apache.org, see

       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866

       -->

       <url>file:///tmp</url>

diff --git a/docs/lob_guide/src/asciidoc/_chapters/about.adoc b/docs/lob_guide/src/asciidoc/_chapters/about.adoc
index 167e308..b697827 100644
--- a/docs/lob_guide/src/asciidoc/_chapters/about.adoc
+++ b/docs/lob_guide/src/asciidoc/_chapters/about.adoc
@@ -190,6 +190,6 @@
 

 == Comments Encouraged

 We encourage your comments concerning this document. We are committed to providing documentation that meets your

-needs. Send any errors found, suggestions for improvement, or compliments to user@trafodion.incubator.apache.org.

+needs. Send any errors found, suggestions for improvement, or compliments to user@trafodion.apache.org.

 

-Include the document title and any comment, error found, or suggestion for improvement you have concerning this document.
\ No newline at end of file
+Include the document title and any comment, error found, or suggestion for improvement you have concerning this document.

diff --git a/docs/lob_guide/src/asciidoc/_chapters/working_with_lob.adoc b/docs/lob_guide/src/asciidoc/_chapters/working_with_lob.adoc
index 783c9b6..085b859 100644
--- a/docs/lob_guide/src/asciidoc/_chapters/working_with_lob.adoc
+++ b/docs/lob_guide/src/asciidoc/_chapters/working_with_lob.adoc
@@ -235,6 +235,8 @@
 An unnamed parameter can be used to prepare a statement and then during an execution, either a function or a simple string parameter can be passed in which will be converted to LOB data.

 

 * `EMPTY_BLOB()` or `EMPTY_CLOB()` 

++

+NOTE: If you want to insert `EMPTY_BLOB()` or `EMPTY_CLOB()` into a lob column, the CQD `TRAF_BLOB_AS_VARCHAR` or `TRAF_CLOB_AS_VARCHAR` which is *ON* by default must be turned *OFF* before creating the table, otherwise an error will be raised and the column definition of the lob column is VARCHAR.

 

 ** If `EMPTY_BLOB()` or `EMPTY_CLOB()` is specified, then a dummy lob handle is created. 

 

@@ -267,6 +269,61 @@
 [#examples]

 === Examples

 

+

+* This example uses a parameter.

++

+```

+PREPARE S FROM INSERT INTO t130lob2 VALUES (1, ?);

+EXECUTE S USING 'fgfgfhfhfhfhhfhfhfhjfkkfkffllflflfll';

+```

+

+* This example does not turn the CQD `TRAF_BLOB_AS_VARCHAR` OFF before creating the table test1, thus it fails to insert `EMPTY_BLOB()` into c2 whose column definition is VARCHAR.

++

+```

+>>CREATE TABLE test1(C1 INT, C2 BLOB);                    

+--- SQL operation complete.

+

+>>CQD TRAF_BLOB_AS_VARCHAR 'OFF';

+--- SQL operation complete.

+

+>>INSERT INTO test1 VALUES(1, EMPTY_BLOB());

+*** ERROR[4035] Type LOB cannot be cast to type VARCHAR(100000).

+*** ERROR[8822] The statement was not prepared.

+

+>>SHOWDDL TABLE test1; 

+CREATE TABLE TRAFODION.SEABASE.TEST1

+  (

+    C1   INT DEFAULT NULL NOT SERIALIZED

+  , C2   VARCHAR(100000) CHARACTER SET ISO88591 COLLATE DEFAULT DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT

+;

+--- SQL operation complete.

+```

+

+* This example turns the CQD `TRAF_CLOB_AS_VARCHAR` OFF before creating the table test2 and inserting `EMPTY_CLOB()` into c2 whose column definition is CLOB.

++

+```

+>>CQD TRAF_CLOB_AS_VARCHAR 'OFF';     

+--- SQL operation complete.

+

+>>CREATE TABLE test2 (C1 INT, C2 CLOB);

+--- SQL operation complete.

+

+>>INSERT INTO test2 VALUES(1, EMPTY_CLOB());

+--- 1 row(s) inserted.

+

+>>SHOWDDL TABLE test2;

+CREATE TABLE TRAFODION.SEABASE.TEST2

+  (

+    C1                               INT DEFAULT NULL NOT SERIALIZED

+  , C2                               CLOB DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT

+;

+--- SQL operation complete.

+```

+

 * This example uses the `STRINGTOLOB` function that converts a simple string literal into LOB format before inserting.

 +

 ```

@@ -293,19 +350,6 @@
 EXTERNALTOLOB('hdfs:///lobs/lob_input_a1.txt'));

 ```

 

-* This example uses a parameter.

-+

-```

-PREPARE S FROM INSERT INTO t130lob2 VALUES (1, ?);

-EXECUTE S USING 'fgfgfhfhfhfhhfhfhfhjfkkfkffllflflfll';

-```

-

-* This example uses the `EMPTY_BLOB` function to insert an empty lob and creates a dummy lob handle. 

-+

-```

-INSERT INTO t130lob2 VALUES (1, empty_blob());

-```

-

 [#inserting into a sql table containing lob columns using select clause]

 == Inserting into a SQL Table Containing LOB Columns Using Select Clause

 

@@ -519,7 +563,7 @@
 EMPTY_CLOB()

 ```

 

-For more information about examples, see http://trafodion.incubator.apache.org/docs/jdbct4ref_guide/index.html[*Trafodion JDBC Type 4 Programmer’s Reference Guide*].

+For more information about examples, see http://trafodion.apache.org/docs/jdbct4ref_guide/index.html[*Trafodion JDBC Type 4 Programmer’s Reference Guide*].

 

 [#considerations]

 === Considerations

@@ -775,7 +819,7 @@
 

 Drop works like any other drop table. All dependent tables are deleted. All files in hdfs (data and descriptor) files are also deleted.

 

-For more information, see <<drop_table_statement,DROP TABLE Statement>> in http://trafodion.incubator.apache.org/docs/sql_reference/index.html[Trafodion SQL Reference Manual].

+For more information, see <<drop_table_statement,DROP TABLE Statement>> in http://trafodion.apache.org/docs/sql_reference/index.html[Trafodion SQL Reference Manual].

 

 [#garbage collection]

 == Garbage Collection

@@ -803,7 +847,7 @@
 

 Cleanup works like cleanup of any other table. The command ensures all dependent SQL LOB tables and hdfs files are dropped ignoring errors if any.

 

-For more information, see <<cleanup_statement,CLEANUP Statement>> in http://trafodion.incubator.apache.org/docs/sql_reference/index.html[Trafodion SQL Reference Manual].

+For more information, see <<cleanup_statement,CLEANUP Statement>> in http://trafodion.apache.org/docs/sql_reference/index.html[Trafodion SQL Reference Manual].

 

 [#showddl for lob]

 == SHOWDDL for LOB

@@ -1156,4 +1200,4 @@
 TRAFODION.SCH.TLOB13   C4   External HDFS Location External HDFS File     0   0

 

 --- 3 row(s) selected.

-```
\ No newline at end of file
+```

diff --git a/docs/messages_guide/pom.xml b/docs/messages_guide/pom.xml
index 4a4b0dd..d3d2595 100644
--- a/docs/messages_guide/pom.xml
+++ b/docs/messages_guide/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion Messages Guide</name>
   <description>This guide describes user-visible messages produced by Trafodion.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -60,9 +60,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -186,8 +186,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -208,8 +208,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>
                 <pdf-style>trafodion</pdf-style>
@@ -286,9 +286,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc b/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc
index be303f7..22327ee 100644
--- a/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc
+++ b/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc
@@ -1402,7 +1402,7 @@
 
 ```
 If a character literal was intended, you must use the single quote
-delimiter: <literal>. The use of double quotes causes {project-name} to
+delimiter: <literal>. The use of double quotes causes SQL to
 interpret <column-name> as a delimited identifier column name.
 ```
 
@@ -1864,50 +1864,35 @@
 
 *Recovery:* Correct the syntax and resubmit.
 
-[[SQL-4128]]
-== SQL 4128
-
-```
-Default volume and subvolume information could not be retrieved from=_DEFAULTS define - DEFINEINFO error <number>.
-```
-
-Where <number> is the error message.
-
-*Cause:* {project-name} was not able to retrieve
-default volume and subvolume information using the =_DEFAULTS define.
-
-*Effect:* The operation fails.
-
-*Recovery:* Check the =_DEFAULTS define and resubmit.
-
 [[SQL-4129]]
 == SQL 4129
 
 ```
-An IF statement should have the same set of output host variables on
-both sides of IF THEN statement list and the ELSE statement list.
+The character-typed result of the function <function-name> is longer than the maximum supported size.
 ```
 
-*Cause:* You specified an IF statement that does not have the same set
-of output host variables on both sides of its IF THEN statement list and
-its ELSE statement list.
+Where <function-name> is the name of a function you specified.
+
+*Cause:* You specified arguments to the given function that cause the result of the function
+to have a data type longer than the maximum supported character data type length.
 
 *Effect:* The operation fails.
 
-*Recovery:* Correct the syntax and resubmit.
+*Recovery:* Change the arguments to the function and resubmit.
 
 [[SQL-4130]]
 == SQL 4130
 
 ```
-SIGNAL parameter 3 must be of type string
+<attribute-name> is a read-only DEFAULTS attribute and cannot be updated.
 ```
 
-*Cause:* You specified a SIGNAL parameter of an incorrect type.
+*Cause:* You attempted to perform a CONTROL QUERY DEFAULT but {project-name} does not
+allow this particular attribute to be changed.
 
 *Effect:* The operation fails.
 
-*Recovery:* Correct the syntax and resubmit.
+*Recovery:* If a different attribute was intended, correct the syntax and resubmit.
 
 [[SQL-4133]]
 == SQL 4133
diff --git a/docs/messages_guide/src/asciidoc/_chapters/compiler_msgs.adoc b/docs/messages_guide/src/asciidoc/_chapters/compiler_msgs.adoc
index b062199..20c6ea9 100644
--- a/docs/messages_guide/src/asciidoc/_chapters/compiler_msgs.adoc
+++ b/docs/messages_guide/src/asciidoc/_chapters/compiler_msgs.adoc
@@ -32,7 +32,7 @@
 == SQL 2004
 
 ```
-Internal error: error from MXCMP; cannot work on this query.
+Internal error: error from Compiler; cannot work on this query.
 ```
 
 *Cause:* {project-name} received an internal error from the {project-name} compiler.
@@ -45,7 +45,7 @@
 == SQL 2009
 
 ```
-The user transaction must be rolled back (or committed, if that makes sense in the application) before MXCMP can be restarted and proceed.
+The user transaction must be rolled back (or committed, if that makes sense in the application) before Compiler can be restarted and proceed.
 ```
 
 *Cause:* An outstanding transaction must be resolved before the {project-name} compiler can be restarted.
@@ -59,7 +59,7 @@
 == SQL 2011
 
 ```
-Server process could not be created - error <number> while resolving program file name <name>.
+Server process could not be created - Operating system error <number> while resolving program file name <name>.
 ```
 
 Where error <number> is the error number.
diff --git a/docs/messages_guide/src/asciidoc/_chapters/ddl_msgs.adoc b/docs/messages_guide/src/asciidoc/_chapters/ddl_msgs.adoc
index 37394f8..e9ce886 100644
--- a/docs/messages_guide/src/asciidoc/_chapters/ddl_msgs.adoc
+++ b/docs/messages_guide/src/asciidoc/_chapters/ddl_msgs.adoc
@@ -363,6 +363,24 @@
 statements, you must first drop each of the dependent objects, then drop
 the object.
 
+[[SQL-1026]]
+== SQL 1026
+
+```
+Specified object name <object-name> is invalid for this command.
+```
+
+Where <object-name> is the name of the object you specified.
+
+*Cause:* You attempted to register or unregister a Hive or HBase table, but the object name
+was not of the appropriate kind. For example, REGISTER HIVE TABLE TRAFODION.SCH.T1 will get this
+error because objects in the TRAFODION catalog are native {project-name} objects.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
 [[SQL-1027]]
 == SQL 1027
 
@@ -380,7 +398,6 @@
 
 *Recovery:* Drop and recreate the view, then resubmit the grant request.
 
-<<<
 [[SQL-1028]]
 == SQL 1028
 
@@ -399,6 +416,7 @@
 *Recovery:* Either drop all the objects in <schema-name> and resubmit
 the statement, or resubmit the drop statement using the CASCADE option.
 
+<<<
 [[SQL-1029]]
 == SQL 1029
 
@@ -415,7 +433,6 @@
 
 *Recovery:* Apply the recovery of the accompanying error messages.
 
-<<<
 [[SQL-1030]]
 == SQL 1030
 
@@ -435,6 +452,7 @@
 
 *Recovery:* Use a shorter name and resubmit.
 
+<<<
 [[SQL-1031]]
 == SQL 1031
 
@@ -450,7 +468,6 @@
 
 *Recovery:* Apply the recovery of the accompanying error message.
 
-<<<
 [[SQL-1032]]
 == SQL 1032
 
@@ -467,6 +484,22 @@
 *Recovery:* If you wish to execute the query, resubmit without
 the DISPLAY command.
 
+<<<
+[[SQL-1033]]
+== SQL 1033
+
+```
+Unable to obtain comments.
+```
+
+*Cause:* You attempted a DDL or SHOWDDL operation but {project-name}
+could not retrieve comment information needed to process the request.
+Additional error messages may give insight to the cause.
+
+*Effect:* The operation is not executed.
+
+*Recovery:* None. Contact the {project-name} Developer Distribution List.
+
 [[SQL-1034]]
 == SQL 1034
 
@@ -913,6 +946,25 @@
 *Recovery:* For information about file system errors, see <<file_system_errors,File-System Errors>>.
 
 <<<
+[[SQL-1071]]
+== SQL 1071
+
+```
+View usage information for the following hive tables could not be set. Make sure that an external table either already exists or implicit creation has not been disabled. Hive tables: <hive-tables>
+```
+
+Where <hive-tables> is a list of Hive table names.
+
+*Cause:* You attempted to create a view referencing the listed Hive tables, but the Hive tables do not have external tables defined within {project-name}
+or have not been registered within {project-name}, and automatic registration has been turned off. (The CQD HIVE_NO_REGISTER_OBJECTS controls whether automatic
+registration is enabled. When this CQD is set to 'ON', automatic registration is disabled.)
+
+*Effect:* The operation fails.
+
+*Recovery:* Either register or create external tables within {project-name} for the Hive tables listed, then resubmit. Alternatively, set
+CQD HIVE_NO_REGISTER_OBJECTS 'OFF' and resubmit.
+
+<<<
 [[SQL-1073]]
 == SQL 1073
 
@@ -1479,7 +1531,7 @@
 == SQL 1143
 
 ```
-Validation for constraint <constraint-name> failed; incompatible data exists in referencing base table <referencing-table-name> and referenced base table <referenced-table-name>. To display the data violating the constraint, please use the following DML statement: <statement-text>.
+Validation for constraint <constraint-name> failed; incompatible data exists in referencing base table <referencing-table-name> and referenced base table <referenced-table-name>. To display the data that violates the constraint, please use the following DML statement: <statement-text>
 ```
 
 Where <constraint-name> is the name of a column constraint or table
@@ -1530,22 +1582,17 @@
 == SQL 1146
 
 ```
-Object <object-name> could not be altered because it is not a <object-type>.
+Object <object-name> could not be altered because it either does not exist or is not a table.
 ```
 
 Where <object-name> is the name of the object being requested.
 
-Where <object-type> is the type of object required for the DDL statement
-issued.
-
-*Cause:* The type of the object specified in the command is inconsistent
-with the DDL command being used. For example, this occurs if DROP TABLE
-is used, and the object you specify is an INDEX.
+*Cause:* You attempted to rename a column in a table, but the <object-name> specified
+either is not a table, or the table does not exist.
 
 *Effect:* The operation fails.
 
-*Recovery:* Check that the correct object name was specified and execute
-a statement that is consistent with that type of object.
+*Recovery:* Correct the <object-name> and resubmit.
 
 <<<
 [[SQL-1147]]
@@ -1585,78 +1632,6 @@
 *Recovery:* Do not use the SYSKEY as part of the referenced key.
 
 <<<
-[[SQL-1150]]
-== SQL 1150
-
-```
-Table <table-name> was not created as Partition Overlay Support could not generate volume names for the partitions to reside on.
-```
-
-Where <table-name> is the name of the table.
-
-*Cause:* When the Partition Overlay Support feature is enabled without
-setting volume names (through CQD POS_LOCATIONS) for table partitions to
-reside on, location names are generated automatically. However, the
-{project-name} database software could not generate the location names
-automatically and, because the CQD POS_RAISE_ERROR is set, the table is
-not created.
-
-*Effect:* The operation fails.
-
-*Recovery:* To correct the error, choose a recovery method:
-
-* Verify that the disk volumes are available on the current node and
-retry the request.
-* Specify the volume names where the partitions need to be created for
-the given CREATE TABLE statement through CONTROL QUERY DEFAULT
-POS_LOCATIONS, and then retry the request.
-* Do not set the CQD POS_RAISE_ERROR, in which case, a simple table
-without partitions is created when the volume names cannot be generated.
-
-<<<
-[[SQL-1151]]
-== SQL 1151
-
-```
-POS (Partition Overlay Support) was not applied as volume names could not be generated for the partitions. So a simple table <table-name> was created without partitions.
-```
-
-Where <table-name> is the name of the table.
-
-*Cause:* When the Partition Overlay Support feature is enabled without
-setting volume names (through CQD POS_LOCATIONS) for table partitions to
-reside on, location names are generated automatically. However, the
-{project-name} database software could not generate the location names
-automatically, and because the CQD POS_RAISE_ERROR is not set, the given
-table is created as a simple table without partitions as it would be if
-the Partition Overlay Support feature was not enabled.
-
-*Effect:* The POS feature was not applied. A simple table without
-partitions was created.
-
-*Recovery:* None if a non-partitioned table is requested. To request a
-partitioned table, delete the table, verify that disk volumes are
-available, and resubmit. You can also specify the volume names where the
-partitions need to be created for the given CREATE TABLE through
-CONTROL QUERY DEFAULT POS_LOCATIONS, and then retry the request.
-
-<<<
-[[SQL-1154]]
-== SQL 1154
-
-```
-Cannot create object <object-name> as the table size is too big to fit on the system.
-```
-
-*Cause:* {project-name} could not create the object
-because the requested table size is bigger than the total amount of disk
-space available on the system.
-
-*Effect:* The SQL operation fails.
-
-*Recovery:* Check that the table size requested is big enough to fit on
-the system and retry the statement.
-
 [[SQL-1155]]
 == SQL 1155
 
@@ -1675,15 +1650,14 @@
 == SQL 1156
 
 ```
-Synonym <name> does not exist or object type is invalid for the current operation.
+Object <object-name> does not have columns.
 ```
 
-*Cause:* You specified a create or alter of synonym <name> on an object
-that is not a table or view.
+*Cause:* You attempted to grant or revoke column privileges, but <object-name> is not a table or a view.
 
 *Effect:* The operation fails.
 
-*Recovery:* Correct the syntax so that the correct <name> is used.
+*Recovery:* Correct the syntax so that the correct <object-name> is used.
 
 [[SQL-1157]]
 == SQL 1157
@@ -1699,184 +1673,59 @@
 
 *Recovery:* None.
 
-<<<
-[[SQL-1158]]
-== SQL 1158
+[[SQL-1174]]
+== SQL 1174
 
 ```
-Synonym <name> already exists.
+An unsupported data type was encountered on this operation.
 ```
 
-*Cause:* You specified a create of synonym <name> on an object, but a
-synonym of that name already exists.
+*Cause:* You performed a DDL-related operation and one of the objects involved
+has a column with a data type that is not supported for this operation. This is
+due to an incomplete implementation of support for a particular column type.
 
 *Effect:* The operation fails.
 
-*Recovery:* Correct the syntax so that the correct <name> is used.
-
-[[SQL-1159]]
-== SQL 1159
-
-```
-The specified object <name> is not a table or a view. Please verify that the correct object was specified.
-```
-
-*Cause:* You specified an alter or drop of synonym <name>, but a synonym of that name already exists.
-
-*Effect:* The operation fails.
-
-*Recovery:* Correct the syntax so that the correct <name> is used.
-
-<<<
-[[SQL-1160]]
-== SQL 1160
-
-```
-A mismatch between the NOT DROPPABLE PRIMARY KEY constraint and the STORE BY clause was detected. When both clauses are specified, the STORE BY key column list must be the same as, or a prefix of , the PRIMARY KEY column list. This mismatch is caused by differences between the columns themselves, the order of columns, or the ASC/DESC attribute.
-```
-
-*Cause:* You attempted to create a table that contains both a NOT
-DROPPABLE PRIMARY KEY constraint and a STORE BY clause. The syntax
-specified is not correct. The STORE BY column list must be the same as,
-or a prefix of, the NOT DROPPABLE PRIMARY KEY column list.
-
-*Effect:* The operation fails.
-
-*Recovery:* If you want the STORE BY column list that specifies the
-clustering key to be different than the PRIMARY KEY, specify a DROPPABLE
-PRIMARY KEY. If you want the PRIMARY KEY to be the same as the STORE BY
-key, do not specify a STORE BY clause. Correct the syntax and resubmit.
-
-[[SQL-1161]]
-== SQL 1161
-
-```
-System generated column SYSKEY cannot be specified as part of the PARTITION BY clause.
-```
-
-*Cause:* {project-name} could not create the object
-because system-generated column SYSKEY is not allowed as part of the
-PARTITION BY clause.
-
-*Effect:* The DDL operation fails.
-
-*Recovery:* Remove the system added column SYSKEY from the PARTITION BY
-clause and retry the DDL statement.
+*Recovery:* None. Report this error and the DDL operation to the {project-name} User Distribution List.
 
 <<<
 [[SQL-1180]]
 == SQL 1180
 
 ```
-The required subvolume name for locations in schema <schema-name> is <directory-name>.
+Trying to create an external <kind> table with a different schema or table name (<table-name>) than the source table (<source-table-name>). The external schema and table name must be the same as the source.
 ```
 
-Where <schema-name> is the schema in which the object is being created.
+Where <kind> is the kind of the source table (e.g. HBASE or HIVE).
 
-Where <directory-name> is the designated subvolume name for that schema.
+Where <table-name> is the name of a {project-name} external table that you are trying to create.
 
-*Cause:* One or more LOCATION clauses in the CREATE or MODIFY statement
-contained a {project-name} platform location whose subvolume did not match
-the designated subvolume for the schema in which the object was being
-created.
+Where <source-table-name> is the name of a non-{project-name} table (e.g. an HBase or Hive table).
 
-*Effect:* The object was not created.
+*Cause:* You attempted a CREATE EXTERNAL TABLE statement, but you used a different schema name for the {project-name} external
+table than the original. This might happen because you let the schema name of the external table default, and the default is not the 
+same as the source table schema.
 
-*Recovery:* Either correct the invalid LOCATION clauses or remove them
-and allow the system to generate the {project-name} platform locations.
+*Effect:* The external table was not created.
+
+*Recovery:* Either supply the correct schema name or change the default schema name.
 
 [[SQL-1181]]
 == SQL 1181
 
 ```
-Label <file-name> could not be dropped. (file error <error>).
+Trying to create a schema with name <schema-name> to store the definition of a native HIVE or HBASE table and the name is too long. Maximum length supported is <max-length>.
 ```
 
-Where <file-name> is the name of the table, index, view, or routine
-being dropped.
+Where <schema-name> is the name of the schema you are trying to create.
 
-Where <error> is the returned file system error number.
+Where <max-length> is the maximum length of schema names supported by {project-name}.
 
-*Cause:* The object you attempted to drop resulted in file system error
-<error>.
-
-*Effect:* The DDL DROP operation fails.
-
-*Recovery:* See previous messages in this {project-name} database software
-operation to determine the necessary corrective actions. Also, use the
-file system error <error-number> to analyze the cause. For information
-about file system errors, see <<file_system_errors, File-System Errors>>.
-Fix the error and resubmit.
-
-<<<
-[[SQL-1182]]
-== SQL 1182
-
-```
-Error <error> was returned by the file system on resource fork <file-name>.
-```
-
-Where <error> is the error returned.
-
-Where <file-name> is the name of the file.
-
-*Cause:* File system error.
+*Cause:* You attempted to create an external table for an HBase or Hive table but the schema name is too long.
 
 *Effect:* The operation fails.
 
-*Recovery:* For information about file system errors, see <<file_system_errors,File-System Errors>>.
-
-[[SQL-1183]]
-== SQL 1183
-
-```
-Error <error> was returned by the file system on metadata table <ANSI-name> (file name <file-name>).
-```
-
-Where <error> is the error returned.
-
-Where <ANSI-name> is the metadata table.
-
-Where <file-name> is the name of the file.
-
-*Cause:* File system error.
-
-*Effect:* The operation fails.
-
-*Recovery:* For information about file system errors, see <<file_system_errors,File-System Errors>>.
-
-<<<
-[[SQL-1184]]
-== SQL 1184
-
-```
-You do not have the required privilege(s) on <column-name>.
-```
-
-Where <column-name> is the name of a column specified in the references
-part of a referential integrity constraint.
-
-*Cause:* You attempted to establish a referential integrity constraint
-on a column for which the executing user ID has no REFERENCES
-privileges.
-
-*Effect:* The operation fails.
-
-*Recovery:* Establish correct column privileges and resubmit.
-
-[[SQL-1185]]
-== SQL 1185
-
-```
-The location name is either invalid or missing.
-```
-
-*Cause:* A partition location is either invalid or missing.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify the correct location for all partitions involved in
-the affected command.
+*Recovery:* None.
 
 <<<
 [[SQL-1186]]
@@ -1904,7 +1753,7 @@
 == SQL 1187
 
 ```
-The schema name <schema-name> is reserved for {project-name} metadata.
+The schema name <schema-name> is reserved for SQL metadata.
 ```
 
 Where <schema-name> is the name of a {project-name} database schema.
@@ -1940,50 +1789,371 @@
 
 *Recovery:* None. You cannot define a referential constraint that creates a circular dependency.
 
-[[SQL-1224]]
-== SQL 1224
+[[SQL-1190]]
+== SQL 1190
 
 ```
-An invalid data type was specified for routine parameter <parameter-name>.
+Failed to initialize Hive metadata. Call to <function> returned error <optional-symbol>(<error-number>). Cause: <diagnostics>.
 ```
 
-Where <parameter-name> is the name of the parameter.
+Where <function> is a {project-name} internal method call.
 
-*Cause:* You specified a data type for this routine parameter that is
-not supported.
+Where <optional-symbol> is an internal symbol representing the error.
 
-*Effect:* {project-name} is unable to complete the operation.
+Where <error-number> is the error code returned by <function>.
 
-*Recovery:* Specify a different data type.
+Where <diagnostics> is Java exception information.
+
+*Cause:* You attempted to access a Hive object but {project-name} was unable to access the Hive metadata for that object. A common
+cause of this error is that the Hive metadata server is down.
+
+*Effect:* The operation fails.
+
+*Recovery:* Take corrective action to ensure that the Hive subsystem is functioning correctly. Then resubmit.
 
 <<<
-[[SQL-1225]]
-== SQL 1225
+[[SQL-1191]]
+== SQL 1191
 
 ```
-Mixing EXECUTE with other privileges is not allowed.
+SERIALIZE option is not yet supported for <data-type> datatype.
 ```
 
-*Cause:* EXECUTE and another privilege were specified in the same GRANT
-or REVOKE statement, which is not allowed.
+Where <data-type> is the data type of a column that you tried to create.
+
+*Cause:* You attempted to create a column with the SERIALIZED attribute (for example, while submitting a CREATE TABLE statement) 
+but {project-name} does not presently
+support columns of the given data type with the SERIALIZED attribute.
 
 *Effect:* The operation fails.
 
-*Recovery:* Use separate GRANT or REVOKE statements for EXECUTE and other privileges.
+*Recovery:* Correct the statement by either changing the data type or removing the SERIALIZED attribute. Then resubmit.
 
-[[SQL-1226]]
-== SQL 1226
+[[SQL-1192]]
+== SQL 1192
 
 ```
-No valid combination of privileges was specified.
+Failed to retrieve data from Hive metastore. Call to <function> returned error <optional-symbol>(<error-number>). Cause: <diagnostics>.
 ```
 
-*Cause:* The GRANT or REVOKE statement did not specify a valid
-combination of privileges.
+Where <function> is a {project-name} internal method call.
+
+Where <optional-symbol> is an internal symbol representing the error.
+
+Where <error-number> is the error code returned by <function>.
+
+Where <diagnostics> is Java exception information.
+
+*Cause:* An error occurred when {project-name} attempted to read metadata about a Hive object from the Hive metastore. The <diagnostics>
+contains specific information about the error.
 
 *Effect:* The operation fails.
 
-*Recovery:* Specify a valid combination of privileges in the GRANT or REVOKE statement.
+*Recovery:* The <diagnostics> may suggest appropriate corrective action. If not, report this error and the associated SQL
+operation to the {project-name} User Distribution List.
+
+<<<
+[[SQL-1193]]
+== SQL 1193
+
+```
+The <type-of-string> specified in the <clause-name> clause must be identical to the primary key for a Trafodion table.
+```
+
+Where <type-of-string> indicates some syntactic construct (e.g. "clustering key").
+
+Where <clause-name> is a SQL clause (e.g. "STORE BY").
+
+*Cause:* You specified two syntactic constructs (e.g. "STORE BY" and "PRIMARY KEY") in a DDL statement (e.g. "CREATE TABLE") that
+overlap in meaning. {project-name} requires the former construct to match the latter in this case.
+
+*Effect:* The operation fails.
+
+*Recovery:* Either remove the <clause-name> clause (as it is redundant) or change it to match the primary key. Then resubmit.
+
+[[SQL-1195]]
+== SQL 1195
+
+```
+Column <column-name> is not allowed as a salt column. Only primary key columns or STORE BY columns are allowed.
+```
+
+Where <column-name> is the name of a column specified in a SALT clause.
+
+*Cause:* You specified a column in a SALT clause (e.g. on a CREATE TABLE statement) that is not part of the primary or clustering key.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the column name then resubmit.
+
+<<<
+[[SQL-1196]]
+== SQL 1196
+
+```
+The number of salt partitions must be between <lower-bound> and <upper-bound> inclusive.
+```
+
+Where <lower-bound> is the smallest allowable number of salt partitions supported.
+
+Where <upper-bound> is the largest allowable number of salt partitions supported.
+
+*Cause:* You specified a SALT clause (e.g. on a CREATE TABLE statement) with a number of partitions outside the range that
+{project-name} supports.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the SALT clause then resubmit.
+
+[[SQL-1197]]
+== SQL 1197
+
+```
+The location <location-1> for <stored-procedure-name> does not match with another location <location-2> specified. All location specifications must be identical.
+```
+
+Where <location-1> and <location-2> are execution locations for an internal stored procedure.
+
+Where <stored-procedure-name> is the name of an internal stored procedure.
+
+*Cause:* This is a {project-name} internal error.
+
+*Effect:* The operation fails.
+
+*Recovery:* None. Report this error and the associated SQL
+operation to the {project-name} User Distribution List.
+
+<<<
+[[SQL-1199]]
+== SQL 1199
+
+```
+The PARTITION BY clause is not allowed for a Trafodion table.
+```
+
+*Cause:* You specified a PARTITION BY clause when trying to create a {project-name} table.
+
+*Effect:* The operation fails.
+
+*Recovery:* Remove the PARTITION BY clause from the DDL statement and resubmit.
+
+[[SQL-1200]]
+== SQL 1200
+
+```
+An error occurred while reading HDFS file statistics for Hive table <table-name>. Cause: <diagnostics>.
+```
+
+Where <table-name> is the name of a Hive table.
+
+Where <diagnostics> give diagnostics as to the cause of the error.
+
+*Cause:* The {project-name} engine attempted to read file statistics from HDFS for a particular Hive table.
+A failure occurred at the HDFS or Hive level. The diagnostics contain more information about the failure.
+
+*Effect:* The operation fails.
+
+*Recovery:* Address the indicated issue at the HDFS or Hive level, then resubmit.
+
+<<<
+[[SQL-1201]]
+== SQL 1201
+
+```
+Salted index <index-name> cannot be unique.
+```
+
+Where <index-name> is the name of a {project-name} index that you are trying to create.
+
+*Cause:* You attempted a CREATE UNIQUE INDEX statement that included the SALT LIKE TABLE clause.
+SALT LIKE TABLE is not allowed for unique indexes.
+
+*Effect:* The operation fails.
+
+*Recovery:* Remove either the UNIQUE keyword or the SALT LIKE TABLE clause and resubmit.
+
+[[SQL-1202]]
+== SQL 1202
+
+```
+Index <index-name> cannot be salted like a table since table <table-name> is not salted.
+```
+
+Where <index-name> is the name of a {project-name} index that you are trying to create.
+
+Where <table-name> is a {project-name} table.
+
+*Cause:* You attempted a CREATE INDEX statement that included the SALT LIKE TABLE clause on
+a table that is not salted.
+
+*Effect:* The operation fails.
+
+*Recovery:* Remove the SALT LIKE TABLE clause and resubmit.
+
+<<<
+[[SQL-1203]]
+== SQL 1203
+
+```
+HBASE_OPTIONS clause in CREATE statement is longer than 6000 characters. Object <object-name> was not created.
+```
+
+Where <object-name> is the name of a {project-name} object that you are trying to create.
+
+*Cause:* The HBASE_OPTIONS clause specified in the CREATE statement contained more than 6000
+characters of HBase options, exceeding the length limit that {project-name} supports.
+
+*Effect:* The operation fails.
+
+*Recovery:* Shorten the set of options specified in HBASE_OPTIONS and resubmit.
+
+[[SQL-1204]]
+== SQL 1204
+
+```
+Unsupported Hive datatype <Hive-datatype>.
+```
+
+Where <Hive-datatype> is a Hive data type.
+
+*Cause:* You attempted to reference a Hive table that has a column with a data type
+that {project-name} does not support.
+
+*Effect:* The operation fails.
+
+*Recovery:* If the wrong Hive table was referenced, correct the reference and resubmit.
+
+<<<
+[[SQL-1214]]
+== SQL 1214
+
+```
+Error <Hive-exception-info> encountered when executing HiveQL statement <Hive-statement>.
+```
+
+Where <Hive-exception-info> is error diagnostic information from Hive.
+
+Where <Hive-statement> is the Hive SQL statement you specified.
+
+*Cause:* You attempted to execute a Hive SQL statement via the PROCESS HIVE STATEMENT command,
+and Hive detected errors in that statement.
+
+*Effect:* The operation fails.
+
+*Recovery:* Determine the proper recovery action from the <Hive-exception-info> then resubmit.
+
+[[SQL-1215]]
+== SQL 1215
+
+```
+An error occurred while determining host, port, or file name for HDFS URI <HDFS-URI>. Cause: <diagnostics>.
+```
+
+Where <HDFS-URI> is a Uniform Resource Identifier for a Hive table partition.
+
+Where <diagnostics> are diagnostics from HDFS.
+
+*Cause:* You attempted to access a Hive table but {project-name} could not obtain location information
+about that object.
+
+*Effect:* The operation fails.
+
+*Recovery:* Determine the proper recovery action from the <diagnostics> then resubmit.
+
+<<<
+[[SQL-1220]]
+== SQL 1220
+
+```
+Code must contain two non-blank characters.
+```
+
+*Cause:* You attempted to create a component privilege but you specified a component code that is not two non-blank characters.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
+[[SQL-1222]]
+== SQL 1222
+
+```
+Command not supported when authorization is not enabled.
+```
+
+*Cause:* You attempted to perform a privilege-related command (e.g. GRANT), but authorization
+is not enabled on this {project-name} instance.
+
+*Effect:* The operation fails.
+
+*Recovery:* Enable authorization on the instance (e.g. INITIALIZE AUTHORIZATION), register users and resubmit.
+
+[[SQL-1223]]
+== SQL 1223
+
+```
+Grant to self or DB__ROOT is not allowed.
+```
+
+*Cause:* You attempted to grant a privilege to yourself or to DB__ROOT.
+
+*Effect:* The operation fails.
+
+*Recovery:* None.
+
+<<<
+[[SQL-1227]]
+== SQL 1227
+
+```
+Cannot unregister user. User <user-name> has been granted privileges on <object-name>.
+```
+
+Where <user-name> is the name of a user that you are trying to unregister.
+
+Where <object-name> is the name of a {project-name} object.
+
+*Cause:* You attempted to unregister a user, but the user has privileges on a {project-name} object.
+
+*Effect:* The operation fails.
+
+*Recovery:* Use REVOKE to remove any privileges granted to the user, then resubmit.
+
+[[SQL-1228]]
+== SQL 1228
+
+```
+Cannot drop role. Role <role-name> has been granted privileges on <object-name>.
+```
+
+Where <role-name> is the name of a role that you are trying to drop.
+
+Where <object-name> is the name of a {project-name} object.
+
+*Cause:* You attempted to drop a role that has privileges. All privileges granted to a role
+must be dropped before the role can be dropped.
+
+*Effect:* The operation fails.
+
+*Recovery:* Use SHOWDDL on the <object-name> to see what privileges are granted to the role.
+Then REVOKE those privileges, and resubmit the DROP ROLE statement.
+
+<<<
+[[SQL-1230]]
+== SQL 1230
+
+```
+Object owner must be the schema owner in private schemas.
+```
+
+*Cause:* You attempted to create an object on behalf of another user but you attempted 
+to place the object in a private schema owned by a different user.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the schema name of the new object or correct the user name in the BY clause
+to match the owner of the schema.
 
 <<<
 [[SQL-1231]]
@@ -2002,112 +2172,20 @@
 *Recovery:* Fix the error conditions identified in messages preceding
 this message and reissue the CREATE PROCEDURE statement.
 
-[[SQL-1232]]
-== SQL 1232
+[[SQL-1234]]
+== SQL 1234
 
 ```
-A file error occurred when saving dropped table DDL for table <table-name> to <path-name>.
+Authorization needs to be reinitialized due to missing or corrupted privilege manager metadata. To reinitialize, do 'initialize authorization, drop' followed by 'initialize authorization'. This deletes and recreates privilege manager metadata. Trafodion metadata is not affected.
 ```
 
-Where <table-name> is the table being dropped whose DDL was to be saved.
+*Cause:* Privilege metadata has become corrupted.
 
-Where <path-name> is the pathname of the file to which the DDL was to
-be saved.
+*Effect:* Some or all authorization operations on the {project-name} instance will fail.
 
-*Cause:* A file system error occurred when you attempted to save a table
-DDL. Possible reasons are:
-
-* The directory `/usr/tandem/sqlmx/ddl` did not exist or could not be
-created.
-* The system did not have write access to `/usr/tandem/sqlmx/ddl`.
-* Insufficient file space was available.
-
-*Effect:* The table is not dropped.
-
-*Recovery:* Either correct the file system problem and drop the table or
-perform a SHOWDDL on the existing table, capture the output, set.
-SAVE_DROPPED_TABLE_DDL to "OFF," and drop the table. For information
-about file system errors, see <<file_system_errors,File-System Errors>>.
+*Recovery:* Reinitialize authorization by performing INITIALIZE AUTHORIZATION, DROP followed by INITIALIZE AUTHORIZATION.
 
 <<<
-[[SQL-1233]]
-== SQL 1233
-
-```
-Creating schema in {project-name} system catalog <catalog-name> is prohibited.
-```
-
-Where <catalog-name> is the name of the {project-name} database catalog.
-
-*Cause:* You attempted to create a schema in the system catalog.
-
-*Effect:* The operation fails.
-
-*Recovery:* Choose a different catalog name and reissue the CREATE
-statement.
-
-[[SQL-1235]]
-== SQL 1235
-
-```
-An invalid combination of EXTENT sizes and MAXEXTENTS was specified
-```
-for table or index <table-name>.
-
-*Cause:* A CREATE or ALTER statement specified an invalid combination of
-EXTENT sizes and MAXEXTENTS.
-
-*Effect:* The operation fails.
-
-*Recovery:* Correct the error and retry the operation.
-
-<<<
-[[SQL-1236]]
-== SQL 1236
-
-```
-The schema name specified for SQL object <object-name> is not valid. The schema name must be the same as the schema being created.
-```
-
-Where <object-name> is the name of the SQL object.
-
-*Cause:* You specified a schema name for an object that is different
-from the name of the schema being created.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify a schema name for the object that matches the name
-of the schema being created.
-
-[[SQL-1238]]
-== SQL 1238
-
-```
-The character set for TRIGGER text must be ISO88591.
-```
-
-*Cause:* You specified a literal with a character set other than
-ISO88591 in the text of a create trigger statement.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify only ISO88591 literals in the command.
-
-<<<
-[[SQL-1239]]
-== SQL 1239
-
-```
-The character set for string literals in VIEW text must be ISO88591.
-```
-
-*Cause:* You specified a literal with a character set other than
-ISO88591 in the text of a create view statement.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify only ISO88591 literals in the command.
-
 [[SQL-1240]]
 == SQL 1240
 
@@ -2122,37 +2200,6 @@
 
 *Recovery:* Specify only ISO88591 literals as partition keys.
 
-<<<
-[[SQL-1241]]
-== SQL 1241
-
-```
-The character set for HEADING must be ISO88591.
-```
-
-*Cause:* You specified a literal with a character set other than
-ISO88591 in a HEADING clause.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify only ISO88591 literals in HEADING clauses.
-
-[[SQL-1242]]
-== SQL 1242
-
-```
-The character set for string literals in CONSTRAINT must be ISO88591.
-```
-
-*Cause:* You specified a literal with a character set other than
-
-ISO88591 in the text of a constraint.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify only ISO88591 literals in constraints.
-
-<<<
 [[SQL-1243]]
 == SQL 1243
 
@@ -2167,90 +2214,7 @@
 
 *Recovery:* Do not specify hexadecimals in this type of command.
 
-[[SQL-1245]]
-== SQL 1245
-
-```
-The supplied partition key value (<key-value>) for column <column-name> of object <object-name> is not valid.
-```
-
-Where <key-value> is the specified first key value.
-
-Where <column-name> is the column of <object-name> that corresponds to
-the invalid key value.
-
-Where <object-name> is the name of the affected object.
-
-*Cause:* A utility command specified an invalid key value.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify valid key values and resubmit.
-
 <<<
-[[SQL-1246]]
-== SQL 1246
-
-```
-The supplied partition key value (<key-value>) is inconsistent
-```
-with the data type of column <column-name> of object <object-name>.
-
-Where <key-value> is the specified first key value.
-
-Where <column-name> is the column of <object-name> that corresponds to
-the invalid key value.
-
-Where <object-name> is the name of the affected object.
-
-*Cause:* A utility command specified a key value that is inconsistent
-with the data type of the column that corresponds to the key value.
-
-*Effect:* The operation fails.
-
-*Recovery:* Specify valid key values and resubmit.
-
-[[SQL-1248]]
-== SQL 1248
-
-```
-Partition name <partition-name> has already been used. Each partition in an object should have a unique name.
-```
-
-Where <partition-name> is the name specified for a partition.
-
-*Cause:* Partition names are required to be unique within the set of
-partitions of an object.
-
-*Effect:* The CREATE statement fails.
-
-*Recovery:* Choose a name that is not the same as any other partition
-name in this object and reissue the CREATE statement.
-
-<<<
-[[SQL-1250]]
-== SQL 1250
-
-```
-DROP cannot be performed on object <object-name> because a utility operation (<operation-type>) associated with DDL_LOCK <lock-name> is currently running.
-```
-
-Where <object-name> is the ANSI name of the object named in the DROP
-command.
-
-Where <operation-type> is the type of utility operation.
-
-Where <lock-name> is the ANSI name of the DDL lock object that is
-associated with the utility operation.
-
-*Cause:* You attempted to drop an object that a utility command is
-using.
-
-*Effect:* The operation fails.
-
-*Recovery:* Wait until the utility operation has finished, and then
-retry the DROP operation.
-
 [[SQL-1251]]
 == SQL 1251
 
@@ -2271,22 +2235,6 @@
 *Recovery:* None. This is a warning message only.
 
 <<<
-[[SQL-1252]]
-== SQL 1252
-
-```
-The existing index <index-name> to be used by a unique or primary constraint has not been populated. Please populate the index and then try to add the constraint again.
-```
-
-Where <index-name> is the name of an existing unpopulated index.
-
-*Cause:* An existing index that is not populated has been chosen for a
-unique or primary constraint, which is not allowed.
-
-*Effect:* The operation fails.
-
-*Recovery:* Populate the index and resubmit.
-
 [[SQL-1254]]
 == SQL 1254
 
@@ -2305,43 +2253,57 @@
 set of columns.
 
 <<<
-[[SQL-1262]]
-== SQL 1262
+[[SQL-1255]]
+== SQL 1255
 
 ```
-The command cannot be executed because <operation> is in progress for <schema>.
+Constraint <constraint-name> is the clustering key constraint for table <table-name> and cannot be dropped.
 ```
 
-Where <operation> is a schema-level operation.
+Where <constraint-name> refers to a constraint that you are trying to drop.
 
-Where <schema> is the ANSI name of the affected schema.
+Where <table-name> is the name of a {project-name} table.
 
-*Cause:* You attempted a DDL or utility operation on a database object
-while <operation> was in progress for that object's <schema>.
+*Cause:* You attempted to drop a constraint on a {project-name} table, but the constraint is implied by
+the clustering key and therefore cannot be dropped. This can occur, for example, if the primary key
+was used as the clustering key, and you attempt to drop the primary key constraint.
 
-*Effect:* The DDL or utility operation fails.
+*Effect:* The operation fails.
 
-*Recovery:* Wait until the operation has finished, and then retry the
-DDL or utility operation.
+*Recovery:* If you genuinely wish to remove this constraint, recreate the table with a different
+clustering key.
 
-[[SQL-1263]]
-== SQL 1263
+[[SQL-1256]]
+== SQL 1256
 
 ```
-Table, view, and stored procedure names that start with <text> are reserved for {project-name} metadata.
+PRIMARY KEY constraint cannot be added since table <table-name> already has a user specified clustering key. 
 ```
 
-Where <text> is the disallowed name that was specified.
+Where <table-name> is the name of a {project-name} table.
 
-*Cause:* You specified a {project-name} database reserved object name.
+*Cause:* You attempted to add a primary key constraint, but the table already has a user-specified
+clustering key. This can happen, for example, when adding a primary key to a table that already has one.
 
-*Effect:* The object is not created.
+*Effect:* The operation fails.
 
-*Recovery:* See the {docs-url}/sql_reference/index.html[_{project-name} SQL Reference Manual_] for the list of
-reserved object names. Choose a name that is not reserved and reissue
-the CREATE statement.
+*Recovery:* If you genuinely wish to add this primary key, recreate the table with the primary key as
+its clustering key.
 
 <<<
+[[SQL-1260]]
+== SQL 1260
+
+```
+Debugging of UDRs is only allowed for the DB__ROOT user. Connect as DB__ROOT, preferably using the sqlci tool, and try again.
+```
+
+*Cause:* You attempted to debug a User Defined Routine, but you were not connected as DB__ROOT.
+
+*Effect:* The operation fails.
+
+*Recovery:* Connect as DB__ROOT and resubmit.
+
 [[SQL-1264]]
 == SQL 1264
 
@@ -2357,21 +2319,6 @@
 *Recovery:* Reissue the GRANT or REVOKE statement, specifying a single
 privilege or a list of distinct privileges.
 
-[[SQL-1265]]
-== SQL 1265
-
-```
-Duplicate grantees not allowed in a GRANT or REVOKE statement.
-```
-
-*Cause:* You specified duplicate grantees in a GRANT or REVOKE
-statement.
-
-*Effect:* The operation fails.
-
-*Recovery:* Reissue the GRANT or REVOKE statement specifying a single
-grantee or a list of distinct grantees.
-
 <<<
 [[SQL-1266]]
 == SQL 1266
@@ -2392,9 +2339,11 @@
 == SQL 1267
 
 ```
-EXECUTE privilege is incompatible with this object type.
+<privilege-type> privilege is incompatible with this object type.
 ```
 
+Where <privilege-type> is a type of privilege (e.g. EXECUTE).
+
 *Cause:* You specified a privilege that is not supported for the object
 type in a GRANT statement.
 
@@ -2419,271 +2368,778 @@
 *Recovery:* Correct the GRANT or REVOKE statement by specifying a single
 column name or a list of distinct column names.
 
-[[SQL-1270]]
-== SQL 1270
+<<<
+[[SQL-1269]]
+== SQL 1269
 
 ```
-ALLOCATE or DEALLOCATE failed for object <sql-object-name> due to
-file error <file-system-error-number> on <file-name>.
+Column name <column-name> is reserved for internal system usage. It cannot be specified as a user column.
 ```
 
-Where <sql-object-name> is the ANSI name of the affected SQL database
-object.
+Where <column-name> is the column name you specified.
 
-Where <file-system-error-number> is a {project-name} platform file system
-error code.
-
-*Cause:* A CREATE or ALTER operation encountered a file system error
-<error-number> during processing of the ALLOCATE or DEALLOCATE attribute.
+*Cause:* You attempted to use a column name in a DDL statement that is reserved by {project-name} 
+for internal use. For example,
+you attempted to create a table with a column named SYSKEY.
 
 *Effect:* The operation fails.
 
-*Recovery:* For information about file system errors, see
-<file-system-errors,File-System Errors>.
+*Recovery:* Correct the DDL statement and resubmit.
+
+[[SQL-1276]]
+== SQL 1276
+
+```
+Unable to select partition <partition-number> from table <table-name>.
+```
+
+Where <partition-number> is a partition number you specified.
+
+Where <table-name> is the table name you specified.
+
+*Cause:* You attempted to select data from a subset of the partitions of a table, but the
+table in question is not salted or the table name given refers to a view.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
 
 <<<
-[[SQL-1271]]
-== SQL 1271
+[[SQL-1279]]
+== SQL 1279
 
 ```
-ALLOCATE failed for object <object-name> because extents to be allocated <number> is greater than the MAXEXTENTS for a partition of the object.
+A volatile DDL statement cannot be used on regular objects.
 ```
 
-Where <object-name> is the ANSI name of the affected database object.
-
-Where <number> is the specified number of extents.
-
-*Cause:* A CREATE or ALTER operation specified an ALLOCATE attribute
-value that was greater than the MAXEXTENTS value for the affected
-database object, which is not allowed.
+*Cause:* You specified VOLATILE when performing a DDL operation on a non-volatile object. For example, you
+may have specified DROP VOLATILE TABLE on a table that is not volatile.
 
 *Effect:* The operation fails.
 
-*Recovery:* If possible, alter the MAXEXTENTS attribute value to be
-greater than the
+*Recovery:* Correct the statement and resubmit.
 
-desired number of extents to be allocated. The current maximum value for
-MAXEXTENTS is 768.
-
-[[SQL-1273]]
-== SQL 1273
+[[SQL-1280]]
+== SQL 1280
 
 ```
-The specified MAXEXTENTS value must be greater than the number of extents allocated.
+A regular DDL statement cannot be used on volatile objects.
 ```
 
-*Cause:* The value that you specified to change MAXEXTENTS is less than
-or equal to the allocated extents.
+*Cause:* You attempted an unsupported ALTER statement on a volatile object. Altering a column
+on a volatile table, for example, is not presently supported.
 
 *Effect:* The operation fails.
 
-*Recovery:* Use a MAXEXTENTS value greater than the allocated extents.
+*Recovery:* Correct the statement and resubmit.
 
 <<<
-[[SQL-1274]]
-== SQL 1274
+[[SQL-1282]]
+== SQL 1282
 
 ```
-The specified or default MAXEXTENTS value is not sufficient. The MAXEXTENTS value has been automatically set to the new value of <value> for the file <file-name>.
+A LOB column cannot be specified in a volatile table.
 ```
 
-*Cause:* You specified an insufficient MAXEXTENTS value while creating
-an index.
-
-*Effect:* The SQL operation completed with a warning.
-
-*Recovery:* This is a warning only.
-
-[[SQL-1275]]
-== SQL 1275
-
-```
-Constraint <constraint-1> cannot be dropped because it is needed by unique constraint <constraint-2>.
-```
-
-Where <constraint-1> is the constraint you are trying to drop.
-
-Where <constraint-2> is the UNIQUE constraint.
-
-*Cause:* You attempted to drop a constraint that is needed by a UNIQUE
-constraint, which is not allowed because it would leave a UNIQUE
-constraint on a column, but without a NOT NULL constraint for that
-column.
+*Cause:* You attempted to create a volatile table with a BLOB or CLOB column. {project-name} presently
+does not support this. (Note: By default, {project-name} maps BLOB and CLOB columns to VARCHAR columns,
+which are supported. This particular error message can only occur if this mapping is turned off. The
+mapping is turned off when CQD TRAF_BLOB_AS_VARCHAR and/or CQD TRAF_CLOB_AS_VARCHAR is set to 'OFF'.)
 
 *Effect:* The operation fails.
 
-*Recovery:* To drop the constraint, you must remove the UNIQUE
-constraint or add an additional NOT NULL constraint to the columns that
-contain UNIQUE constraints and only one NOT NULL constraint for that
-column.
+*Recovery:* Correct the statement and resubmit.
 
-<<<
-[[SQL-1277]]
-== SQL 1277
+[[SQL-1283]]
+== SQL 1283
 
 ```
-Unrecognized partitioning scheme for object <object-name>.
+The specified constraint or file option is not supported on a volatile table.
 ```
 
-<object-name> is the name of the SQL object (table or index).
-
-*Cause:* The {project-name} database does not recognize the partitioning
-scheme stored in the metadata for the named object.
-
-*Effect:* The named object is considered corrupt and is inaccessible.
-
-*Recovery:* None. Contact the {project-name} User Distribution List Consider
-running the VERIFY operation on the named object to check for
-inconsistencies in the metadata with respect to the partitioning scheme.
-
-[[SQL-1278]]
-== SQL 1278
-
-```
-The command cannot be executed because <operation> is in progress for all schemas in catalog <catalog>.
-```
-
-<operation> is a schema level operation, currently UPGRADE or DOWNGRADE,
-that uses the ALL SCHEMAS IN CATALOG flavor. <catalog> is a catalog name
-that is affected by <operation>.
-
-*Cause:* An attempt was made to execute a DDL or utility operation that
-affected <catalog>, or a schema in that catalog.
+*Cause:* You attempted to create a volatile table with a feature that {project-name} does not support
+on volatile tables. For example, unique constraints are not supported on volatile table columns.
 
 *Effect:* The operation fails.
 
-*Recovery:* Wait until <operation> is finished, then retry the failed
-DDL or utility operation.
+*Recovery:* Correct the statement and resubmit.
 
 <<<
-[[SQL-1301]]
-== SQL 1301
+[[SQL-1289]]
+== SQL 1289
 
 ```
-NO ACTION referential action for <referential-triggered-action> clause is not yet supported as specified by ANSI SQL-99 standard. To alter the behavior, set an appropriate value for the REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT default.
+The use of ALTER on reserved schemas and metadata schemas is not permitted.
 ```
 
-Where <referential-triggered-action> can either be ON DELETE or ON
-UPDATE.
-
-*Cause:* NO ACTION referential action is specified in the referential
-integrity definition, and the CONTROL QUERY DEFAULT value for
-REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT is 'OFF.'
-
-*Effect:* The NO ACTION referential action cannot be defined.
-
-*Recovery:* To alter the behavior of NO ACTION referential action, set
-the appropriate value for the REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT
-default.
-
-[[SQL-1302]]
-== SQL 1302
-
-```
-NO ACTION referential action for <referential-triggered-action> clause behaves like RESTRICT referential action. To alter the behavior, set the appropriate value for the REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT default.
-```
-
-Where <referential-triggered-action> can either be ON DELETE or ON
-UPDATE.
-
-*Cause:* NO ACTION referential action is specified in the referential
-definition and the CONTROL QUERY DEFAULT value for
-REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT is 'SYSTEM.'
-
-*Effect:* The NO ACTION referential action has RESTRICT referential
-action semantics. It is recorded as NO ACTION in the metadata table.
-
-*Recovery:* To alter the behavior of the NO ACTION referential action,
-set the appropriate value for the REF_CONSTRAINT_NO_ACTION_LIKE_RESTRICT
-default.
-
-<<<
-[[SQL-1305]]
-== SQL 1305
-
-```
-The specified schema location <directory> is already in use by schema <schema>.
-```
-
-Where <directory> is the {project-name} platform subvolume name specified in
-the LOCATION clause of the CREATE SCHEMA command.
-
-Where <schema> is the ANSI name of an existing schema that already uses
-<directory> as a schema subvolume.
-
-*Cause:* In a CREATE SCHEMA command, you specified <directory> in the
-LOCATION clause; however, this subvolume is already being used as schema
-subvolume by <schema>.
-
-*Effect:* The statement fails unless it includes the optional REUSE
-clause to allow reuse of the same schema subvolume name. If the REUSE
-clause is used, this is a warning message and the schema is created
-successfully. The warning can be issued repeatedly for a single CREATE
-SCHEMA command if multiple schemas already exist with <directory> as the
-schema subvolume.
-
-*Recovery:* Only schemas that are RDF replicated to another node should
-have the same subvolume names as their corresponding schemas on the
-other node. To create these, use the optional REUSE clause in the CREATE
-SCHEMA statement. All other schemas should have unique subvolume names.
-Schemas that are, or will be, related as RDF primary and backup schemas
-must have identical schema names and subvolumes on the primary and
-backup nodes.
-
-[[SQL-1307]]
-== SQL 1307
-
-```
-The schema location <directory> is reserved for {project-name} metadata.
-```
-
-Where <directory> is the {project-name} platform subvolume name specified in
-the LOCATION clause of the CREATE SCHEMA command.
-
-*Cause:* In a CREATE SCHEMA command, you specified subvolume in the
-LOCATION clause, however subvolume names with the format
-ZSD<digit><anything> are reserved for {project-name} database software
-metadata schemas.
+*Cause:* You attempted to ALTER an object in a reserved or metadata schema. This is not
+permitted.
 
 *Effect:* The operation fails.
 
-*Recovery:* Specify a subvolume name using the format
-ZSD<letter><anything> in the LOCATION clause, and resubmit.
+*Recovery:* If your intent was to alter a different object, correct the object name and resubmit.
 
-<<<
-[[SQL-1309]]
-== SQL 1309
+[[SQL-1298]]
+== SQL 1298
 
 ```
-Object type for <object-name> is not valid for the current operation.
+Schema <schema-name> could not be altered. <reason>
 ```
 
-Where <object-name> is the name of the object.
+Where <schema-name> is the name of the schema you specified.
 
-*Cause:* You specified an object for an SQL operation that does not
-support its object type.
+Where <reason> gives further information about the nature of the error.
+
+*Cause:* You attempted an ALTER SCHEMA statement but one or more objects in the schema are in an
+invalid state. The <reason> gives more details concerning this invalid state.
 
 *Effect:* The operation fails.
 
-*Recovery:* Specify an object of valid object type and retry the
-operation.
+*Recovery:* The <reason> may suggest possible recoveries. For example, you may need to
+use CLEANUP on certain objects in the schema, then resubmit the ALTER SCHEMA statement.
 
-[[SQL-1310]]
-== SQL 1310
+<<<
+[[SQL-1313]]
+== SQL 1313
 
 ```
-The CREATE SCHEMA statement does not support the creation of triggers.
+The referential integrity constraint <constraint-name> has been created with the NOT ENFORCED attribute and will not be enforced during INSERT, UPDATE, OR DELETE statements.
 ```
 
-*Cause:* You specified creation of triggers in the CREATE SCHEMA
-statement.
+Where <constraint-name> is the name of the constraint you specified.
+
+*Cause:* You created a table with a referential constraint but specified NOT ENFORCED for that constraint. NOT ENFORCED is commonly
+used when specifying a constraint for documentation purposes only.
+
+*Effect:* This is a warning message only. The DDL succeeds, however the constraint named is not enforced at run time.
+
+*Recovery:* None needed.
+
+[[SQL-1323]]
+== SQL 1323
+
+```
+Unable to grant privileges while creating <object-name>.
+```
+
+Where <object-name> is the name of a {project-name} object.
+
+*Cause:* While creating the given object, {project-name} encountered an error when setting the privileges for that object. This error
+is usually accompanied by other error messages that give details on the cause.
 
 *Effect:* The operation fails.
 
-*Recovery:* Remove the creation of triggers from the CREATE SCHEMA
-statement and resubmit. Create triggers in separate statements.
+*Recovery:* Recovery depends on the specific cause of the error, as described in the accompanying error messages.
 
 <<<
+[[SQL-1330]]
+== SQL 1330
+
+```
+Cannot revoke role <role-name> from authorization ID <user-name>. Role has been granted to another authorization ID.
+```
+
+Where <role-name> is the role name you specified.
+
+Where <user-name> is the user name you specified.
+
+*Cause:* While executing a REVOKE ROLE command to revoke the given role from the given user, {project-name} discovered that
+the user had granted that role to yet another user.
+
+*Effect:* The operation fails.
+
+*Recovery:* Have the user revoke the role, then resubmit.
+
+[[SQL-1331]]
+== SQL 1331
+
+```
+User <directory-service-user-name> does not exist in the directory service.
+```
+
+Where <directory-service-user-name> is a directory service user name that you specified.
+
+*Cause:* You attempted a REGISTER USER or ALTER USER EXTERNAL NAME statement, but the directory service user name
+does not exist in the directory service.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the directory service user name and resubmit.
+
+<<<
+[[SQL-1332]]
+== SQL 1332
+
+```
+Error while communicating with the directory service.
+```
+
+*Cause:* You attempted a REGISTER USER or ALTER USER EXTERNAL NAME statement, but the directory service is unavailable.
+
+*Effect:* The operation fails.
+
+*Recovery:* Restore availability of the directory service, then resubmit.
+
+[[SQL-1333]]
+== SQL 1333
+
+```
+User <user-name> does not exist.
+```
+
+Where <user-name> is the name of the user you specified.
+
+*Cause:* You specified an UNREGISTER or ALTER USER statement for a user that does not exist.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the user name and resubmit.
+
+<<<
+[[SQL-1334]]
+== SQL 1334
+
+```
+User or role <user-or-role-name> already exists.
+```
+
+Where <user-or-role-name> is the name of the user or role you specified.
+
+*Cause:* You specified a REGISTER statement for a user or role that already exists.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the user or role name and resubmit.
+
+[[SQL-1335]]
+== SQL 1335
+
+```
+Directory service user <directory-service-user-name> already defined in the database.
+```
+
+Where <directory-service-user-name> is a directory service user name that you specified.
+
+*Cause:* You specified a directory service user name in a REGISTER statement or in
+the EXTERNAL NAME clause of an ALTER USER statement, and the directory service user name
+is already defined in the {project-name} instance.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the external user name and resubmit.
+
+<<<
+[[SQL-1337]]
+== SQL 1337
+
+```
+<user-name> is a reserved authorization identifier.
+```
+
+Where <user-name> is a user name that you specified.
+
+*Cause:* The <user-name> you specified in a REGISTER, ALTER USER or UNREGISTER statement
+is a reserved identifier. For example, "PUBLIC" is reserved; an attempt to REGISTER or 
+UNREGISTER "PUBLIC" will fail with this error.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the user name and resubmit.
+
+[[SQL-1338]]
+== SQL 1338
+
+```
+Role <role-name> is not defined in the database.
+```
+
+Where <role-name> is a role name that you specified.
+
+*Cause:* You specified a role name in a GRANT, REVOKE or DROP ROLE statement that
+does not exist in the database.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the role name and resubmit.
+
+<<<
+[[SQL-1339]]
+== SQL 1339
+
+```
+<role-name> is not a grantable role.
+```
+
+Where <role-name> is a role name that you specified.
+
+*Cause:* You attempted to grant a role that is not grantable. For example, "PUBLIC" is not a grantable role.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the role name and resubmit.
+
+[[SQL-1340]]
+== SQL 1340
+
+```
+<user-name> is not a user.
+```
+
+Where <user-name> is a user name that you specified.
+
+*Cause:* You specified a name in a GET PRIVILEGES FOR USER statement that cannot be 
+a user name. For example, GET PRIVILEGES FOR USER "PUBLIC" will generate this error.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the user name and resubmit.
+
+<<<
+[[SQL-1347]]
+== SQL 1347
+
+```
+Cannot unregister user. User owns one or more roles.
+```
+
+*Cause:* You specified an UNREGISTER command, but the user name specified owns one 
+or more roles.
+
+*Effect:* The operation fails.
+
+*Recovery:* Drop any roles owned by the user, then resubmit.
+
+[[SQL-1348]]
+== SQL 1348
+
+```
+Cannot drop role. Role is granted to one or more users.
+```
+
+*Cause:* You attempted a DROP ROLE statement but the role you are attempting to
+drop is granted to one or more users.
+
+*Effect:* The operation fails.
+
+*Recovery:* Revoke the role from any users, then resubmit.
+
+<<<
+[[SQL-1349]]
+== SQL 1349
+
+```
+Cannot unregister user. User granted one or more roles.
+```
+
+*Cause:* You specified an UNREGISTER command, but the user name specified has been granted one 
+or more roles.
+
+*Effect:* The operation fails.
+
+*Recovery:* Revoke the roles from the user, then resubmit.
+
+[[SQL-1351]]
+== SQL 1351
+
+```
+Role <role-name> appears multiple times in list.
+```
+
+Where <role-name> is one of the role names you specified.
+
+*Cause:* You specified a list of role names in a GRANT ROLE or REVOKE ROLE statement, but
+the given role name appears more than once in the list.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the role name list (for example, removing the duplication), then resubmit.
+
+<<<
+[[SQL-1352]]
+== SQL 1352
+
+```
+User <user-name> appears multiple times in list.
+```
+
+Where <user-name> is one of the user names you specified.
+
+*Cause:* You specified a list of user names in a GRANT ROLE or REVOKE ROLE statement, but
+the given user name appears more than once in the list.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the user name list (for example, removing the duplication), then resubmit.
+
+<<<
+[[SQL-1355]]
+== SQL 1355
+
+```
+Granting a role to PUBLIC or _SYSTEM is not allowed.
+```
+
+*Cause:* You attempted to grant a role to PUBLIC or _SYSTEM in a GRANT ROLE statement.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement, then resubmit.
+
+[[SQL-1356]]
+== SQL 1356
+
+```
+Cannot create the component privilege specified. Component privilege code <privilege-code> for the component already exists. 
+```
+
+Where <privilege-code> is the privilege code you specified.
+
+*Cause:* You attempted to create a component privilege (CREATE COMPONENT PRIVILEGE statement),
+but the privilege code you specified is already defined.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement, then resubmit.
+
+<<<
+[[SQL-1357]]
+== SQL 1357
+
+```
+Cannot create the component privilege specified. Component privilege name <privilege-name> for the component already exists. 
+```
+
+Where <privilege-name> is the privilege name you specified.
+
+*Cause:* You attempted to create a component privilege (CREATE COMPONENT PRIVILEGE statement),
+but the privilege name you specified is already defined.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement, then resubmit.
+
+[[SQL-1364]]
+== SQL 1364
+
+```
+Cannot revoke role <role-name>. Object <object-name-1> depends on privileges on object <object-name-2>. 
+```
+
+Where <role-name> is the role name you specified in a REVOKE ROLE statement.
+
+Where <object-name-1> and <object-name-2> are objects in the database.
+
+*Cause:* You attempted to revoke a role from a particular user, but this is not allowed because the object <object-name-1>
+owned by that user depends on an object <object-name-2>, and the user has access to the latter object through the role
+privileges. This can happen, for example, if the user created a schema, then in that schema, created a view that references
+an object not owned by the user, but accessible to the user through the role.
+
+*Effect:* The operation fails.
+
+*Recovery:* Drop the dependent object <object-name-1>. Then resubmit the REVOKE ROLE statement.
+
+<<<
+[[SQL-1366]]
+== SQL 1366
+
+```
+Request failed. One or more dependent procedures exist.
+```
+
+*Cause:* You attempted to drop a library (DROP LIBRARY statement), but there is a user-defined function or procedure
+that references this library.
+
+*Effect:* The operation fails.
+
+*Recovery:* Drop any functions or procedures that reference this library. Then resubmit the DROP LIBRARY statement.
+
+[[SQL-1370]]
+== SQL 1370
+
+```
+The authorization name "<authorization-name>" contains one or more invalid characters. A valid name can only contain these characters: [a-zA-Z_0-9-@./]
+```
+
+Where <authorization-name> is the user name you specified.
+
+*Cause:* You attempted to execute a REGISTER USER command, but the user name you specified contains characters that
+are not permitted.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the user name and resubmit.
+
+<<<
+[[SQL-1382]]
+== SQL 1382
+
+```
+JAR or DLL file <file-name> not found.
+```
+
+Where <file-name> is the file name you specified.
+
+*Cause:* You attempted to execute a CREATE LIBRARY statement, but the library file does not exist.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the file name and resubmit.
+
+[[SQL-1388]]
+== SQL 1388
+
+```
+Object <hive-object-name> does not exist in hive metadata.
+```
+
+Where <hive-object-name> is the name of an Apache Hive object.
+
+*Cause:* You specified the name of a Hive object in a {project-name} DML statement, but that Hive object
+is not defined in the Hive metadata.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the object name and resubmit.
+
+<<<
+[[SQL-1389]]
+== SQL 1389
+
+```
+Object <object-name> does not exist in Trafodion.
+```
+
+Where <object-name> is the name of a {project-name} object.
+
+*Cause:* You specified the given object name in a DML or DDL statement, but the object does not 
+exist in the {project-name} database.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the object name and resubmit.
+
+[[SQL-1390]]
+== SQL 1390
+
+```
+Object <object-name> already exists in Trafodion.
+```
+
+Where <object-name> is the name of a {project-name} object.
+
+*Cause:* You specified the given object name in a CREATE statement, but the object already 
+exists in the {project-name} database.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the object name and resubmit.
+
+<<<
+[[SQL-1391]]
+== SQL 1391
+
+```
+<object-name> is a <object-kind> object and cannot be updated.
+```
+
+Where <object-name> is the name of a {project-name} object that you specified.
+
+Where <object-kind> is a description of what kind of an object it is (e.g. "metadata").
+
+*Cause:* You attempted an INSERT, UPDATE, DELETE or other write DML statement on a table
+where that is not allowed.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the object name and resubmit.
+
+[[SQL-1392]]
+== SQL 1392
+
+```
+Trafodion is already initialized on this system. No action is needed.
+```
+
+*Cause:* You attempted an INITIALIZE TRAFODION statement on a {project-name} instance that has already been initialized.
+
+*Effect:* The operation does nothing.
+
+*Recovery:* None needed; the instance is already initialized. If your intent was to re-initialize the instance, first
+perform INITIALIZE TRAFODION, DROP to destroy the old instance, then INITIALIZE TRAFODION to initialize the new instance. Note that
+destroying your old instance destroys all data in the instance.
+
+<<<
+[[SQL-1393]]
+== SQL 1393
+
+```
+Trafodion is not initialized on this system. Do 'initialize trafodion' to initialize it.
+```
+
+*Cause:* You attempted a SQL statement on a {project-name} instance that has not yet been initialized.
+
+*Effect:* The operation fails.
+
+*Recovery:* Execute INITIALIZE TRAFODION to initialize the instance, then resubmit.
+
+[[SQL-1394]]
+== SQL 1394
+
+```
+Trafodion needs to be reinitialized on this system due to missing or corrupted metadata objects. Do 'initialize trafodion, drop' followed by 'initialize trafodion' to reinitialize Trafodion. This will delete all metadata and user objects from the Trafodion database and recreate metadata.
+```
+
+*Cause:* The metadata objects do not have the structure that {project-name} expects. This may be due to corruption. Another possibility
+is an older release of {project-name} software has been installed and brought up. At this time {project-name} does not support downgrade.
+
+*Effect:* The operation fails.
+
+*Recovery:* The recovery depends upon the cause and on whether there is a desire to keep the data in the instance. If corruption has happened
+it may be possible to manually restore the metadata tables. This may be difficult, however; contact the {project-name} dlist for help.
+If it is due to the improper installation of software, installing the correct version and restarting the instance may solve the problem.
+If there is no need to keep data from the instance, you can do INITIALIZE TRAFODION, DROP followed by INITIALIZE TRAFODION.
+
+<<<
+[[SQL-1395]]
+== SQL 1395
+
+```
+Trafodion needs to be upgraded on this system due to metadata version mismatch. Do 'initialize trafodion, upgrade' to upgrade metadata. Or do 'initialize trafodion, drop' followed by 'initialize trafodion'. Be aware that the second option will delete all metadata and user objects from Trafodion database.
+```
+
+*Cause:* A new version of the {project-name} software has been installed, and a metadata upgrade is required as a result.
+
+*Effect:* The operation fails.
+
+*Recovery:* Perform INITIALIZE TRAFODION, UPGRADE to upgrade the metadata. Note: {project-name} supports upgrade only in single release
+increments. So, if you desire to upgrade your instance by multiple release increments, you will need to install each of the intervening
+releases serially and do a metadata upgrade on each. If you do not care about keeping your data, you can instead drop the metadata using INITIALIZE TRAFODION, DROP and then do an INITIALIZE TRAFODION. Please note though that all data in your instance will be lost if you choose this option.
+
+[[SQL-1396]]
+== SQL 1396
+
+```
+Trafodion needs to be reinitialized on this system due to data format version mismatch.
+```
+
+*Cause:* A new version of the {project-name} software has been installed, and that version uses a different data format. Note: The
+data format has never yet changed in {project-name}, so this error should never be seen.
+
+*Effect:* The operation fails.
+
+*Recovery:* Contact the {project-name} dlist.
+
+<<<
+[[SQL-1397]]
+== SQL 1397
+
+```
+Software version of objects being used is different than the version of software running on the system. Make sure that objects being used are built with the same version as that running on the system. Version of Trafodion software is determined by file sqenvcom.sh.
+```
+
+*Cause:* This error is most likely to be seen in a {project-name} development environment at the beginning of a new release
+development cycle. The
+software version information in the built objects differs from that in sqenvcom.sh. This can happen, for example, if you have 
+created a new branch in which sqenvcom.sh has been updated, but you bring up software that was built on the previous branch.
+
+*Effect:* The operation fails.
+
+*Recovery:* Assuming this is a development environment, rebuild the {project-name} software to get the correct software version numbers.
+If this is not a development environment, contact the {project-name} dlist.
+
+[[SQL-1398]]
+== SQL 1398
+
+```
+Error <error-number> occured while accessing the HBase subsystem. Fix that error and make sure HBase is up and running. Error Details: <HBase-diagnostics>. 
+```
+
+Where <error-number> is an internal {project-name} error code.
+
+Where <HBase-diagnostics> is a Java exception stack trace from HBase, which indicates the cause of the error.
+
+*Cause:* An error occurred when {project-name} attempted an HBase access. The HBase diagnostics may give an indication of the cause.
+
+*Effect:* The operation fails.
+
+*Recovery:* Take action as indicated by the HBase diagnostics. Ensure that the HBase subsystem is up and running correctly.
+
+<<<
+[[SQL-1399]]
+== SQL 1399
+
+```
+Metadata has already been upgraded. No action is needed.
+```
+
+*Cause:* You executed an INITIALIZE TRAFODION, UPGRADE statement but the metadata version is already up-to-date.
+
+*Effect:* The operation does nothing.
+
+*Recovery:* None needed.
+
+[[SQL-1401]]
+== SQL 1401
+
+```
+The default value of column <column-name> contains characters that cannot be converted to character set <character-set-name>.
+```
+
+Where <column-name> is a column name you specified.
+
+Where <character-set-name> is the character set of the column you specified.
+
+*Cause:* In a CREATE TABLE or ALTER TABLE ADD COLUMN statement, you specified a default value for the given column that 
+cannot be converted to that column's character set.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the DEFAULT clause literal and resubmit.
+
+<<<
+[[SQL-1402]]
+== SQL 1402
+
+```
+Unique index <index-name> could not be created with the DIVISION LIKE TABLE option. Only non-unique indexes are supported with this option.
+```
+
+Where <index-name> is an index name you specified.
+
+*Cause:* You specified both UNIQUE and DIVISION LIKE TABLE in a CREATE INDEX statement. {project-name} does not support unique indexes
+with the DIVISION LIKE TABLE option.
+
+*Effect:* The operation fails.
+
+*Recovery:* Either remove UNIQUE or remove DIVISION LIKE TABLE from the CREATE INDEX statement and resubmit.
+
+[[SQL-1403]]
+== SQL 1403
+
+```
+This ALTER command is not allowed on a reserved system schema object.
+```
+
+*Cause:* You specified an ALTER INDEX statement on an index in a reserved schema (e.g. the metadata schema).
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the index schema name and resubmit.
+
+<<<
+[[SQL-1404]]
+== SQL 1404
+
+```
+Column <column-name> cannot be altered. Reason: <reason>
+```
+
+*Cause:* You specified an ALTER TABLE ALTER COLUMN statement, but the column cannot be altered. The reason gives details on why.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
 [[SQL-1420]]
 == SQL 1420
 
@@ -2697,6 +3153,7 @@
 
 *Recovery:* Correct the column name or table name as appropriate and resubmit.
 
+<<<
 [[SQL-1421]]
 == SQL 1421 
 
@@ -2744,10 +3201,10 @@
 == SQL 1424 
 
 ```
-Column <column-name> cannot be dropped as that would leave the table with no user defined columns.
+Column <column-name> cannot be dropped as that would leave the table with no user defined or user updatable columns.
 ```
 
-*Cause:* You attempted to drop the only remaining user-defined column in the table.
+*Cause:* You attempted to drop the only remaining user-defined or user-updatable column in the table.
 
 *Effect:* The operation fails.
 
@@ -2772,7 +3229,7 @@
 == SQL 1426
 
 ```
-An invalid HBase column name <column-name> was specified. A valid name must be of the format:   <ColumnFamily>:<ColumnName>
+An invalid HBase column name <column-name> was specified. A valid name must be of the format: <ColumnFamily>:<ColumnName>
 ```
 
 *Cause:* When accessing an external HBase table (for example, using _ROW_ format), you specified
@@ -2825,4 +3282,404 @@
 
 *Recovery:* Correct the statement and resubmit.
 
+[[SQL-1430]]
+== SQL 1430
+
+```
+A schema name that starts and ends with an "_"(underscore) is reserved for internal usage. It cannot be used to create a user schema.
+```
+
+*Cause:* You attempted to create a schema with a name beginning and ending with an underscore. These names are
+reserved by {project-name} for internal use.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
+[[SQL-1431]]
+== SQL 1431
+
+```
+Object <object-name> exists in HBase. This could be due to a concurrent transactional ddl operation in progress on this table.
+```
+
+Where <object-name> is an object name you specified.
+
+*Cause:* You attempted to create an object, but the object already exists in HBase. It may be the case that the object is being created
+(or dropped) in another concurrent transaction.
+
+*Effect:* The operation fails.
+
+*Recovery:* Check to see if the object exists. If not, resubmit the statement again. If the transaction is long-running, you may need
+to attempt this multiple times.
+
+[[SQL-1432]]
+== SQL 1432
+
+```
+Input LOB type <storage-type-1> does not match column's storage type: <storage-type-2> Column name: <column-name> .
+```
+
+Where <storage-type-1> is the storage type of an input LOB value you specified.
+
+Where <storage-type-2> is the storage type of the target LOB column.
+
+Where <column-name> is the name of the target LOB column.
+
+*Cause:* You attempted to store a LOB value into a LOB column, but the storage type of the LOB value and the target column are not
+compatible. 
+
+*Effect:* The operation fails.
+
+*Recovery:* Consult the Trafodion SQL Large Objects Guide for more information about LOB storage types. Correct the statement
+and resubmit.
+
+<<<
+[[SQL-1510]]
+== SQL 1510
+
+```
+IDENTITY column <column-name> can be of the following data types only: LARGEINT, INTEGER UNSIGNED and SMALLINT UNSIGNED.
+```
+
+Where <column-name> is a column name you specified.
+
+*Cause:* You attempted to define an IDENTITY column but you specified a data type other than LARGEINT, INTEGER UNSIGNED or SMALLINT UNSIGNED.
+
+*Effect:* The operation fails.
+
+*Recovery:* Change the data type of the column to LARGEINT, INTEGER UNSIGNED or SMALLINT UNSIGNED and resubmit.
+
+[[SQL-1511]]
+== SQL 1511
+
+```
+There can only be one IDENTITY column for a table.
+```
+
+*Cause:* You attempted to define more than one IDENTITY column in a table.
+
+*Effect:* The operation fails.
+
+*Recovery:* Change the column definitions so that there is at most one IDENTITY column and resubmit.
+
+<<<
+[[SQL-1514]]
+== SQL 1514
+
+```
+Cannot add an IDENTITY column using ALTER TABLE command.
+```
+
+*Cause:* You attempted to add an IDENTITY column to a table using the ALTER TABLE command. {project-name} does not presently support this.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1540]]
+== SQL 1540
+
+```
+The NO POPULATE clause is not allowed for index <index-name> on volatile table <table-name>.
+```
+
+*Cause:* You attempted to execute a CREATE INDEX statement to create an index on a volatile table, and you specified the NO POPULATE option. That option
+is not allowed for indexes on volatile tables.
+
+*Effect:* The operation fails.
+
+*Recovery:* Remove the NO POPULATE clause from the statement and resubmit.
+
+<<<
+[[SQL-1541]]
+== SQL 1541
+
+```
+Use of BLOB/CLOB datatype as a key of the base table or an index is not allowed.
+```
+
+*Cause:* You attempted to execute a CREATE TABLE statement with a BLOB or CLOB column in the primary key, or you attempted to execute a CREATE INDEX
+statement that referenced a CLOB or BLOB column. This is not allowed.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1550]]
+== SQL 1550
+
+```
+Failed to create UDF <udf-name> with <number> parameters. A scalar UDF can have a maximum of 32 parameters.
+```
+
+Where <udf-name> is the name of a user defined function that you specified.
+
+Where <number> is the number of parameters in the function signature that you specified.
+
+*Cause:* You attempted to execute a CREATE FUNCTION statement and you specified more than 32 parameters in the function signature. {project-name}
+only supports up to 32 parameters.
+
+*Effect:* The operation fails.
+
+*Recovery:* Reduce the number of parameters in the function and resubmit.
+
+<<<
+[[SQL-1570]]
+== SQL 1570
+
+```
+MAXVALUE must be greater than MINVALUE for <purpose>.
+```
+
+Where <purpose> is either "an IDENTITY column" or "CREATE SEQUENCE".
+
+*Cause:* When creating an IDENTITY column or when executing a CREATE SEQUENCE statement, you specified a MAXVALUE that is less than or equal to the MINVALUE.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1571]]
+== SQL 1571
+
+```
+<value-name> value cannot be zero for <purpose>.
+```
+
+Where <value-name> is one of "INCREMENT BY", "MINVALUE" or "MAXVALUE".
+
+Where <purpose> is one of "an IDENTITY column", "ALTER SEQUENCE" or "CREATE SEQUENCE".
+
+*Cause:* When creating an IDENTITY column or when executing an ALTER SEQUENCE or CREATE SEQUENCE statement, you specified a value of zero for INCREMENT BY,
+MINVALUE or MAXVALUE.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
+[[SQL-1572]]
+== SQL 1572
+
+```
+<value-name> value cannot be a negative number for <purpose>.
+```
+
+Where <value-name> is one of "INCREMENT BY", "MINVALUE" or "MAXVALUE".
+
+Where <purpose> is one of "an IDENTITY column", "ALTER SEQUENCE" or "CREATE SEQUENCE".
+
+*Cause:* When creating an IDENTITY column or when executing an ALTER SEQUENCE or CREATE SEQUENCE statement, you specified a negative value for INCREMENT BY,
+MINVALUE or MAXVALUE.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1573]]
+== SQL 1573
+
+```
+START WITH value must be greater than or equal to MINVALUE and less than or equal to MAXVALUE for <purpose>.
+```
+
+Where <purpose> is one of "an IDENTITY column", "ALTER SEQUENCE" or "CREATE SEQUENCE".
+
+*Cause:* When creating an IDENTITY column or when executing an ALTER SEQUENCE or CREATE SEQUENCE statement, you specified a value for START WITH that is
+less than the value for MINVALUE or greater than the value for MAXVALUE.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
+[[SQL-1575]]
+== SQL 1575
+
+```
+INCREMENT BY value cannot be greater than the difference between MINVALUE and MAXVALUE for <purpose>.
+```
+
+Where <purpose> is one of "an IDENTITY column", "ALTER SEQUENCE" or "CREATE SEQUENCE".
+
+*Cause:* When creating an IDENTITY column or when executing an ALTER SEQUENCE or CREATE SEQUENCE statement, you specified a value for INCREMENT BY that is
+greater than the difference between the values for MINVALUE and MAXVALUE.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1576]]
+== SQL 1576
+
+```
+<value-name> value is greater than maximum allowed for this sequence.
+```
+
+Where <value-name> is one of "INCREMENT BY", "MAXVALUE" or "START WITH".
+
+*Cause:* When creating an IDENTITY column or when executing an ALTER SEQUENCE or CREATE SEQUENCE statement, you specified a value that is larger than
+permitted for the data type of the column or sequence.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
+[[SQL-1577]]
+== SQL 1577
+
+```
+CACHE value must be greater than 1 and less than or equal to (maxValue-startValue+1)/incrementValue for <purpose>.
+```
+
+Where <purpose> is "ALTER SEQUENCE" or "CREATE SEQUENCE".
+
+*Cause:* When executing an ALTER SEQUENCE or CREATE SEQUENCE statement, you specified a value for CACHE that is one or less,
+or is greater than (maxValue - startValue + 1)/incrementValue.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1579]]
+== SQL 1579
+
+```
+This sequence has reached its max and cannot provide a new value.
+```
+
+*Cause:* When executing a DML statement that uses the SEQNUM function to obtain a value from a sequence, or when
+inserting a row into a table having an IDENTITY column, the underlying sequence had already reached its
+maximum value.
+
+*Effect:* The operation fails.
+
+*Recovery:* If desired, use ALTER SEQUENCE to increase the maximum value for the sequence, then resubmit.
+
+<<<
+[[SQL-1582]]
+== SQL 1582
+
+```
+Internal Error: Sequence information does not exist in metadata.
+```
+
+*Cause:* When executing a DML statement that references a sequence, {project-name} could not find
+the sequence definition in the {project-name} metadata.
+
+*Effect:* The operation fails.
+
+*Recovery:* Contact the {project-name} distribution list. If the DML statement involves an IDENTITY
+column it is likely the existing data is not at risk, however it may not be possible to create new
+rows using the IDENTITY column's underlying sequence. If the DML statement involves getting new
+values from a sequence via the SEQNUM function, it may be possible to create a new sequence with
+a MINVALUE just greater than the last value taken from the sequence, and use the new sequence instead.
+
+[[SQL-1583]]
+== SQL 1583
+
+```
+Sequence metadata could not be updated.
+```
+
+*Cause:* When executing a DML statement that references a sequence, the sequence metadata could
+not be updated.
+
+*Effect:* The operation fails.
+
+*Recovery:* Contact the {project-name} distribution list.
+
+<<<
+[[SQL-1584]]
+== SQL 1584
+
+```
+Timestamp mismatch detected on Sequence object.
+```
+
+*Cause:* When executing a DML statement that references a sequence, the {project-name} run-time
+engine discovered a mismatch between the expected state of the sequence and the state found
+at run-time.
+
+*Effect:* The operation fails.
+
+*Recovery:* Contact the {project-name} distribution list. If this is a PREPAREd statement,
+re-PREPARing it may cure the error.
+
+[[SQL-1590]]
+== SQL 1590
+
+```
+Column <column-name> is not an IDENTITY column.
+```
+
+Where <column-name> is the column name you specified.
+
+*Cause:* You attempted to execute an ALTER TABLE ALTER COLUMN statement, setting an IDENTITY-column-related
+value such as INCREMENT BY. However, the column specified is not an IDENTITY column.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the table name or column name in the statement and resubmit.
+
+<<<
+[[SQL-1592]]
+== SQL 1592
+
+```
+<value-name> cannot be specified for <purpose>.
+```
+
+Where <value-name> is "MINVALUE" or "START WITH".
+
+Where <purpose> is "ALTER SEQUENCE".
+
+*Cause:* You attempted to modify the MINVALUE or START WITH values for a sequence in an ALTER SEQUENCE statement but
+this is not allowed.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+<<<
+[[SQL-1719]]
+== SQL 1719
+
+```
+Access Type '<access>' is not supported.
+```
+
+Where <access> is an access type you specified in a FOR ACCESS clause.
+
+*Cause:* The access type you specified is not presently supported by {project-name}.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+[[SQL-1720]]
+== SQL 1720
+
+```
+Isolation Level '<level>' is not supported.
+```
+
+Where <level> is an isolation level you specified in a SET TRANSACTION ISOLATION LEVEL statement.
+
+*Cause:* The isolation level you specified is not presently supported by {project-name}.
+
+*Effect:* The operation fails.
+
+*Recovery:* Correct the statement and resubmit.
+
+
+
+
+
+
+
+
 
diff --git a/docs/messages_guide/src/asciidoc/_chapters/parser_msgs.adoc b/docs/messages_guide/src/asciidoc/_chapters/parser_msgs.adoc
index 4c18674..33b29ac 100644
--- a/docs/messages_guide/src/asciidoc/_chapters/parser_msgs.adoc
+++ b/docs/messages_guide/src/asciidoc/_chapters/parser_msgs.adoc
@@ -230,15 +230,15 @@
 == SQL 3014
 
 ```
-Precision of numeric, <value>, cannot exceed 18.
+Precision of numeric, <value>, cannot exceed <limit>.
 ```
 
 *Cause:* You defined a numeric item, <value>, with precision greater
-than 18, which is not supported.
+than the limit, which is not supported.
 
 *Effect:* The operation fails.
 
-*Recovery:* Redefine the item with a precision value of 18 or less.
+*Recovery:* Redefine the item with a lower precision value.
 
 <<<
 [[SQL-3015]]
@@ -677,7 +677,7 @@
 == SQL 3045
 
 ```
-The date <value> is not valid.
+The date '<value>' is not valid.
 ```
 
 *Cause:* You specified an invalid date <value>.
@@ -690,7 +690,7 @@
 == SQL 3046
 
 ```
-The time <value> is not valid.
+The time '<value>' is not valid.
 ```
 
 *Cause:* You specified an invalid time <value>.
@@ -704,7 +704,7 @@
 == SQL 3047
 
 ```
-The timestamp <value> is not valid.
+The timestamp '<value>' is not valid.
 ```
 
 *Cause:* You specified an invalid timestamp <value>.
@@ -2850,7 +2850,7 @@
 == SQL 3209
 
 ```
-Maximum length of LONGWVARCHAR is <value>.
+The maximum length of LONGWVARCHAR is <value>.
 ```
 
 *Cause:* You attempted a CREATE TABLE table name (columnname
@@ -2868,7 +2868,7 @@
 == SQL 3210
 
 ```
-Minimum length of LONGWVARCHAR is <value>.
+The minimum length of LONGWVARCHAR is <value>.
 ```
 
 *Cause:* You attempted a CREATE TABLE table name (columnname
@@ -2885,7 +2885,7 @@
 == SQL 3211
 
 ```
-Maximum length of LONG VARBINARY is <value>.
+The maximum length of LONG VARBINARY is <value>.
 ```
 
 *Cause:* You attempted a CREATE TABLE table name (columnname long
@@ -2902,7 +2902,7 @@
 == SQL 3212
 
 ```
-Minimum length of LONG VARBINARY is <value>.
+The minimum length of LONG VARBINARY is <value>.
 ```
 
 *Cause:* You attempted a CREATE TABLE table name (columnname long
@@ -2918,7 +2918,7 @@
 == SQL 3213
 
 ```
-Maximum length of LONG VARCHAR is <value>.
+The maximum length of LONG VARCHAR is <value>.
 ```
 
 *Cause:* You attempted a CREATE TABLE table name (columnname long
@@ -2935,7 +2935,7 @@
 == SQL 3214
 
 ```
-Minimum length of LONG VARCHAR is <value>.
+The minimum length of LONG VARCHAR is <value>.
 ```
 
 *Cause:* You attempted a CREATE TABLE table name (columnname long
@@ -3003,7 +3003,7 @@
 == SQL 3218
 
 ```
-A table reference cannot contain DELETE or UPDATE in a CREATE VIE statement.
+A table reference cannot contain DELETE or UPDATE in a CREATE VIEW statement.
 ```
 
 *Cause:* A create view statement was issued with a delete or update
@@ -3013,23 +3013,6 @@
 
 *Recovery:* Do not use delete or update in a CREATE view statement.
 
-[[SQL-3220]]
-== SQL 3220
-
-```
-SHOWDDL failed for object <object-name> because an SQL/MP display option was used on a {project-name} object.
-```
-
-Where <object-name> specifies the ANSI name of a table, view, or ALIAS.
-
-*Cause:* SHOWDDL was given the display option with a {project-name} 
-table given as an argument.
-
-*Effect:* SHOWDDL does not display output.
-
-*Recovery:* Reissue the command without the display option or specify an
-object.
-
 <<<
 [[SQL-3225]]
 == SQL 3225
diff --git a/docs/messages_guide/src/asciidoc/_chapters/update_stats_msgs.adoc b/docs/messages_guide/src/asciidoc/_chapters/update_stats_msgs.adoc
index c660148..51c7e3e 100644
--- a/docs/messages_guide/src/asciidoc/_chapters/update_stats_msgs.adoc
+++ b/docs/messages_guide/src/asciidoc/_chapters/update_stats_msgs.adoc
@@ -62,7 +62,7 @@
 == SQL 9202
 
 ```
-UPDATE STATISTICS has located previously generated histograms that are not being regenerated. This may affect the plans that will be generated. Missing column lists are <column-list>.
+UPDATE STATISTICS has located previously generated histograms that are not being regenerated. This may affect the plans that will be generated. Missing columns lists are <column-list>.
 ```
 
 *Cause:* UPDATE STATISTICS has located previously generated histograms that are not being regenerated.
@@ -456,7 +456,7 @@
 == SQL 9245
 
 ```
-Unexpected out-of-order data encountered during histogram construction on column <column-name> this might result in later 6004 warnings.
+Unexpected out-of-order data encountered during histogram construction on column <column-name>; this might result in later 6004 warnings.
 ```
 
 *Cause:* {project-name} detected out-of-order data while generating histograms. This is likely an
diff --git a/docs/odb_user/pom.xml b/docs/odb_user/pom.xml
index f8de8fa..8251ad9 100644
--- a/docs/odb_user/pom.xml
+++ b/docs/odb_user/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion odb User Guide</name>
   <description>This guide describes how to odb, a multi-threaded, ODBC-based command-line tool, to perform various operations on a Trafodion database.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
   <parent>
     <groupId>org.apache.trafodion</groupId>
@@ -60,9 +60,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -186,8 +186,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -208,8 +208,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>
                 <pdf-style>trafodion</pdf-style>
@@ -286,9 +286,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/provisioning_guide/pom.xml b/docs/provisioning_guide/pom.xml
index b48f592..20bcb5a 100644
--- a/docs/provisioning_guide/pom.xml
+++ b/docs/provisioning_guide/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion Provisioning Guide</name>
   <description>This guide describes how to provision Trafodion.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -61,9 +61,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -187,9 +187,9 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
-                <download-url>http://trafodion.incubator.apache.org/download.html</download-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
+                <download-url>http://trafodion.apache.org/download.html</download-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -210,9 +210,9 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
-                <download-url>http://trafodion.incubator.apache.org/download.html</download-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
+                <download-url>http://trafodion.apache.org/download.html</download-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>
                 <pdf-style>trafodion</pdf-style>
@@ -289,9 +289,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/about.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/about.adoc
index ddce5bf..a98cad3 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/about.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/about.adoc
@@ -28,7 +28,7 @@
 (see {docs-url}/client_install/index.html[{project-name} Client Installation Guide]) or via application code you write.
 
 If you want to install a Trafodion developer-build environment, then please refer to the 
-http://trafodion.incubator.apache.org/contributing-redirect.html[Trafodion Contributor Guide] for instructions.
+http://trafodion.apache.org/contributing-redirect.html[Trafodion Contributor Guide] for instructions.
 
 == Intended Audience
 This guide assumes that you are well-versed in Linux and Hadoop administration. If you don't have such experience, then
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/enable_security.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/enable_security.adoc
index df6ced7..e315f72 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/enable_security.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/enable_security.adoc
@@ -495,10 +495,10 @@
 ----------------------------------
 AUTHENTICATION     enabled
 AUTHORIZATION      enabled
-CURRENT DIRECTORY  /.../incubator-trafodion/install/installer
+CURRENT DIRECTORY  /.../trafodion/install/installer
 LIST_COUNT         4294967295
 LOG FILE
-MESSAGEFILE        /.../incubator-trafodion/core/sqf/export/ ...
+MESSAGEFILE        /.../trafodion/core/sqf/export/ ...
 MESSAGEFILE LANG   US English
 MESSAGEFILE VRSN   {2016-06-14 22:27 LINUX:host/user} 
 SQL CATALOG        TRAFODION
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/introduction.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/introduction.adoc
index 67cc082..b16eea3 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/introduction.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/introduction.adoc
@@ -158,7 +158,7 @@
 ```
 $ mkdir $HOME/trafodion-installer
 $ cd $HOME/trafodion-downloads
-$ tar -zxf apache-trafodion-pyinstaller-2.1.0-incubating.tar.gz -C $HOME/trafodion-installer
+$ tar -zxf apache-trafodion-pyinstaller-2.2.0.tar.gz -C $HOME/trafodion-installer
 $
 ```
 
@@ -268,7 +268,7 @@
 The {project-name} Installer supports a minimum configuration to quick start your installation in two steps.
 1. Copy {project-name} server binary file to your installer directory.
 ```
-cp /path/to/apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz python-installer/
+cp /path/to/apache-trafodion_server-2.2.0-RH-x86_64.tar.gz python-installer/
 ```
 2. Modify configuration file `my_config`, add the Hadoop Distribution Manager URL in `mgr_url`.
 ```
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/quickstart.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/quickstart.adoc
index fbdac4f..e457d72 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/quickstart.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/quickstart.adoc
@@ -53,31 +53,31 @@
 $ mkdir $HOME/trafodion-download
 $ cd $HOME/trafodion-download
 $ # Download the Trafodion Installer binaries
-$ wget http://apache.cs.utah.edu/incubator/trafodion/trafodion-2.1.0.incubating/apache-trafodion-pyinstaller-2.1.0-incubating.tar.gz
+$ wget http://apache.cs.utah.edu/trafodion/trafodion-2.2.0/apache-trafodion-pyinstaller-2.2.0.tar.gz
 Resolving http://apache.cs.utah.edu... 192.168.1.56
 Connecting to http://apache.cs.utah.edu|192.168.1.56|:80... connected.
 HTTP request sent, awaiting response... 200 OK
 Length: 68813 (67K) [application/x-gzip]
-Saving to: "apache-trafodion-pyinstaller-2.1.0-incubating.tar.gz"
+Saving to: "apache-trafodion-pyinstaller-2.2.0.tar.gz"
 
 100%[=====================================================================================================================>] 68,813       124K/s   in 0.5s
 
-2016-02-14 04:19:42 (124 KB/s) - "apache-trafodion-pyinstaller-2.1.0-incubating.tar.gz" saved [68813/68813]
+2016-02-14 04:19:42 (124 KB/s) - "apache-trafodion-pyinstaller-2.2.0.tar.gz" saved [68813/68813]
 
-$ wget http://apache.cs.utah.edu/incubator/trafodion/trafodion-2.1.0.incubating/apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz
+$ wget http://apache.cs.utah.edu/trafodion/trafodion-2.2.0/apache-trafodion_server-2.2.0-RH-x86_64.tar.gz
 Resolving http://apache.cs.utah.edu... 192.168.1.56
 Connecting to http://apache.cs.utah.edu|192.168.1.56|:80... connected.
 HTTP request sent, awaiting response... 200 OK
 Length: 214508243 (205M) [application/x-gzip]
-Saving to: "apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz"
+Saving to: "apache-trafodion_server-2.2.0-RH-x86_64.tar.gz"
 
 100%[=====================================================================================================================>] 214,508,243 3.90M/s   in 55s
 
-2016-02-14 04:22:14 (3.72 MB/s) - "apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz" saved [214508243/214508243]
+2016-02-14 04:22:14 (3.72 MB/s) - "apache-trafodion_server-2.2.0-RH-x86_64.tar.gz" saved [214508243/214508243]
 
 $ ls -l
--rw-rw-r--. 1 centos centos     74237 Feb 13 14:53 apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz
--rw-rw-r--. 1 centos centos 183114066 Feb 10 22:34 apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz
+-rw-rw-r--. 1 centos centos     74237 Feb 13 14:53 apache-trafodion_pyinstaller-2.2.0.tar.gz
+-rw-rw-r--. 1 centos centos 183114066 Feb 10 22:34 apache-trafodion_server-2.2.0-RH-x86_64.tar.gz
 $
 ```
 
@@ -92,10 +92,10 @@
 ```
 $ mkdir $HOME/trafodion-installer
 $ cd $HOME/trafodion-downloads
-$ tar -zxf apache-trafodion-pyinstaller-2.1.0-incubating.tar.gz -C $HOME/trafodion-installer
-$ cp -f apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz $HOME/trafodion-installer
+$ tar -zxf apache-trafodion-pyinstaller-2.2.0.tar.gz -C $HOME/trafodion-installer
+$ cp -f apache-trafodion_server-2.2.0-RH-x86_64.tar.gz $HOME/trafodion-installer
 $ ls $HOME/trafodion-installer/python-installer
-apache-trafodion_server-2.1.0-RH-x86_64-incubating.tar.gz  db_install.py    DISCLAIMER    LICENSE  prettytable.py  scripts
+apache-trafodion_server-2.2.0-RH-x86_64.tar.gz  db_install.py    LICENSE  prettytable.py  scripts
 configs                                                    db_uninstall.py  discovery.py  NOTICE   README.md
 $
 ```
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/requirements.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/requirements.adoc
index d49d8d5..64501fd 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/requirements.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/requirements.adoc
@@ -319,7 +319,7 @@
 [[requirements-recommended-hbase-configuration-changes]]
 === Recommended HBase Configuration Changes
 
-[cols="30%l,20%,50%a",options="header"]
+[options="header"]
 |===
 | Configuration Property | Recommended Setting | Guidance
 | hbase.rpc.timeout | 10 minutes | This setting depends on the tables' size. Sixty (60) seconds is the default. 
@@ -331,13 +331,38 @@
 from this setting. The underlying issue is the length of the execution of the coprocessor within HBase.
  +
 NOTE: HBase uses the smaller of `hbase.rpc.timeout` and `hbase.client.scanner.timeout.period` to calculate the scanner timeout. 
-| hbase.snapshot.master.timeoutMillis and hbase.snapshot.region.timeout | 10 minutes | HBase's default setting is 60000 milliseconds. 
+| hbase.snapshot.master.timeoutMillis 
+
+and 
+
+hbase.snapshot.region.timeout | 10 minutes | HBase's default setting is 60000 milliseconds. 
 If you experience timeout issues with HBase snapshots when you use the {project-name} Bulk Loader or other statements, 
 you can set the value for these two HBase properties to 10 minutes (600,000 milliseconds).
-| hbase.hregion.max.filesize | 107374182400 bytes | HBase's default setting is 10737418240 (10 GB). We have increased the setting to 
-107374182400 (100 GB), which reduces the number of HStoreFiles per table and appears to reduce disruptions to active transactions from 
+| hbase.hregion.max.filesize | 107374182400 bytes | HBase's default setting is 10737418240 bytes (10 GB). You can increase the setting to 
+107374182400 bytes (100 GB), which reduces the number of HStoreFiles per table and appears to reduce disruptions to active transactions from 
 region splitting.
-| hbase.hstore.blockingStoreFiles | 10 | http://gbif.blogspot.com/2012/07/optimizing-writes-in-hbase.html
-| hbase.regionserver.handler.count | <num> | This setting should match the number of concurrent sessions (mxosrvr). The default is 10.
-|===
+| hbase.hregion.memstore.block.multiplier | 7
 
+When you have enough memory, you can increase this value to 7 so that more data can be temporarily accepted before flushing to disk instead of blocking writes.
+|This property blocks any further writes from clients to memstores if the memstores exceed the value of `multiplier * flush size`.
+
+Default value: 2
+| hbase.hregion.memstore.flush.size | 536870912 bytes | HBase uses memstore to buffer data before writing it to disk. Once the data in memstore has outgrown this size, it is flushed as an HFile to disk.
+
+Default value: 134217728 bytes (128M)
+| hbase.hstore.blockingStoreFiles | 200 | http://gbif.blogspot.com/2012/07/optimizing-writes-in-hbase.html
+
+This property blocks any further writes from memstores to HFile, after the number of existing HFile hits this limit, until compactions are completed.
+
+Default value: 7
+
+| hbase.regionserver.handler.count | <num> | This setting should match the number of concurrent sessions (mxosrvr). The default is 10.
+| hbase.regionserver.region.split.policy | 
+`org.apache.hadoop.hbase.regionserver.
+ConstantSizeRegionSplitPolicy`
+
+Once the regions reach the maximum size configured by `hbase.hregion.max.filesize` property, they are split into halves. 
+| Specify the split policy of HBase.
+
+Default value: `IncreasingToUpperBoundRegionSplitPolicy`
+|===
\ No newline at end of file
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/script_install.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/script_install.adoc
index 1bc5573..d5b42e5 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/script_install.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/script_install.adoc
@@ -43,7 +43,7 @@
 ```
 $ mkdir $HOME/trafodion-installer
 $ cd $HOME/trafodion-downloads
-$ tar -zxf apache-trafodion_pyinstaller-x.x.x-incubating.tar.gz -C $HOME/trafodion-installer
+$ tar -zxf apache-trafodion_pyinstaller-x.x.x.tar.gz -C $HOME/trafodion-installer
 $
 ```
 
@@ -74,7 +74,7 @@
 TASK: Environment Discover ***************************************************************
 
 Time Cost: 0 hour(s) 0 minute(s) 4 second(s)
-Enter full path to Trafodion tar file [/data/python-installer/apache-trafodion_server-x.x.x-RH-x86_64-incubating.tar.gz]:
+Enter full path to Trafodion tar file [/data/python-installer/apache-trafodion_server-x.x.x-RH-x86_64.tar.gz]:
 Enter directory name to install trafodion to [apache-trafodion-x.x.x]:
 Enter trafodion user password:
 Confirm Enter trafodion user password:
@@ -103,7 +103,7 @@
 | node_list        | node-1,node-2                                                                     |
 | scratch_locs     | $TRAF_VAR                                                                         |
 | traf_dirname     | apache-trafodion-x.x.x                                                            |
-| traf_package     | /data/python-installer/apache-trafodion_server-x.x.x-RH-x86_64-incubating.tar.gz  |
+| traf_package     | /data/python-installer/apache-trafodion_server-x.x.x-RH-x86_64.tar.gz             |
 | traf_start       | Y                                                                                 |
 | traf_user        | trafodion                                                                         |
 +------------------+-----------------------------------------------------------------------------------+
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/script_remove.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/script_remove.adoc
index 5ad55a7..b402327 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/script_remove.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/script_remove.adoc
@@ -74,7 +74,7 @@
 Shutdown in progress
 
 # of SQ processes: 0
-SQ Shutdown (normal) from /home/trafodion/apache-trafodion-1.3.0-incubating-bin/sql/scripts Successful
+SQ Shutdown (normal) from /home/trafodion/apache-trafodion-2.2.0-bin/sql/scripts Successful
 Mon Feb 15 07:49:26 UTC 2016
 [trafodion@trafodion-1 scripts]$ exit
 [admin@trafodion-1 ~]$
diff --git a/docs/provisioning_guide/src/asciidoc/_chapters/script_upgrade.adoc b/docs/provisioning_guide/src/asciidoc/_chapters/script_upgrade.adoc
index c6a05f6..4eb72a6 100644
--- a/docs/provisioning_guide/src/asciidoc/_chapters/script_upgrade.adoc
+++ b/docs/provisioning_guide/src/asciidoc/_chapters/script_upgrade.adoc
@@ -55,7 +55,7 @@
 ```
 $ mkdir $HOME/trafodion-installer
 $ cd $HOME/trafodion-downloads
-$ tar -zxf apache-trafodion-pyinstaller-2.1.0-incubating.tar.gz -C $HOME/trafodion-installer
+$ tar -zxf apache-trafodion-pyinstaller-2.2.0.tar.gz -C $HOME/trafodion-installer
 $
 ```
 
@@ -149,13 +149,13 @@
 $ cp my_config my_config_2.0
 $ # Pre edit content
 
-traf_package = "/home/centos/trafodion-download/apache-trafodion-2.0.0-incubating.tar.gz"
+traf_package = "/home/centos/trafodion-download/apache-trafodion-2.2.0.tar.gz"
 
 $ # Use your favorite editor to modify my_config_2.0
 $ emacs my_config_2.0
 $ # Post edit changes
 
-traf_package = "/home/centos/trafodion-download/apache-trafodion-2.1.0-incubating.tar.gz"
+traf_package = "/home/centos/trafodion-download/apache-trafodion-2.2.0.tar.gz"
 ```
 
 NOTE: The {project-name} Installer performs the same configuration changes as it does for an installation,
diff --git a/docs/shared/license.txt b/docs/shared/license.txt
index 5808b25..3dd96d1 100644
--- a/docs/shared/license.txt
+++ b/docs/shared/license.txt
@@ -9,8 +9,3 @@
 "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the License for the
 specific language governing permissions and limitations under the License.
 
-*Disclaimer:* _Apache Trafodion is an effort undergoing incubation at the Apache Software Foundation (ASF), sponsored by 
-the Apache Incubator PMC. Incubation is required of all newly accepted projects until a further review indicates that 
-the infrastructure, communications, and decision making process have stabilized in a manner consistent with other 
-successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, 
-it does indicate that the project has yet to be fully endorsed by the ASF._
diff --git a/docs/spj_guide/pom.xml b/docs/spj_guide/pom.xml
index 51f7177..46e4a15 100644
--- a/docs/spj_guide/pom.xml
+++ b/docs/spj_guide/pom.xml
@@ -30,7 +30,7 @@
   <packaging>pom</packaging>
   <name>Trafodion SPJ Guide</name>
   <description>This guide describes how to develop, deploy, and manage Stored Procedures in Java (SPJs) on the Trafodion database.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <parent>
@@ -61,9 +61,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -187,8 +187,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
                 <google-analytics-account>UA-72491210-1</google-analytics-account>
               </attributes>
@@ -216,8 +216,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>
                 <project-name>Trafodion</project-name>
                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>
-                <project-support>user@trafodion.incubator.apache.org</project-support>
-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>
+                <project-support>user@trafodion.apache.org</project-support>
+                <docs-url>http://trafodion.apache.org/docs</docs-url>
                 <build-date>${maven.build.timestamp}</build-date>
               </attributes>
             </configuration>
@@ -287,9 +287,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/docs/spj_guide/src/asciidoc/_chapters/deploy_spjs.adoc b/docs/spj_guide/src/asciidoc/_chapters/deploy_spjs.adoc
index 7f55713..23b84aa 100644
--- a/docs/spj_guide/src/asciidoc/_chapters/deploy_spjs.adoc
+++ b/docs/spj_guide/src/asciidoc/_chapters/deploy_spjs.adoc
@@ -66,7 +66,7 @@
 +
 ```
 $ # Set up the environmental variables.
-$ cd $HOME/trafodion-incubator
+$ cd $HOME/trafodion
 $ source env.sh
 $ # Prepare the class jar file
 $ cd $HOME/trafodion-spjs
diff --git a/docs/spj_guide/src/resources/source/Inventory.java b/docs/spj_guide/src/resources/source/Inventory.java
index 76c5359..1cfd397 100644
--- a/docs/spj_guide/src/resources/source/Inventory.java
+++ b/docs/spj_guide/src/resources/source/Inventory.java
@@ -7,7 +7,7 @@
    // supplier's name, street, city, state, and post code to separate output
    // parameters.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#supplierinfo-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#supplierinfo-procedure
    // for more documentation.
    public static void supplierInfo( BigDecimal suppNum
                                   , String[] suppName
@@ -46,7 +46,7 @@
    // quantities of available parts in inventory to separate output
    // parameters.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#supplyquantities-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#supplyquantities-procedure
    // for more documentation.
    public static void supplyQuantities( int[] avgQty
                                       , int[] minQty
@@ -79,7 +79,7 @@
    // set of location codes that have the exact quantity and a set of location
    // codes that have more than that quantity.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#partlocations-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#partlocations-procedure
    // for more documentation.
    public static void partLocations( int partNum
                                    , int quantity
diff --git a/docs/spj_guide/src/resources/source/Payroll.java b/docs/spj_guide/src/resources/source/Payroll.java
index 89c28a5..13b8d1c 100644
--- a/docs/spj_guide/src/resources/source/Payroll.java
+++ b/docs/spj_guide/src/resources/source/Payroll.java
@@ -8,7 +8,7 @@
    // percentage. This method also returns the updated salary to an output  
    // parameter.    
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#adjustsalary-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#adjustsalary-procedure
    // for more documentation.
    public static void adjustSalary( BigDecimal empNum
                                   , double percent
@@ -48,7 +48,7 @@
    // The EMPLOYEEJOB procedure accepts an employee number and returns a job
    // code or null value to an output parameter.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#employeejob-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#employeejob-procedure
    // for more documentation.
    public static void employeeJob( int empNum
                                  , java.lang.Integer[] jobCode
@@ -82,7 +82,7 @@
    // employee number, first name, last name, and location of the employees
    // assigned to that project.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#projectteam-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#projectteam-procedure
    // for more documentation.
    public static void projectTeam( int projectCode
                                  , ResultSet[] members
@@ -110,7 +110,7 @@
    // figures of the top five sales representatives who had the highest sales
    // (unit_price * qty_ordered) that quarter.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#topsalesreps-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#topsalesreps-procedure
    // for more documentation.
    public static void topSalesReps( int whichQuarter
                                   , ResultSet[] topReps
diff --git a/docs/spj_guide/src/resources/source/Sales.java b/docs/spj_guide/src/resources/source/Sales.java
index fd47df8..c611ad1 100644
--- a/docs/spj_guide/src/resources/source/Sales.java
+++ b/docs/spj_guide/src/resources/source/Sales.java
@@ -7,7 +7,7 @@
    // is, have less than 50 orders) and lowers the price of these items in the
    // database by 10 percent.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#lowerprice-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#lowerprice-procedure
    // for more documentation.
    public static void lowerPrice() throws SQLException
    {
@@ -50,7 +50,7 @@
    // The DAILYORDERS procedure accepts a date and returns the number of
    //orders on that date to an output parameter.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#dailyorders-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#dailyorders-procedure
    // for additional documenation.
    public static void numDailyOrders( Date date
                                     , int[] numOrders 
@@ -80,7 +80,7 @@
    // The MONTHLYORDERS procedure accepts an integer representing the month
    // and returns the number of orders during that month to an output parameter.
    // 
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#monthlyorders-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#monthlyorders-procedure
    // for more documentation.
    public static void numMonthlyOrders( int month
                                       , int[] numOrders
@@ -122,7 +122,7 @@
    // of an item, calculates the total price, including tax and shipping
    // charges, and returns the total price to an input/output parameter.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#totalprice-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#totalprice-procedure
    // for more documentation.
    public static void totalPrice( BigDecimal qtyOrdered
                                 , String shippingSpeed
@@ -175,7 +175,7 @@
    // * A result set that contains rows from the PARTSUPP table for suppliers who carry this part.
    // * A result set that contains rows from the EMPLOYEE table for sales reps who have sold this part.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#partdata-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#partdata-procedure
    // for more documentation.
    public static void partData( int partNum
                               , String[] partDescription
@@ -281,7 +281,7 @@
    //   contains fields for the order number, part number, unit price, quantity
    //   ordered, and part description.
    //
-   // See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#ordersummary-procedure
+   // See http://trafodion.apache.org/docs/spj_guide/index.html#ordersummary-procedure
    // for more documentation.
    public static void orderSummary( java.lang.String onOrAfter
                                   , long[] numOrders
diff --git a/docs/spj_guide/src/resources/source/adjustSalary.java b/docs/spj_guide/src/resources/source/adjustSalary.java
index cc2b095..46df21c 100644
--- a/docs/spj_guide/src/resources/source/adjustSalary.java
+++ b/docs/spj_guide/src/resources/source/adjustSalary.java
@@ -3,7 +3,7 @@
 // percentage. This method also returns the updated salary to an output  
 // parameter.    
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#adjustsalary-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#adjustsalary-procedure
 // for more documentation.
 public static void adjustSalary( BigDecimal empNum
 			       , double percent
diff --git a/docs/spj_guide/src/resources/source/employeeJob.java b/docs/spj_guide/src/resources/source/employeeJob.java
index ce5cdab..e9f083a 100644
--- a/docs/spj_guide/src/resources/source/employeeJob.java
+++ b/docs/spj_guide/src/resources/source/employeeJob.java
@@ -1,7 +1,7 @@
 // The EMPLOYEEJOB procedure accepts an employee number and returns a job
 // code or null value to an output parameter.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#employeejob-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#employeejob-procedure
 // for more documentation.
 public static void employeeJob( int empNum
 			      , java.lang.Integer[] jobCode
diff --git a/docs/spj_guide/src/resources/source/lowerPrice.java b/docs/spj_guide/src/resources/source/lowerPrice.java
index b49ff2f..db00775 100644
--- a/docs/spj_guide/src/resources/source/lowerPrice.java
+++ b/docs/spj_guide/src/resources/source/lowerPrice.java
@@ -2,7 +2,7 @@
 // is, have less than 50 orders) and lowers the price of these items in the
 // database by 10 percent.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#lowerprice-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#lowerprice-procedure
 // for more documentation.
 public static void lowerPrice() throws SQLException
 {
diff --git a/docs/spj_guide/src/resources/source/numDailyOrders.java b/docs/spj_guide/src/resources/source/numDailyOrders.java
index 568d71f..642ab85 100644
--- a/docs/spj_guide/src/resources/source/numDailyOrders.java
+++ b/docs/spj_guide/src/resources/source/numDailyOrders.java
@@ -1,7 +1,7 @@
 // The DAILYORDERS procedure accepts a date and returns the number of
 //orders on that date to an output parameter.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#dailyorders-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#dailyorders-procedure
 // for additional documenation.
 public static void numDailyOrders( Date date
 				 , int[] numOrders 
diff --git a/docs/spj_guide/src/resources/source/numMonthlyOrders.java b/docs/spj_guide/src/resources/source/numMonthlyOrders.java
index 942ea01..7654d12 100644
--- a/docs/spj_guide/src/resources/source/numMonthlyOrders.java
+++ b/docs/spj_guide/src/resources/source/numMonthlyOrders.java
@@ -1,7 +1,7 @@
 // The MONTHLYORDERS procedure accepts an integer representing the month
 // and returns the number of orders during that month to an output parameter.
 // 
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#monthlyorders-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#monthlyorders-procedure
 // for more documentation.
 public static void numMonthlyOrders( int month
 				   , int[] numOrders
diff --git a/docs/spj_guide/src/resources/source/orderSummary.java b/docs/spj_guide/src/resources/source/orderSummary.java
index 8b8e5bb..50cdf74 100644
--- a/docs/spj_guide/src/resources/source/orderSummary.java
+++ b/docs/spj_guide/src/resources/source/orderSummary.java
@@ -11,7 +11,7 @@
 //   contains fields for the order number, part number, unit price, quantity
 //   ordered, and part description.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#ordersummary-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#ordersummary-procedure
 // for more documentation.
 public static void orderSummary( java.lang.String onOrAfter
 			       , long[] numOrders
diff --git a/docs/spj_guide/src/resources/source/partData.java b/docs/spj_guide/src/resources/source/partData.java
index 6d7b54b..db0f8dc 100644
--- a/docs/spj_guide/src/resources/source/partData.java
+++ b/docs/spj_guide/src/resources/source/partData.java
@@ -8,7 +8,7 @@
 // * A result set that contains rows from the PARTSUPP table for suppliers who carry this part.
 // * A result set that contains rows from the EMPLOYEE table for sales reps who have sold this part.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#partdata-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#partdata-procedure
 // for more documentation.
 public static void partData( int partNum
 			   , String[] partDescription
diff --git a/docs/spj_guide/src/resources/source/partLocations.java b/docs/spj_guide/src/resources/source/partLocations.java
index 88cdaae..873b40d 100644
--- a/docs/spj_guide/src/resources/source/partLocations.java
+++ b/docs/spj_guide/src/resources/source/partLocations.java
@@ -2,7 +2,7 @@
 // set of location codes that have the exact quantity and a set of location
 // codes that have more than that quantity.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#partlocations-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#partlocations-procedure
 // for more documentation.
 public static void partLocations( int partNum
 				, int quantity
diff --git a/docs/spj_guide/src/resources/source/projectTeam.java b/docs/spj_guide/src/resources/source/projectTeam.java
index 0d150c4..024c348 100644
--- a/docs/spj_guide/src/resources/source/projectTeam.java
+++ b/docs/spj_guide/src/resources/source/projectTeam.java
@@ -2,7 +2,7 @@
 // employee number, first name, last name, and location of the employees
 // assigned to that project.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#projectteam-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#projectteam-procedure
 // for more documentation.
 public static void projectTeam( int projectCode
 			      , ResultSet[] members
diff --git a/docs/spj_guide/src/resources/source/supplierInfo.java b/docs/spj_guide/src/resources/source/supplierInfo.java
index c98a392..01a1dd8 100644
--- a/docs/spj_guide/src/resources/source/supplierInfo.java
+++ b/docs/spj_guide/src/resources/source/supplierInfo.java
@@ -2,7 +2,7 @@
 // supplier's name, street, city, state, and post code to separate output
 // parameters.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#supplierinfo-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#supplierinfo-procedure
 // for more documentation.
 public static void supplierInfo( BigDecimal suppNum
 			       , String[] suppName
diff --git a/docs/spj_guide/src/resources/source/supplyQuantities.java b/docs/spj_guide/src/resources/source/supplyQuantities.java
index 59a6911..c29d593 100644
--- a/docs/spj_guide/src/resources/source/supplyQuantities.java
+++ b/docs/spj_guide/src/resources/source/supplyQuantities.java
@@ -2,7 +2,7 @@
 // quantities of available parts in inventory to separate output
 // parameters.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#supplyquantities-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#supplyquantities-procedure
 // for more documentation.
 public static void supplyQuantities( int[] avgQty
 				   , int[] minQty
diff --git a/docs/spj_guide/src/resources/source/topSalesReps.java b/docs/spj_guide/src/resources/source/topSalesReps.java
index beee8ca..18eb455 100644
--- a/docs/spj_guide/src/resources/source/topSalesReps.java
+++ b/docs/spj_guide/src/resources/source/topSalesReps.java
@@ -4,7 +4,7 @@
 // figures of the top five sales representatives who had the highest sales
 // (unit_price * qty_ordered) that quarter.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#topsalesreps-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#topsalesreps-procedure
 // for more documentation.
 public static void topSalesReps( int whichQuarter
 			       , ResultSet[] topReps
diff --git a/docs/spj_guide/src/resources/source/totalPrice.java b/docs/spj_guide/src/resources/source/totalPrice.java
index 5e1ff15..4eafbb4 100644
--- a/docs/spj_guide/src/resources/source/totalPrice.java
+++ b/docs/spj_guide/src/resources/source/totalPrice.java
@@ -2,7 +2,7 @@
 // of an item, calculates the total price, including tax and shipping
 // charges, and returns the total price to an input/output parameter.
 //
-// See http://trafodion.incubator.apache.org/docs/spj_guide/index.html#totalprice-procedure
+// See http://trafodion.apache.org/docs/spj_guide/index.html#totalprice-procedure
 // for more documentation.
 public static void totalPrice( BigDecimal qtyOrdered
 			     , String shippingSpeed
diff --git a/docs/sql_reference/pom.xml b/docs/sql_reference/pom.xml
index c5ebf3c..0d6f3c8 100644
--- a/docs/sql_reference/pom.xml
+++ b/docs/sql_reference/pom.xml
@@ -33,7 +33,7 @@
                functions, and other SQL language elements supported by the Trafodion project’s 

                database software.

   </description>

-  <url>http://trafodion.incubator.apache.org</url>

+  <url>http://trafodion.apache.org</url>

   <inceptionYear>2015</inceptionYear>

   <parent>

     <groupId>org.apache.trafodion</groupId>

@@ -63,9 +63,9 @@
   </issueManagement>

 

   <scm>

-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>

-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>

-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>

+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>

+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>

+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>

     <tag>HEAD</tag>

   </scm>

 

@@ -190,8 +190,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>

                 <project-name>Trafodion</project-name>

                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>

-                <project-support>user@trafodion.incubator.apache.org</project-support>

-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>

+                <project-support>user@trafodion.apache.org</project-support>

+                <docs-url>http://trafodion.apache.org/docs</docs-url>

                 <build-date>${maven.build.timestamp}</build-date>

                 <google-analytics-account>UA-72491210-1</google-analytics-account>

               </attributes>

@@ -212,8 +212,8 @@
                 <project-version>${env.TRAFODION_VER}</project-version>

                 <project-name>Trafodion</project-name>

                 <project-logo>${basedir}/../shared/trafodion-logo.jpg</project-logo>

-                <project-support>user@trafodion.incubator.apache.org</project-support>

-                <docs-url>http://trafodion.incubator.apache.org/docs</docs-url>

+                <project-support>user@trafodion.apache.org</project-support>

+                <docs-url>http://trafodion.apache.org/docs</docs-url>

                 <build-date>${maven.build.timestamp}</build-date>

                 <pdf-stylesdir>${basedir}/../shared</pdf-stylesdir>

                 <pdf-style>trafodion</pdf-style>

@@ -290,9 +290,9 @@
 

   <distributionManagement>

     <site>

-      <id>trafodion.incubator.apache.org</id>

-      <name>Trafodion Website at incubator.apache.org</name>

-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see

+      <id>trafodion.apache.org</id>

+      <name>Trafodion Website at apache.org</name>

+      <!-- On why this is the tmp dir and not trafodion.apache.org, see

       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866

       -->

       <url>file:///tmp</url>

diff --git a/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc b/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc
index 91ff943..4ec556d 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc
@@ -260,7 +260,7 @@
 

 * `_inline-window-specification_`

 +

-specifies_the_window_over_which_the_avg_is_computed. The

+specifies the window over which the AVG is computed. The

 _inline-window-specification_ can contain an optional partition by

 clause, an optional ORDER BY clause and an optional window frame clause.

 The PARTITION BY clause specifies how the intermediate result is

@@ -483,7 +483,7 @@
 [[first_value_window_function]]

 == FIRST_VALUE Window Function

 

-The FIRST_VALUE window function returns the first value from a sorted partition.
+The FIRST_VALUE window function returns the first value from a sorted partition.

 

 `FIRST_VALUE (expression)`

 

diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
index d111292..0d5c3c7 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
@@ -213,6 +213,24 @@
 
 See the individual entry for the function.
 
+[[lob_functions]]
+== LOB Functions
+
+Trafodion provides the following LOB functions to process LOB data.
+
+[cols="25%,75%"]
+|===
+| <<emptyblob_function,EMPTY_BLOB() Function>>   | Creates a dummy LOB handle of type BLOB.
+| <<emptyclob_function,EMPTY_CLOB() Function>>   | Creates a dummy LOB handle of type CLOB.
+| <<stringtolob_function,STRINGTOLOB Function>>  | Converts a simple string literal into LOB format.
+| <<filetolob_function,FILETOLOB Function>>      | Converts data from a local linux/hdfs file into LOB format.
+| <<buffertolob_function,BUFFERTOLOB Function>>     | Takes the address and size of an input buffer, and converts the contents of that buffer into LOB format.
+| <<externaltolob_function,EXTERNALTOLOB Function>> | Converts data from an external file into LOB format.
+| LOBTOSTRING                     | Converts LOB data into a simple string literal.
+| LOBTOFILE                       | Converts LOB data into a linux/hdfs file.
+| LOBTOBUFFER                     | Converts LOB data into a buffer.
+|===
+
 [[mathematical_functions]]
 == Mathematical Functions
 
@@ -927,6 +945,45 @@
 ```
 
 <<<
+[[buffertolob_function]]
+== BUFFERTOLOB Function 
+
+[[buffertolob_function_syntax]]
+=== Syntax Descriptions of BUFFERTOLOB Function
+
+The BUFFERTOLOB function takes the address and size of an input buffer, and converts the contents of that buffer into LOB. This function can be used in an INSERT or UPDATE statement.
+
+BUFFERTOLOB function is a Trafodion SQL extension.
+
+For more information, see http://trafodion.apache.org/docs/lob_guide/index.html[Trafodion SQL Large Objects Guide].
+
+```
+BUFFERTOLOB(LOCATION lob source buffer address, SIZE lob length value)
+```
+
+* _lob source buffer address_
++
+The int64 value of the user buffer address.
+
+* _lob length value_
++
+The int64 value of the user specified lob buffer.
+                 
+[[buffertolob_function_examples]]
+=== Examples of BUFFERTOLOB Function
+* This example takes an int64 value as an input which is an address to a buffer and a size parameter. The buffer contents are converted to LOB format and stored in HDFS.
++
+```
+insert into tlob1 values (1, buffertolob(LOCATION 124647474, SIZE 2048));
+```
+
+* In the table tlob1, this example updates (overwrites) the c2 to the buffer location at 12546474 with 4000-byte length.
++
+```
+update tlob1 set c2=buffertolob(LOCATION 12546474, SIZE 4000);
+```
+
+<<<
 [[case_expression]]
 == CASE (Conditional) Expression
 
@@ -1435,7 +1492,7 @@
 
 <<<
 [[concat_function]]
-=== CONCAT Function
+== CONCAT Function
 
 The CONCAT function returns the concatenation of two character value
 expressions as a character string value. You can also use the
@@ -1477,7 +1534,7 @@
 === Considerations for CONCAT
 
 [[operands]]
-=== Operands
+==== Operands
 
 
 A string value can be specified by any character value expression, such
@@ -1490,7 +1547,7 @@
 
 
 [[sql-parameters]]
-=== SQL Parameters
+==== SQL Parameters
 
 You can concatenate an SQL parameter and a character value expression.
 The concatenated parameter takes on the data type attributes of the
@@ -3294,7 +3351,6 @@
 preceding examples. For example, in the second row of the output of this
 example, 0.0007319 is equal to 21959 divided by 30002620.
 
-
 <<<
 [[diff2_function]]
 == DIFF2 Function
@@ -3406,6 +3462,92 @@
 ```
 
 <<<
+[[emptyblob_function]]
+== EMPTY_BLOB() Function
+
+[[syntax_descriptions_of_emptyblob_function]]
+=== Syntax Descriptions of EMPTY_BLOB() Function
+
+The EMPTY_BLOB() function creates a dummy LOB handle of type BLOB.
+
+This function can be used in an INSERT or UPDATE statement, to initialize a LOB column or attribute to EMPTY.
+
+EMPTY_BLOB() function is a Trafodion SQL extension.
+
+For more information, see http://trafodion.apache.org/docs/lob_guide/index.html[Trafodion SQL Large Objects Guide].
+
+```
+EMPTY_BLOB()
+```
+
+[[considerations_for_emptyblob_function]]
+=== Considerations for EMPTY_BLOB() Function
+
+* If EMPTY_BLOB() is specified, then a dummy lob handle is created.
+
+** No data is associated with the empty LOBs yet, but these dummy LOB handles can later be used to populate the LOB with new data. If the LOB had data previously associated with it, it will be erased.
+
+** The dummy LOB handle will get the same datatype as the underlying column.
++
+For example,
++
+*** If the LOB column is defined as `'EXTERNAL'` during table creation, then the dummy LOB handle will get the type `'EXTERNAL'` and can only process external LOB data.
++
+*** If the LOB column is not defined as `'EXTERNAL'` during table creation, then the dummy LOB handle will be considered as a regular LOB dummy handle. If this handle is used to perform an insert or update operation, the LOB data will be maintained within the Trafodion space in the internal location maintained by Trafodion.
+
+* An empty LOB is distinct from a LOB containing a string of length zero or a null LOB.
+
+[[examples_of_emptyblob_function]]
+=== Examples of EMPTY_BLOB() Function
+
+* This example uses the EMPTY_BLOB to insert an empty LOB and creates a dummy LOB handle. 
++
+```
+insert into test1 values (1, empty_blob());
+```
+
+<<<
+[[emptyclob_function]]
+== EMPTY_CLOB() Function
+
+[[syntax_descriptions_of_emptyclob_function]]
+=== Syntax Descriptions of EMPTY_CLOB() Function
+
+The EMPTY_CLOB() function creates a dummy LOB handle of type CLOB.
+
+This function can be used in an INSERT or UPDATE statement, to initialize a LOB column or attribute to EMPTY.
+
+EMPTY_CLOB() function is a Trafodion SQL extension.
+
+For more information, see http://trafodion.apache.org/docs/lob_guide/index.html[Trafodion LOB Guide].
+
+```
+EMPTY_CLOB()
+```
+
+[[considerations_for_emptyclob_function]]
+=== Considerations for EMPTY_CLOB() Function
+
+* If EMPTY_CLOB() is specified, then a dummy lob handle is created.
+
+** No data is associated with the empty LOBs yet, but these dummy LOB handles can later be used to populate the LOB with new data. If the LOB had data previously associated with it, it will be erased.
+
+** The dummy LOB handle will get the same datatype as the underlying column.
++
+For example, if the LOB column was defined as `'EXTERNAL'` during table creation, then the LOB column gets that type. If it's not defined, then it is considered as a regular LOB.
+
+* An empty LOB is distinct from a LOB containing a string of length zero or a null LOB.
+
+[[examples_of_emptyclob_function]]
+=== Examples of EMPTY_CLOB() Function
+
+* This example uses the EMPTY_CLOB to insert an empty LOB and creates a dummy LOB handle. 
++
+```
+insert into test2 values (1, empty_clob());
+```
+
+<<<
 [[exp_function]]
 == EXP Function
 
@@ -3639,6 +3781,52 @@
 then selects the desired columns.
 
 <<<
+[[externaltolob_function]]
+== EXTERNALTOLOB Function
+
+[[externaltolob_function_syntax]]
+=== Syntax Descriptions of EXTERNALTOLOB Function
+
+The EXTERNALTOLOB function converts data from an external file into LOB format. This function can be used in an INSERT or UPDATE statement.          
+
+EXTERNALTOLOB function is a Trafodion SQL extension.
+
+For more information, see http://trafodion.apache.org/docs/lob_guide/index.html[Trafodion SQL Large Objects Guide].
+
+```
+EXTERNALTOLOB('external lob source file name')              
+```
+
+```
+external lob source file name is: 
+hdfs:///{local hdfs file name}
+```
+
+[[externaltolob_function_considerations]]
+=== Considerations for EXTERNALTOLOB Function
+
+* When an external LOB is specified via `EXTERNALTOLOB('external lob source file name')`, the data associated with the external HDFS file is not transferred into the Trafodion LOB. Instead, Trafodion stores the file path/handle of the external file. 
++
+For example, if you have a directory of pictures, you can specify the full hdfs path to each picture file to this function and the path will get stored in the Trafodion table. Later during retrieval, the file name will be used to go to the actual file to retrieve the data. 
+
+[[externaltolob_function_examples]]
+=== Examples of EXTERNALTOLOB Function
+
+* This example uses different functions to convert strings, files, external lob into LOB data. The EXTERNALTOLOB function takes an external file. 
++
+```
+insert into tlob130ext values(1, stringtolob('first lob'),                                           
+filetolob('hdfs:///lobs/lob_input_a1.txt'), 
+externaltolob('hdfs:///lobs/lob_input_a1.txt'));
+```
+
+* In the table tlob130ext where c1 is 2, this example updates (overwrites) the c4 to lob_input_d1.txt stored in hdfs:///lobs/.
++
+```
+update tlob130ext set c4=externaltolob('hdfs:///lobs/lob_input_d1.txt') where c1=2;
+```
+
+<<<
 [[extract_function]]
 == EXTRACT Function
 
@@ -3679,40 +3867,43 @@
 +
 The result is 1.
 
-
 <<<
-[[hour_function]]
-=== HOUR Function
+[[filetolob_function]]
+== FILETOLOB Function
 
-The HOUR function converts a TIME or TIMESTAMP expression into an
-INTEGER value in the range 0 through 23 that represents the
-corresponding hour of the day.
+[[filetolob_function_syntax]]
+=== Syntax Descriptions of FILETOLOB Function
 
-HOUR is a {project-name} SQL extension.
+The FILETOLOB function converts data from a local linux/hdfs file into LOB format. This function can be used in an INSERT or UPDATE statement.          
+
+FILETOLOB function is a Trafodion SQL extension.
+
+For more information, see http://trafodion.apache.org/docs/lob_guide/index.html[Trafodion SQL Large Objects Guide].
 
 ```
-HOUR (datetime-expression)
+FILETOLOB('lob source file name')
+```     
+
+lob source file name is:
 ```
-* `_datetime-expression_`
-+
-is an expression that evaluates to a datetime value of type TIME or
-TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.
+hdfs:///{local hdfs file name} |  
+{local linux file name}        |
+{file:///linux file name} 
+```
 
-[[examples_of_hour]]
-=== Examples of HOUR
+[[filetolob_function_examples]]
+=== Examples of FILETOLOB Function
 
-* Return an integer that represents the hour of the day from the
-ship timestamp column in the project table:
+* This example converts data from a local file into LOB format, and stores all data into HDFS associated with that column/row.
 +
 ```
-SELECT start_date, ship_timestamp, HOUR(ship_timestamp)
-FROM persnl.project
-WHERE projcode = 1000;
+insert into tlob130txt1 values(1,filetolob('lob_input_a1.txt'));
+```
 
-
-Start/Date Time/Shipped               (EXPR)
----------- -------------------------- ------
-2007-04-10 2007-04-21 08:15:00.000000      8
+* In the table tlob1 where c1 is 3, this example updates (appends) the c2 to lob_update.txt stored in hdfs:///lobs/.
++
+```
+update tlob1 set c2=filetolob('hdfs:///lobs/lob_update.txt', append) where c1 = 3;
 ```
 
 <<<
@@ -3757,6 +3948,41 @@
 ```
 
 <<<
+[[hour_function]]
+== HOUR Function
+
+The HOUR function converts a TIME or TIMESTAMP expression into an
+INTEGER value in the range 0 through 23 that represents the
+corresponding hour of the day.
+
+HOUR is a {project-name} SQL extension.
+
+```
+HOUR (datetime-expression)
+```
+* `_datetime-expression_`
++
+is an expression that evaluates to a datetime value of type TIME or
+TIMESTAMP. See <<datetime_value_expressions,Datetime Value Expressions>>.
+
+[[examples_of_hour]]
+=== Examples of HOUR
+
+* Return an integer that represents the hour of the day from the
+ship timestamp column in the project table:
++
+```
+SELECT start_date, ship_timestamp, HOUR(ship_timestamp)
+FROM persnl.project
+WHERE projcode = 1000;
+
+
+Start/Date Time/Shipped               (EXPR)
+---------- -------------------------- ------
+2007-04-10 2007-04-21 08:15:00.000000      8
+```
+
+<<<
 [[insert_function]]
 == INSERT Function
 
@@ -6112,6 +6338,302 @@
 ```
 
 <<<
+[[rollup_function]]
+== ROLLUP Function
+
+The ROLLUP function calculates multiple levels of subtotals aggregating from right to left through the comma-separated list of columns, and provides a grand total. 
+
+ROLLUP is an extension to the 'GROUP BY' clause. Related features such as the GROUPING function can be used with 'ORDER BY' to control the placement of summary results.
+
+```
+SELECT…GROUP BY ROLLUP (column 1, [column 2,]…[column n])
+```
+
+ROLLUP generates n+1 levels of subtotals, including a grand total, where n is the number of the selected column(s).
+
+For example, a query that contains three rollup columns returns the following rows:
+
+* First-level: the usual aggregate values as calculated by GROUP BY clause without using ROLLUP.
+* Second-level: subtotals aggregating across column 3 for each combination of column 1 and column 2.
+* Third-level: subtotals aggregating across column 2 and column 3 for each column 1.
+* Fourth-level: the grand total row.
+
+NOTE: Trafodion does not support the CUBE function, which works slightly differently from ROLLUP.
+
+[[considerations_for_rollup]]
+=== Considerations for ROLLUP
+
+[[null_in_result_sets]]
+==== NULL in Result Sets
+
+* In super-aggregate rows representing subtotals or the grand total, lower level grouping columns are replaced by NULLs.
+* The NULLs in selected columns are considered equal and sorted into one NULL group in result sets.
+
+[[using_rollup_with_the_column_order_reversed]]
+==== Using ROLLUP with the Column Order Reversed
+
+ROLLUP removes the right-most column at each step, therefore the result sets vary with the column order specified in the comma-separated list. 
+
+[cols="50%,50%"]
+|===
+| If the column order is _country_, _state_, _city_ and _name_, ROLLUP returns following groupings. 
+| If the column order is _name_, _city_, _state_ and _country_, ROLLUP returns following groupings.
+| _country_, _state_, _city_ and _name_      | _name_, _city_, _state_ and _country_
+| _country_, _state_ and _city_              | _name_, _city_ and _state_
+| _country_ and _state_                      | _name_ and _city_
+| _country_                                  | _name_
+| grand total                                | grand total
+|===
+
+[[examples_of_rollup]]
+=== Examples of ROLLUP
+
+[[examples_of_grouping_by_one_or_multiple_rollup_columns]]
+==== Examples of Grouping By One or Multiple Rollup Columns
+
+Suppose that we have a _sales1_ table like this:
+
+```
+SELECT * FROM sales1;
+
+DELIVERY_YEAR REGION PRODUCT                          REVENUE    
+------------- ------ -------------------------------- -----------
+         2016 A      Dress                                    100
+         2016 A      Dress                                    200
+         2016 A      Pullover                                 300
+         2016 B      Dress                                    400
+         2017 A      Pullover                                 500
+         2017 B      Dress                                    600
+         2017 B      Pullover                                 700
+         2017 B      Pullover                                 800
+
+--- 8 row(s) selected.
+```
+
+* This is an example of grouping by one rollup column.
++
+```
+SELECT delivery_year, SUM (revenue) AS total_revenue 
+FROM sales1
+GROUP BY ROLLUP (delivery_year);
+```
+
++
+```
+DELIVERY_YEAR TOTAL_REVENUE       
+------------- --------------------
+         2016                 1000
+         2017                 2600
+         NULL                 3600
+
+--- 3 row(s) selected.
+```
+
+* This is an example of grouping by two rollup columns.
++ 
+ROLLUP firstly aggregates at the lowest level (_region_) and then rolls up those aggregations to the next
+level (_delivery_year_), finally it produces a grand total across these two levels.
+
++
+```
+SELECT delivery_year, region, SUM (revenue) AS total_revenue 
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region);
+```
+
++
+```
+DELIVERY_YEAR REGION TOTAL_REVENUE       
+------------- ------ --------------------
+         2016 A                       600
+         2016 B                       400
+         2016 NULL                   1000
+         2017 A                       500
+         2017 B                      2100
+         2017 NULL                   2600
+         NULL NULL                   3600
+
+--- 7 row(s) selected.
+```
++
+
+* This is an example of grouping by three rollup columns.
++
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue 
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
++
+.Grouping By Three Rollup Columns
+image::grouping-by-three-rollup-columns.jpg[700,700]
+
++
+** First-level: the rows marked in *blue* are the total revenue for each year (_2016_ and _2017_), each region (_A_ and _B_) and each product (_Dress_ and _Pullover_), they are calculated by GROUP BY instead of ROLLUP.
+
++
+** Second-level: the rows marked in *red* provide the total revenue for the given _delivery_year_ and _region_ by _product_.
++ 
+These rows have the _product_ columns set to NULL.
+
++
+** Third-level: the rows marked in *yellow* show the total revenue in each year (_2016_ and _2017_).
++ 
+These rows have the _region_ and _product_ columns set to NULL.
+
++
+** Fourth-level: the row marked in *purple* aggregates over all rows in the _delivery_year_, _region_ and _product_ columns.
++ 
+This row has the _delivery_year_, _region_ and _product_ columns set to NULL. 
+ 
+[[examples_of_null]]
+=== Examples of NULL
+
+The example below demonstrates how ROLLUP treats NULLs in the selected columns and generates NULLs for super-aggregate rows.
+
+Suppose that we have a _sales2_ table like this:
+
+```
+SELECT * FROM sales2;
+
+DELIVERY_YEAR REGION PRODUCT                          REVENUE    
+------------- ------ -------------------------------- -----------
+         NULL A      Dress                                    100
+         NULL A      Dress                                    200
+         2016 A      Pullover                                 300
+         2016 B      Dress                                    400
+         2017 A      Pullover                                 500
+         2017 B      Dress                                    600
+         NULL B      Pullover                                 700
+         NULL B      Pullover                                 800
+
+--- 8 row(s) selected.
+```
+
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue 
+FROM sales2
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
+```
+DELIVERY_YEAR REGION PRODUCT                          TOTAL_REVENUE       
+------------- ------ -------------------------------- --------------------
+         2016 A      Pullover                                          300
+         2016 A      NULL                                              300
+         2016 B      Dress                                             400
+         2016 B      NULL                                              400
+         2016 NULL   NULL                                              700
+         2017 A      Pullover                                          500
+         2017 A      NULL                                              500
+         2017 B      Dress                                             600
+         2017 B      NULL                                              600
+         2017 NULL   NULL                                             1100
+         NULL A      Dress                                             300
+         NULL A      NULL                                              300
+         NULL B      Pullover                                         1500
+         NULL B      NULL                                             1500
+         NULL NULL   NULL                                             1800
+         NULL NULL   NULL                                             3600
+
+--- 16 row(s) selected.
+```
+
+[[examples_of_using_rollup_with_the_column_order_reversed]]
+==== Examples of Using ROLLUP with the Column Order Reversed
+
+Suppose that we have the same _sales1_ table as shown in the <<examples_of_grouping_by_one_or_multiple_rollup_columns,Examples of Grouping By One or Multiple Rollup Columns>>.
+
+* The column order of the example below is _delivery_year_, _region_ and _product_.
+
++
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
++
+```
+DELIVERY_YEAR REGION PRODUCT                          TOTAL_REVENUE       
+------------- ------ -------------------------------- --------------------
+         2016 A      Dress                                             300
+         2016 A      Pullover                                          300
+         2016 A      NULL                                              600
+         2016 B      Dress                                             400
+         2016 B      NULL                                              400
+         2016 NULL   NULL                                             1000
+         2017 A      Pullover                                          500
+         2017 A      NULL                                              500
+         2017 B      Dress                                             600
+         2017 B      Pullover                                         1500
+         2017 B      NULL                                             2100
+         2017 NULL   NULL                                             2600
+         NULL NULL   NULL                                             3600
+
+--- 13 row(s) selected.
+```
+
+* The column order of the example below is _product_, _region_ and _delivery_year_, the output is different from the result sets above. 
+
++
+```
+SELECT product, region, delivery_year, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (product, region, delivery_year);
+```
+
++
+```
+PRODUCT                          REGION DELIVERY_YEAR TOTAL_REVENUE       
+-------------------------------- ------ ------------- --------------------
+Dress                            A               2016                  300
+Dress                            A               NULL                  300
+Dress                            B               2016                  400
+Dress                            B               2017                  600
+Dress                            B               NULL                 1000
+Dress                            NULL            NULL                 1300
+Pullover                         A               2016                  300
+Pullover                         A               2017                  500
+Pullover                         A               NULL                  800
+Pullover                         B               2017                 1500
+Pullover                         B               NULL                 1500
+Pullover                         NULL            NULL                 2300
+NULL                             NULL            NULL                 3600
+
+--- 13 row(s) selected.
+```
+
+[[examples_of_using_rollup_with_order_by]]
+==== Examples of Using ROLLUP with ORDER BY
+
+Suppose that we have the same _sales1_ table as shown in the <<examples_of_grouping_by_one_or_multiple_rollup_columns,Examples of Grouping By One or Multiple Rollup Columns>>.
+
+This example uses ROLLUP with the ORDER BY clause to sort the results.
+
+```
+SELECT delivery_year, product, SUM (revenue) AS total_revenue 
+FROM sales1 
+GROUP BY ROLLUP (delivery_year, product)
+ORDER BY total_revenue;
+```
+
+```
+DELIVERY_YEAR PRODUCT                          TOTAL_REVENUE       
+------------- -------------------------------- --------------------
+         2016 Pullover                                          300
+         2017 Dress                                             600
+         2016 Dress                                             700
+         2016 NULL                                             1000
+         2017 Pullover                                         2000
+         2017 NULL                                             2600
+         NULL NULL                                             3600
+
+--- 7 row(s) selected.
+```
+
+<<<
 [[round_function]]
 == ROUND Function
 
@@ -7183,6 +7705,40 @@
 ```
 
 <<<
+[[stringtolob_function]]
+== STRINGTOLOB Function
+
+[[stringtolob_function_syntax]]
+=== Syntax Descriptions of STRINGTOLOB Function
+
+The STRINGTOLOB function converts a simple string literal into LOB format. This function can be used in an INSERT or UPDATE statement.
+
+STRINGTOLOB function is a Trafodion SQL extension.
+
+For more information, see http://trafodion.apache.org/docs/lob_guide/index.html[Trafodion SQL Large Objects Guide].
+
+```
+STRINGTOLOB('string literal expression')  
+```
+
+* STRINGTOLOB   
++
+Converts a simple string literal into LOB format.
+
+** string literal expression
++
+is a series of characters enclosed in single quotes.
+
+[[stringtolob_function_examples]]
+=== Examples of STRINGTOLOB Function
+
+* This example converts a simple string literal into LOB format before inserting.
++
+```
+insert into tlob1 values(1,stringtolob('inserted row'));
+```
+
+<<<
 [[substring_function]]
 == SUBSTRING/SUBSTR Function
 
diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_language_elements.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_language_elements.adoc
index 36ad0e5..2704107 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_language_elements.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_language_elements.adoc
@@ -312,6 +312,8 @@
 | <<datetime_data_types,Datetime Data Types>>                 | DATE, TIME, and TIMESTAMP data types.

 | <<interval_data_types,Interval Data Types>>                 | Year-month intervals (years and months) and day-time intervals (days,

 hours, minutes, seconds, and fractions of a second).

+| <<lob_data_types,LOB Data Types>>                           | A set of large object data types used to store large volumes of data, 

+provides random and piece-wise access to the data, such as BLOB and CLOB.

 | <<numeric_data_types_,Numeric Data Types >>                 | Exact and approximate numeric data types.

 |===

 

@@ -367,6 +369,9 @@
 FRACTION(n) 0-999999 +

 in which n is the number of significant digits (default is 6; minimum is 1; maximum is 6); +

 stored in 2, 4, or 8 bytes depending on number of digits^2^

+.2+| LOB Data Types .2+| LOB | BLOB | Binary Large Object .2+| 10G – no limit + 

+Currently the limit defaults to 10G. This limit can be configured as needed using the CQD `LOB_MAX_SIZE`.

+| CLOB | Character Large Object 

 .10+| Numeric Data Types .5+| Numeric | NUMERIC (1,_scale_) to + NUMERIC (128,_scale_) | Binary number with optional scale; signed or unsigned for 1 to 9 digits

 | 1 to 128 digits; +

 stored: +

diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc
index 7ffd7b9..33afd5e 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc
@@ -416,6 +416,8 @@
    | time [(time-precision)]

    | timestamp [(timestamp-precision)]

    | interval { start-field to end-field | single-field }

+   | blob [({numeric literal} [unit])] [STORAGE 'storage literal']

+   | clob [({numeric literal} [unit])] [STORAGE 'storage literal']

 

 default is:

      literal

@@ -1501,46 +1503,46 @@
 +

 a list of HBase options to set for the index. These options are applied independently of any HBase options set for

 the index’s table.

-

-// TODO: The Word document did not list all default values. 

+ 

 ** `_hbase-option_ = '_value_'`

 +

-is one of the these HBase options and its assigned value:

+is one of these HBase options and its assigned value:

 +

-[cols="35%,65%",options="header"]

+[options="header"]

 |===

-| HBase Option           | Accepted Values^1^

-| BLOCKCACHE             | 'true' \| 'false'

-| BLOCKSIZE              | *'65536'( \| '_positive-integer_'

-| BLOOMFILTER            | 'NONE' \| 'ROW' \| 'ROWCOL'

-| CACHE_BLOOMS_ON_WRITE  | 'true' \| 'false'

-| CACHE_DATA_ON_WRITE    | 'true' \| 'false'

-| CACHE_INDEXES_ON_WRITE | 'true' \| 'false'

-| COMPACT                | 'true' \| 'false'

-| COMPACT_COMPRESSION    | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'

-| COMPRESSION            | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'

-| DATA_BLOCK_ENCODING    | 'DIFF' \| 'FAST_DIFF' \| 'NONE' \| 'PREFIX'

-| DURABILITY             | 'USE_DEFAULT' \| 'SKIP_WAL' \| 'ASYNC_WAL' \| 'SYNC_WAL' \| 'FSYNC_WAL'

-| EVICT_BLOCKS_ON_CLOSE  | *'true'* \| 'false'

-| IN_MEMORY              | *'true'* \| 'false'

-| KEEP_DELETED_CELLS     | *'true'* \| 'false'

-| MAX_FILESIZE           | '_positive-integer_'

-| MAX_VERSIONS           | '1' \| '_positive-integer_'

-| MEMSTORE_FLUSH_SIZE    | '_positive-integer_'

-| MIN_VERSIONS           | '0' \| '_positive-integer_'

+| HBase Option           | Accepted Values                                                            | Default Values

+| BLOCKCACHE             | 'true' \| 'false'                                                          | 'true'

+| BLOCKSIZE              | '1024' - '64*1024*1024'                                                    | '64*1024'

+| BLOOMFILTER            | 'NONE' \| 'ROW' \| 'ROWCOL'                                                | 'ROW'

+| CACHE_BLOOMS_ON_WRITE  | 'true' \| 'false'                                                          | 'false'

+| CACHE_DATA_ON_WRITE    | 'true' \| 'false'                                                          | 'false'

+| CACHE_INDEXES_ON_WRITE | 'true' \| 'false'                                                          | 'false'

+| COMPACT                | 'true' \| 'false'                                                          | 'true'

+| COMPACT_COMPRESSION    | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'                               | 'NONE'

+| COMPRESSION            | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'                               | 'NONE'

+| DATA_BLOCK_ENCODING    | 'DIFF' \| 'FAST_DIFF' \| 'NONE' \| 'PREFIX'                                | 'NONE'

+| DURABILITY             | 'USE_DEFAULT' \| 'SKIP_WAL' \| 'ASYNC_WAL' \| 'SYNC_WAL' \| 'FSYNC_WAL'    | 'SYNC_WAL'

+| EVICT_BLOCKS_ON_CLOSE  | 'true' \| 'false'                                                          | 'false'

+| IN_MEMORY              | 'true' \| 'false'                                                          | 'false'

+| KEEP_DELETED_CELLS     | 'true' \| 'false'                                                          | 'false'

+| MAX_FILESIZE           | '2*1024*1024' - '2^63^-1'                                                  | '10*1024*1024*1024'

+| MAX_VERSIONS           | '1' \| '_positive-integer_'                                                | '1'

+| MEMSTORE_FLUSH_SIZE    | '1024*1024' - '2^63^-1'                                                    | '128*1024*1024'

+| MIN_VERSIONS           | '0' \| '_positive-integer_'                                                | '0'

 | PREFIX_LENGTH_KEY      | '_positive-integer_', which should be less than maximum length of the key for the table.

-It applies only if the SPLIT_POLICY is `KeyPrefixRegionSplitPolicy`.

-| REPLICATION_SCOPE      | '0' \| *'1'*

+It applies only if the SPLIT_POLICY is `KeyPrefixRegionSplitPolicy`.                                  | '2'

+| REPLICATION_SCOPE      | '0' \| '1'                                                                 | '1'

 | SPLIT_POLICY           | 'org.apache.hadoop.hbase.regionserver. +

 ConstantSizeRegionSplitPolicy' \| +

 'org.apache.hadoop.hbase.regionserver. +

 IncreasingToUpperBoundRegionSplitPolicy' \| +

 'org.apache.hadoop.hbase.regionserver. +

 KeyPrefixRegionSplitPolicy'

-| TTL                    | '-1' (forever) \| '_positive-integer_'

+|'org.apache.hadoop.hbase.regionserver. +

+IncreasingToUpperBoundRegionSplitPolicy'                                                      

+| TTL                    | '_positive-integer_', which should be less than or equal to 2147483647.

+                      | '2147483647' (forever)

 |===

-+

-^1^ Values in boldface are default values.

 

 * `SALT LIKE TABLE`

 +

@@ -2724,7 +2726,7 @@
 

 * `VOLATILE`

 +

-specifies a volatile table, which is a table limited to the session that creates the table. After the session ends, the

+specifies a volatile table, which is a table limited to the session that creates the table. After the session ends, the volatile table will be automatically dropped.

 

 * `IF NOT EXISTS`

 +

@@ -2772,40 +2774,44 @@
 

 ** `_hbase-option_ = '_value_'`

 +

-is one of the these HBase options and its assigned value:

+is one of these HBase options and its assigned value:

 +

-[cols="35%,65%",options="header"]

+[options="header"]

 |===

-| HBase Option                                | Accepted Values^1^

-| BLOCKCACHE                                  | 'true' \| 'false'

-| BLOCKSIZE                                   | '65536' \| '_positive-integer_'

-| BLOOMFILTER                                 | 'NONE' \| 'ROW' \| 'ROWCOL'

-| CACHE_BLOOMS_ON_WRITE                       | 'true' \| 'false'

-| CACHE_DATA_ON_WRITE                         | 'true' \| 'false'

-| CACHE_INDEXES_ON_WRITE                      | 'true' \| 'false'

-| COMPACT                                     | 'true' \| 'false'

-| COMPACT_COMPRESSION                         | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'

-| COMPRESSION                                 | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'

-| DATA_BLOCK_ENCODING                         | 'DIFF' \| 'FAST_DIFF' \| 'NONE' \| 'PREFIX'

-| DURABILITY                                  | 'USE_DEFAULT' \| 'SKIP_WAL' \| 'ASYNC_WAL' \| 'SYNC_WAL' \| 'FSYNC_WAL'

-| EVICT_BLOCKS_ON_CLOSE                       | *'true'* \| 'false'

-| IN_MEMORY                                   | *'true'* \| 'false'

-| KEEP_DELETED_CELLS                          | *'true'* \| 'false'

-| MAX_FILESIZE                                | '_positive-integer_'

-| MAX_VERSIONS                                | '1' \| '_positive-integer_'

-| MEMSTORE_FLUSH_SIZE                         | '_positive-integer_'

-| MIN_VERSIONS                                | '0' \| '_positive-integer_'

-| PREFIX_LENGTH_KEY                           | '_positive-integer_', which should be less than maximum length of the

-key for the table. It applies only if the SPLIT_POLICY is `KeyPrefixRegionSplitPolicy`.

-| REPLICATION_SCOPE                           | '0' \| '1'

-| SPLIT_POLICY                                | 'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy' \|

-'org.apache.hadoop.hbase.regionserver.IncreasingToUpperBoundRegionSplitPolicy' \|

-'org.apache.hadoop.hbase.regionserver.KeyPrefixRegionSplitPolicy'

-| TTL                                         | '-1' (forever) \| '_positive-integer_'

+| HBase Option           | Accepted Values                                                            | Default Values

+| BLOCKCACHE             | 'true' \| 'false'                                                          | 'true'

+| BLOCKSIZE              | '1024' - '64*1024*1024'                                                    | '64*1024'

+| BLOOMFILTER            | 'NONE' \| 'ROW' \| 'ROWCOL'                                                | 'ROW'

+| CACHE_BLOOMS_ON_WRITE  | 'true' \| 'false'                                                          | 'false'

+| CACHE_DATA_ON_WRITE    | 'true' \| 'false'                                                          | 'false'

+| CACHE_INDEXES_ON_WRITE | 'true' \| 'false'                                                          | 'false'

+| COMPACT                | 'true' \| 'false'                                                          | 'true'

+| COMPACT_COMPRESSION    | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'                               | 'NONE'

+| COMPRESSION            | 'GZ' \| 'LZ4' \| 'LZO' \| 'NONE' \| 'SNAPPY'                               | 'NONE'

+| DATA_BLOCK_ENCODING    | 'DIFF' \| 'FAST_DIFF' \| 'NONE' \| 'PREFIX'                                | 'NONE'

+| DURABILITY             | 'USE_DEFAULT' \| 'SKIP_WAL' \| 'ASYNC_WAL' \| 'SYNC_WAL' \| 'FSYNC_WAL'    | 'SYNC_WAL'

+| EVICT_BLOCKS_ON_CLOSE  | 'true' \| 'false'                                                          | 'false'

+| IN_MEMORY              | 'true' \| 'false'                                                          | 'false'

+| KEEP_DELETED_CELLS     | 'true' \| 'false'                                                          | 'false'

+| MAX_FILESIZE           | '2*1024*1024' - '2^63^-1'                                                  | '10*1024*1024*1024'

+| MAX_VERSIONS           | '1' \| '_positive-integer_'                                                | '1'

+| MEMSTORE_FLUSH_SIZE    | '1024*1024' - '2^63^-1'                                                    | '128*1024*1024'

+| MIN_VERSIONS           | '0' \| '_positive-integer_'                                                | '0'

+| PREFIX_LENGTH_KEY      | '_positive-integer_', which should be less than maximum length of the key for the table.

+It applies only if the SPLIT_POLICY is `KeyPrefixRegionSplitPolicy`.                                  | '2'

+| REPLICATION_SCOPE      | '0' \| '1'                                                                 | '1'

+| SPLIT_POLICY           | 'org.apache.hadoop.hbase.regionserver. +

+ConstantSizeRegionSplitPolicy' \| +

+'org.apache.hadoop.hbase.regionserver. +

+IncreasingToUpperBoundRegionSplitPolicy' \| +

+'org.apache.hadoop.hbase.regionserver. +

+KeyPrefixRegionSplitPolicy'

+|'org.apache.hadoop.hbase.regionserver. +

+IncreasingToUpperBoundRegionSplitPolicy'                                                      

+| TTL                    | '_positive-integer_', which should be less than or equal to 2147483647.

+                      | '2147483647' (forever)

 |===

 

-1. Values in boldface are default values.

-

 * `LOAD IF EXISTS`

 +

 loads data into an existing table. Must be used with AS _select-query_.

diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc
index 3b42d81..8f3408e 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc
@@ -407,13 +407,14 @@
 LOAD [WITH option[[,] option]...] INTO target-table SELECT ... FROM source-table

 

 option is:

-    TRUNCATE TABLE

-  | NO RECOVERY

+    CONTINUE ON ERROR

+  | LOG ERROR ROWS [TO error-location-name]

+  | STOP AFTER num ERROR ROWS

+  | TRUNCATE TABLE

   | NO POPULATE INDEXES

   | NO DUPLICATE CHECK

   | NO OUTPUT

   | INDEX TABLE ONLY

-  | UPSERT USING LOAD

 ```

 

 [[load_syntax]]

@@ -440,19 +441,58 @@
 is a set of options that you can specify for the load operation. You can

 specify one or more of these options:

 

+** `CONTINUE ON ERROR`

++

+LOAD statement will continue after ignorable errors while scanning rows from source table or loading into the target table. The ignorable errors are usually data conversion errors.

++

+Errors during the load or sort phase will cause the LOAD statement to abort. 

++

+This option is implied if `LOG ERROR ROWS [TO _error-location-name_]` or `STOP AFTER _num_ ERROR ROWS` is specified.

+

+** `LOG ERROR ROWS [TO _error-location-name_]`

+*** Error rows

++

+If error rows must be written to a specified location, then specify TO _error-location-name_, otherwise they will be written to the default location.

+`_error-location-name_` must be a HDFS directory name to which trafodion has write access.

++

+Error rows will be logged in HDFS files in the *directory* `/user/trafodion/bulkload/logs` if the error log location is not specified. 

++

+The default name of the *subdirectory* is `_ERR_catalog.schema.target_table_date_id_`, where `_id_` is a numeric identifier timestamp (YYYYMMDD_HHMMSS) unique to the process where the error was seen.

++

+The default name of the *error file* is `_loggingFileNamePrefix_catalog.schema.target_table_instanceID_`, where `_loggingFileNamePrefix_` is hive_scan_err or traf_upsert_err depending on the data source table, and `_instanceID_` is the instance ID starting from 0, generally there is only one instance.

++

+For example, the full path of the table test_load_log is `/user/trafodion/bulkload/logs/test/ERR_TRAFODION.SEABASE.TEST_LOAD_LOG_20171218_035918/traf_upsert_err_TRAFODION.SEABASE.TEST_LOAD_LOG_0`,

++

+where:

++

+1. `/user/trafodion/bulkload/logs/test` is the default name of *directory*.

++

+2. `ERR_TRAFODION.SEABASE.TEST_LOAD_LOG_20171218_035918` is the default name of *subdirectory*.

++

+3. `traf_upsert_err_TRAFODION.SEABASE.TEST_LOAD_LOG_0` is the default name of *error file*.

+

+*** Error logs

++

+Error logs are written in separate files by the processes involved in the load command under sub-directory representing the load command in the given location.

++

+The actual log file location is displayed in the load command output. It is recommended that you use the same location for load as it’s easier to find the error logs.

+

+** `STOP AFTER _num_ ERROR ROWS`

++

+Use this option to stop the LOAD statement from progressing after _num_ errors have been encountered during this statement. 

++

+Duplicate check errors and errors that were ignored as a result of the previous options are included in the count.

+

+*** _num_

++

+specifies the number of error rows.

+

 ** `TRUNCATE TABLE`

 +

 causes the Bulk Loader to truncate the target table before starting the

 load operation. By default, the Bulk Loader does not truncate the target

 table before loading data.

 

-** `NO RECOVERY`

-+

-specifies that the Bulk Loader not use HBase snapshots for recovery. By

-default, the Bulk Loader handles recovery using the HBase snapshots

-mechanism.

-

-<<<

 ** `NO POPULATE INDEXES`

 +

 specifies that the Bulk Loader not handle index maintenance or populate

@@ -472,16 +512,11 @@
 the LOAD statement prints status messages listing the steps that the

 Bulk Loader is executing.

 

-* `INDEX TABLE ONLY`

+** `INDEX TABLE ONLY`

 +

 specifies that the target table, which is an index, be populated with

 data from the parent table.

 

-* `UPSERT USING LOAD`

-+

-specifies that the data be inserted into the target table using row set

-inserts without a transaction.

-

 <<<

 [[load_considerations]]

 === Considerations for LOAD

@@ -615,6 +650,532 @@
 [[load_examples]]

 === Examples of LOAD

 

+[[continue_on_error_examples]]

+==== Examples of `CONTINUE ON ERROR`

+ 

+Suppose that we have following tables:

+

+_source_table_:

+

+```

+SQL>SELECT * FROM source_table; 

+A            B   

+-----------  ----

+          1  aaaa

+          2  bbbb

+          3  cccc

+          4  dd  

+          5  ee  

+          6  fff 

+--- 6 row(s) selected.

+

+SQL>SHOWDDL source_table;

+CREATE TABLE TRAFODION.SEABASE.SOURCE_TABLE

+  ( 

+    A                                INT DEFAULT NULL NOT SERIALIZED

+  , B                                CHAR(4) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+--- SQL operation complete.

+```

+

+_target_table1_:

+

+```

+SQL>SELECT * FROM target_table1;

+--- 0 row(s) selected.

+

+SQL>SHOWDDL target_table1; 

+CREATE TABLE TRAFODION.SEABASE.TARGET_TABLE1

+  ( 

+    A                                INT DEFAULT NULL NOT SERIALIZED

+  , B                                CHAR(3) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+--- SQL operation complete.

+```

+* The examples below demonstrate how the LOAD Statement behaves without and with `CONTINUE ON ERROR`, when ignorable data conversion errors occur.

+

+** When loading data from _source_table_ into _target_table1_ if `CONTINUE ON ERROR` is not specified, the operation fails with a data conversion error.

++

+```

+SQL>LOAD INTO target_table1 SELECT * FROM source_table;

+

+UTIL_OUTPUT

+-------------------------------------------------------------------------------------------

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1                                                          

+Task:  CLEANUP         Status: Started    Time: 2018-01-03 16:15:53.222441                                                      

+Task:  CLEANUP         Status: Ended      Time: 2018-01-03 16:15:53.250826

+Task:  CLEANUP         Status: Ended      Elapsed Time:    00:00:00.028

+Task:  LOADING DATA    Status: Started    Time: 2018-01-03 16:15:53.250909                                                      

+*** ERROR[8402] A string overflow occurred during the evaluation of a character expression. Conversion of Source Type:CHAR(REC_BYTE_F_ASCII,4 BYTES,ISO88591) Source Value:aaaa to Target Type:CHAR(REC_BYTE_F_ASCII,3 BYTES,ISO88591). [2018-01-03 16:15:54]

+```

+

+** When loading data from _source_table_ into _target_table1_ if `CONTINUE ON ERROR` is specified, the operation succeeds after ignorable data conversion errors.

++

+```

+SQL>LOAD WITH CONTINUE ON ERROR INTO target_table1 SELECT * FROM source_table;

+

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1

+Task:  CLEANUP         Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1

+Task:  CLEANUP         Status: Ended      Object: TRAFODION.SEABASE.TARGET_TABLE1

+Task:  PREPARATION     Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1

+       Rows Processed: 3 

+Task:  PREPARATION     Status: Ended      ET: 00:00:03.151

+Task:  COMPLETION      Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1

+Task:  COMPLETION      Status: Ended      ET: 00:00:01.137

+--- 3 row(s) loaded.

+

+UTIL_OUTPUT

+-------------------------------------------------------------------------------------------

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1                                                          

+Task:  CLEANUP         Status: Started    Time: 2018-01-03 16:19:43.543405                                                      

+Task:  CLEANUP         Status: Ended      Time: 2018-01-03 16:19:43.568828

+Task:  CLEANUP         Status: Ended      Elapsed Time:    00:00:00.025

+Task:  LOADING DATA    Status: Started    Time: 2018-01-03 16:19:43.568899                                                      

+       Rows Processed: 6 

+       Error Rows:     3 

+Task:  LOADING DATA    Status: Ended      Time: 2018-01-03 16:19:44.211150

+Task:  LOADING DATA    Status: Ended      Elapsed Time:    00:00:00.642

+Task:  COMPLETION      Status: Started    Time: 2018-01-03 16:19:44.211192                                                      

+       Rows Loaded:    3 

+Task:  COMPLETION      Status: Ended      Time: 2018-01-03 16:19:45.171458

+Task:  COMPLETION      Status: Ended      Elapsed Time:    00:00:00.960

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table1;  

+A           B   

+----------- ----

+          4 dd  

+          5 ee  

+          6 fff 

+--- 3 row(s) selected.

+```

+

+[[log_error_rows_examples]]

+==== Examples of `LOG ERROR ROWS [TO error-location-name]`

+

+Suppose that we have the same two tables (_source_table_ and _target_table1_) as shown in the <<continue_on_error_examples,Examples of `CONTINUE ON ERROR`>>.

+

+** This example explains how the LOAD statement loads data and logs error rows to the default directory `/user/trafodion/bulkload/logs`.

++

+```

+SQL>LOAD WITH LOG ERROR ROWS INTO target_table1 SELECT * FROM source_table;

+

+UTIL_OUTPUT

+-------------------------------------------------------------------------------------------

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1                                                          

+Task:  CLEANUP         Status: Started    Time: 2018-01-03 16:23:03.142862                                                      

+Task:  CLEANUP         Status: Ended      Time: 2018-01-03 16:23:03.151725

+Task:  CLEANUP         Status: Ended      Elapsed Time:    00:00:00.009

+Logging Location: /user/trafodion/bulkload/logs/ERR_TRAFODION.SEABASE.TARGET_TABLE_20180103_082303

+Task:  LOADING DATA    Status: Started    Time: 2018-01-03 16:23:03.151815

+       Rows Processed: 6 

+       Error Rows:     3 

+Task:  LOADING DATA    Status: Ended      Time: 2018-01-03 16:23:03.920270

+Task:  LOADING DATA    Status: Ended      Elapsed Time:    00:00:00.768

+Task:  COMPLETION      Status: Started    Time: 2018-01-03 16:23:03.920313                                                      

+       Rows Loaded:    3 

+Task:  COMPLETION      Status: Ended      Time: 2018-01-03 16:23:04.301579

+Task:  COMPLETION      Status: Ended      Elapsed Time:    00:00:00.381

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table1;  

+A           B   

+----------- ----

+          4 dd  

+          5 ee  

+          6 fff 

+--- 3 row(s) selected.

+

+[root@cent-1 bin]$ hdfs dfs -ls /user/trafodion/bulkload/logs/

+Found 1 items

+drwxr-xr-x   - trafodion trafodion          0 2018-01-13 16:23 

+/user/trafodion/bulkload/logs/ERR_TRAFODION.SEABASE.TARGET_TABLE_20180103_082303

+```

+

+** This example shows how the LOAD statement loads and logs error rows to the specified directory `/bulkload/error_log`.

++

+```

+SQL>LOAD WITH LOG ERROR ROWS TO '/BULKLOAD/ERROR_LOG' INTO target_table1 SELECT * FROM source_table;

+

+UTIL_OUTPUT

+-------------------------------------------------------------------------------------------

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1                                                               

+Task:  CLEANUP         Status: Started    Time: 2018-01-03 17:19:43.436689                                                      

+Task:  CLEANUP         Status: Ended      Time: 2018-01-03 17:19:43.456761

+Task:  CLEANUP         Status: Ended      Elapsed Time:    00:00:00.020

+Logging Location: /bulkload/error_log/ERR_TRAFODION.SEABASE.TARGET_TABLE_20180103_091943

+Task:  LOADING DATA    Status: Started    Time: 2018-01-03 17:19:43.456804

+       Rows Processed: 6 

+       Error Rows:     3 

+Task:  LOADING DATA    Status: Ended      Time: 2018-01-03 17:19:43.722825

+Task:  LOADING DATA    Status: Ended      Elapsed Time:    00:00:00.266

+Task:  COMPLETION      Status: Started    Time: 2018-01-03 17:19:43.722868                                                      

+       Rows Loaded:    3 

+Task:  COMPLETION      Status: Ended      Time: 2018-01-03 17:19:44.591544

+Task:  COMPLETION      Status: Ended      Elapsed Time:    00:00:00.869

+--- SQL operation complete.

+

+[root@cent-1 bin]$ hdfs dfs -ls /bulkload/error_log

+Found 1 items

+drwxr-xr-x   - trafodion trafodion          0 2018-01-03 17:19 

+/bulkload/error_log/ERR_TRAFODION.SEABASE.TARGET_TABLE_20180103_091943

+```

+

+[[stop_after_num_error_rows_examples]]

+==== Examples of `STOP AFTER num ERROR ROWS`

+

+Suppose that we have the same two tables (_source_table_ and _target_table1_) as shown in the <<continue_on_error_examples,Examples of `CONTINUE ON ERROR`>>.

+

+The examples below illustrate how the LOAD statement behaves depending on the value of `num`. 

+

+```

+SQL>LOAD WITH STOP AFTER 2 ERROR ROWS INTO target_table1 SELECT * FROM source_table;

+

+UTIL_OUTPUT

+---------------------------------------------------------------------------------------------

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1                                                               

+Task:  CLEANUP         Status: Started    Time: 2018-01-05 10:53:52.20569                                                       

+Task:  CLEANUP         Status: Ended      Time: 2018-01-05 10:53:52.45689

+Task:  CLEANUP         Status: Ended      Elapsed Time:    00:00:00.025

+Task:  LOADING DATA    Status: Started    Time: 2018-01-05 10:53:52.45757                                                       

+*** WARNING[8114] The number of error rows is 3 [2018-01-05 10:53:53]

+*** ERROR[8113] The maximum number of error rows is exceeded. [2018-01-05 10:53:53]

+*** WARNING[8114] The number of error rows is 3 [2018-01-05 10:53:53]

+

+SQL>SELECT * FROM target_table1;

+--- 0 row(s) selected.

+```

+

+```

+SQL>LOAD WITH STOP AFTER 3 ERROR ROWS INTO target_table1 SELECT * FROM source_table;

+

+UTIL_OUTPUT

+---------------------------------------------------------------------------------------------

+Task:  LOAD            Status: Started    Object: TRAFODION.SEABASE.TARGET_TABLE1                                                               

+Task:  CLEANUP         Status: Started    Time: 2018-01-05 15:55:58.975459                                                      

+Task:  CLEANUP         Status: Ended      Time: 2018-01-05 15:55:59.20219

+Task:  CLEANUP         Status: Ended      Elapsed Time:    00:00:00.045

+Task:  LOADING DATA    Status: Started    Time: 2018-01-05 15:55:59.20322                                                       

+       Rows Processed: 6 

+       Error Rows:     3 

+Task:  LOADING DATA    Status: Ended      Time: 2018-01-05 15:55:59.960109

+Task:  LOADING DATA    Status: Ended      Elapsed Time:    00:00:00.940

+Task:  COMPLETION      Status: Started    Time: 2018-01-05 15:55:59.960180                                                      

+       Rows Loaded:    3 

+Task:  COMPLETION      Status: Ended      Time: 2018-01-05 15:56:00.448496

+Task:  COMPLETION      Status: Ended      Elapsed Time:    00:00:00.488

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table1;

+A           B   

+----------- ----

+          4 dd  

+          5 ee  

+          6 fff 

+--- 3 row(s) selected.

+```

+

+[[index_table_only_examples]]

+==== Examples of `INDEX TABLE ONLY`

+

+Suppose that we have following tables:

+

+_source_table_:

+```

+SQL>SELECT * FROM source_table;   

+A            B   

+-----------  ----

+          1  aaaa

+          2  bbbb

+          3  cccc

+          4  dd  

+          5  ee  

+          6  fff 

+--- 6 row(s) selected.

+

+SQL>SHOWDDL source_table;

+CREATE TABLE TRAFODION.SEABASE.SOURCE_TABLE

+  ( 

+    A                                INT DEFAULT NULL NOT SERIALIZED

+  , B                                CHAR(4) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+--- SQL operation complete.

+```

+

+_target_table1_:

+```

+SQL>SELECT * FROM target_table1;

+--- 0 row(s) selected.

+

+SQL>SHOWDDL target_table1;

+CREATE TABLE TRAFODION.SEABASE.TARGET_TABLE1

+  (

+    A                                INT DEFAULT NULL NOT SERIALIZED

+  , B                                CHAR(3) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT

+;

+--- SQL operation complete.

+```

+

+_target_table2_:

+```

+SQL>SELECT * FROM target_table2;

+--- 0 row(s) selected.

+

+SQL>SHOWDDL target_table2;

+CREATE TABLE TRAFODION.SEABASE.TARGET_TABLE2

+  (

+    A                                INT NO DEFAULT NOT NULL NOT DROPPABLE NOT

+      SERIALIZED

+  , B                                CHAR(4) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  , PRIMARY KEY (A ASC)

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+--- SQL operation complete.

+```

+

+_target_table3_:

+```

+SELECT * FROM target_table3;

+--- 0 row(s) selected.

+

+SHOWDDL target_table3;

+CREATE TABLE TRAFODION.SEABASE.TARGET_TABLE3

+  (

+    A                                INT NO DEFAULT NOT NULL NOT DROPPABLE NOT

+      SERIALIZED

+  , B                                CHAR(4) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  , PRIMARY KEY (A ASC)

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+--- SQL operation complete.

+```

+

+_target_table4_:

+```

+SELECT * FROM target_table4;

+--- 0 row(s) selected.

+

+CREATE TABLE TRAFODION.SEABASE.TARGET_TABLE4

+  (

+    A                                INT DEFAULT NULL NOT SERIALIZED

+  , B                                CHAR(4) CHARACTER SET ISO88591 COLLATE

+      DEFAULT DEFAULT NULL NOT SERIALIZED

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+--- SQL operation complete.

+```

+

+* The examples below demonstrate how the index table and target table get populated.

+** The index table gets populated, while the target table does not get populated if `NO POPULATE` is specified.

++

+```

+SQL>CREATE INDEX index_target_table1 ON target_table1(b) NO POPULATE;

+--- SQL operation complete.

+

+SQL>SET PARSERFLAGS 1;

+--- SQL operation complete.

+

+SQL>LOAD WITH INDEX TABLE ONLY INTO TABLE(INDEX_TABLE index_target_table1) SELECT b,a FROM source_table;

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table1;

+--- 0 row(s) selected.

+

+SELECT * FROM table(index_table index_target_table1);  

+B@    A              

+----  --------------------

+aaaa                     1

+bbbb                     2

+cccc                     3

+dd                       4

+ee                       5

+fff                      6

+--- 6 row(s) selected.

+```

+

+** The index table gets populated, and the target table gets populated as well if `NO POPULATE` is not specified.

++

+```

+SQL>CREATE INDEX index_target_table1 ON target_table1(b);

+--- SQL operation complete.

+

+SQL>SET PARSERFLAGS 1;

+--- SQL operation complete.

+

+SQL>LOAD WITH INDEX TABLE ONLY INTO TABLE(INDEX_TABLE index_target_table1) SELECT b,a FROM source_table;

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table1;

+A            B   

+-----------  ----

+          1  aaaa

+          2  bbbb

+          3  cccc

+          4  dd  

+          5  ee  

+          6  fff 

+--- 6 row(s) selected.

+

+SQL>SELECT * FROM table(index_table index_target_table1);  

+B@    A              

+----  --------------------

+aaaa                     1

+bbbb                     2

+cccc                     3

+dd                       4

+ee                       5

+fff                      6

+--- 6 row(s) selected.

+```

+

+* The examples below illustrate how to populate index tables depending on different target tables.

+** The _target_table2_ has columns A (primary key column) and B (index column) defined, in this case, populate the index table with columns B and A from the _source_table_.

++ 

+```

+SQL>CREATE INDEX index_target_table2 ON target_table2(b) NO POPULATE;

+--- SQL operation complete.

+

+SQL>SET PARSERFLAGS 1;

+--- SQL operation complete.

+

+SQL>SHOWDDL TABLE(INDEX_TABLE index_target_table2);

+CREATE TABLE TRAFODION.SEABASE.INDEX_TARGET_TABLE2

+  (

+    "B@"                             CHAR(4) CHARACTER SET ISO88591 COLLATE

+      DEFAULT NO DEFAULT NOT SERIALIZED

+  , A                                INT NO DEFAULT NOT NULL NOT DROPPABLE NOT

+      SERIALIZED

+  , PRIMARY KEY ("B@" ASC, A ASC)

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+

+SQL>LOAD WITH INDEX TABLE ONLY INTO TABLE(INDEX_TABLE index_target_table2) SELECT b,a FROM source_table;

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table2;

+--- 0 row(s) selected.

+

+SQL>SELECT * FROM TABLE(INDEX_TABLE index_target_table2);  

+B@    A              

+----  --------------------

+aaaa                     1

+bbbb                     2

+cccc                     3

+dd                       4

+ee                       5

+fff                      6

+--- 6 row(s) selected.

+```

+

+** The _target_table3_ has columns A (primary key column and index column) and B defined, in this case, populate the index table with column A from the _source_table_.

++

+```

+SQL>CREATE INDEX index_target_table3 ON target_table3(a) NO POPULATE;

+--- SQL operation complete.

+

+SQL>SET PARSERFLAGS 1;

+--- SQL operation complete.

+

+SQL>SHOWDDL TABLE(INDEX_TABLE index_target_table3);

+CREATE TABLE TRAFODION.SEABASE.INDEX_TARGET_TABLE3

+  (

+    "A@"                             INT NO DEFAULT NOT NULL NOT DROPPABLE NOT

+      SERIALIZED

+  , PRIMARY KEY ("A@" ASC)

+  )

+ ATTRIBUTES ALIGNED FORMAT 

+;

+

+SQL>LOAD WITH INDEX TABLE ONLY INTO TABLE(INDEX_TABLE index_target_table3) SELECT a FROM source_table;

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table3;

+--- 0 row(s) selected.

+

+SQL> SELECT * FROM TABLE(INDEX_TABLE index_target_table3);

+A@         

+-----------

+          1

+          2

+          3

+          4

+          5

+          6

+--- 6 row(s) selected.

+```

+

+** The _target_table4_ has columns A (index column) and B defined, in this case, populate the index table with column A and syskey from the _source_table_.

++

+```

+SQL> CREATE INDEX index_target_table4 ON target_table4(a) NO POPULATE;

+--- SQL operation complete.

+

+SQL>SET PARSERFLAGS 1;

+--- SQL operation complete.

+

+SQL>SHOWDDL TABLE(INDEX_TABLE index_target_table4);

+CREATE TABLE TRAFODION.SEABASE.INDEX_TARGET_TABLE4

+  (

+    "A@"                             INT NO DEFAULT NOT SERIALIZED

+  , SYSKEY                           LARGEINT NO DEFAULT NOT NULL NOT DROPPABLE

+      NOT SERIALIZED

+  , PRIMARY KEY ("A@" ASC, SYSKEY ASC)

+  )

+ ATTRIBUTES ALIGNED FORMAT

+;

+--- SQL operation complete.

+

+SQL>LOAD WITH INDEX TABLE ONLY INTO TABLE(INDEX_TABLE index_target_table4) SELECT a,syskey FROM source_table;

+--- SQL operation complete.

+

+SQL>SELECT * FROM target_table4;

+--- 0 row(s) selected.

+

+SQL>SELECT * FROM TABLE(INDEX_TABLE index_target_table4);

+A@           SYSKEY              

+-----------  --------------------

+          1   4239726128363214004

+          2   4239726128363256924

+          3   4239726128363258834

+          4   4239726128363260240

+          5   4239726128363261628

+          6   4239726128363263088

+--- 6 row(s) selected.

+```

+

+NOTE: At this moment, if you want to drop the index, such as _index_target_table2_, _index_target_table3_ or _index_target_table4_ created above, please populate the index from its parent table before dropping it, see the example below. For more information, see <<populate_index_utility,POPULATE INDEX Utility>>.

+

+```

+SQL> DROP INDEX index_target_table4;

+*** ERROR[4254] Object TRAFODION.SEABASE.INDEX_TARGET_TABLE4 has invalid state and cannot be accessed. Use cleanup command to drop it.

+

+SQL> POPULATE INDEX index_target_table4 ON target_table4;

+--- SQL operation complete.

+

+SQL> DROP INDEX index_target_table4;

+--- SQL operation complete.

+```

+[[loading_data_from_hive_table_examples]]

+==== Examples of Loading data from Hive Table

+

 * For customer demographics data residing in

 `/hive/tpcds/customer_demographics`, create an external Hive table using

 the following Hive SQL:

diff --git a/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg b/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg
new file mode 100644
index 0000000..a3ea166
--- /dev/null
+++ b/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg
Binary files differ
diff --git a/docs/src/site/markdown/download.md b/docs/src/site/markdown/download.md
index b974fb4..ac014b1 100644
--- a/docs/src/site/markdown/download.md
+++ b/docs/src/site/markdown/download.md
@@ -125,4 +125,4 @@
 
 * * * *
 
-Note: when downloading from a mirror please check the [md5sum](http://www.apache.org/dev/release-signing#md5) and verify the [OpenPGP](http://www.apache.org/dev/release-signing#openpgp) compatible signature from the main [Apache](http://www.apache.org/) site. Links are provided above (next to the release download link). This [KEYS](http://www.apache.org/dist/incubator/trafodion/KEYS) file contains the public keys used for signing release. It is recommended that (when possible) a [web of trust](http://www.apache.org/dev/release-signing#web-of-trust) is used to confirm the identity of these keys. For more information, please see the [Apache Release FAQ](http://www.apache.org/dev/release.html).
+Note: when downloading from a mirror please check the [md5sum](http://www.apache.org/dev/release-signing#md5) and verify the [OpenPGP](http://www.apache.org/dev/release-signing#openpgp) compatible signature from the main [Apache](http://www.apache.org/) site. Links are provided above (next to the release download link). This [KEYS](http://www.apache.org/dist/trafodion/KEYS) file contains the public keys used for signing release. It is recommended that (when possible) a [web of trust](http://www.apache.org/dev/release-signing#web-of-trust) is used to confirm the identity of these keys. For more information, please see the [Apache Release FAQ](http://www.apache.org/dev/release.html).
diff --git a/docs/src/site/markdown/enable-secure-trafodion.md b/docs/src/site/markdown/enable-secure-trafodion.md
index c36cfcc..cc601f8 100644
--- a/docs/src/site/markdown/enable-secure-trafodion.md
+++ b/docs/src/site/markdown/enable-secure-trafodion.md
@@ -14,5 +14,5 @@
 -->
 
 Redirecting to the Trafodion Provisioning Guide...
-<p><meta http-equiv="refresh" content="0; url=http://trafodion.incubator.apache.org/docs/provisioning_guide/target/site/index.html#enable-security"></meta></p>
+<p><meta http-equiv="refresh" content="0; url=http://trafodion.apache.org/docs/provisioning_guide/target/site/index.html#enable-security"></meta></p>
 
diff --git a/docs/src/site/markdown/index.md b/docs/src/site/markdown/index.md
index b8798b3..a023bfa 100644
--- a/docs/src/site/markdown/index.md
+++ b/docs/src/site/markdown/index.md
@@ -31,8 +31,8 @@
 
 Trafodion provides SQL access to structured, semi-structured, and unstructured data allowing you to run operational, historical, and analytical workloads on a single platform.
 
-[revolution]: http://trafodion.incubator.apache.org
-[scale]: http://trafodion.incubator.apache.org
+[revolution]: http://trafodion.apache.org
+[scale]: http://trafodion.apache.org
 [stack]: index.html
 
 
@@ -45,7 +45,7 @@
   <p><h5>We're working on release 2.2!</h5></p> 
   <p>Check out the <a href="https://cwiki.apache.org/confluence/display/TRAFODION/Roadmap">Roadmap</a> page for planned content.</p>
   <p><h5>Apache Trafodion 2.1.0-incubating was released on May 1, 2017</h5></p> 
-  <p>Check it out on the <a href="http://trafodion.incubator.apache.org/download.html">Download</a> page.</p>
+  <p>Check it out on the <a href="http://trafodion.apache.org/download.html">Download</a> page.</p>
   <p><h5>Want to disucss Trafodion in Chinese? Join the Trafodion discussion on Tencent QQ!</h5></p> 
   <p><a href="http://im.qq.com/">QQ</a> Group ID: 176011868.</p>
 </td></tr></table>
@@ -70,14 +70,12 @@
 
 ## About
 
-Apache Trafodion (incubating) is a webscale SQL-on-Hadoop solution enabling transactional or operational workloads on Apache Hadoop. 
+Apache Trafodion is a webscale SQL-on-Hadoop solution enabling transactional or operational workloads on Apache Hadoop. 
 
 The name &quot;Trafodion&quot; (the Welsh word for transactions, pronounced &quot;Tra-vod-eee-on&quot;) was chosen specifically to emphasize the differentiation that Trafodion provides in closing a critical gap in the Hadoop ecosystem. 
 
 Trafodion builds on the scalability, elasticity, and flexibility of Hadoop. Trafodion extends Hadoop to provide guaranteed transactional integrity, enabling new kinds of big data applications to run on Hadoop. 
 
-<em>Disclaimer: Apache Trafodion is an effort undergoing incubation at the Apache Software Foundation (ASF), sponsored by the Apache Incubator PMC. Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF.</em>
-
 ---
 
 ## Key Features
diff --git a/docs/src/site/markdown/ldapcheck.md b/docs/src/site/markdown/ldapcheck.md
index 02575ad..b5f12cc 100644
--- a/docs/src/site/markdown/ldapcheck.md
+++ b/docs/src/site/markdown/ldapcheck.md
@@ -13,5 +13,5 @@
   License.
 -->
 Redirecting to the Trafodion Provisioning Guide...
-<p><meta http-equiv="refresh" content="0; url=http://trafodion.incubator.apache.org/docs/provisioning_guide/target/site/index.html#enable-security-ldapcheck"></meta></p>
+<p><meta http-equiv="refresh" content="0; url=http://trafodion.apache.org/docs/provisioning_guide/target/site/index.html#enable-security-ldapcheck"></meta></p>
 
diff --git a/docs/src/site/markdown/ldapconfigcheck.md b/docs/src/site/markdown/ldapconfigcheck.md
index b24b16a..bada8f5 100644
--- a/docs/src/site/markdown/ldapconfigcheck.md
+++ b/docs/src/site/markdown/ldapconfigcheck.md
@@ -13,4 +13,4 @@
   License.
 -->
 Redirecting to the Trafodion Provisioning Guide...
-<p><meta http-equiv="refresh" content="0; url=http://trafodion.incubator.apache.org/docs/provisioning_guide/target/site/index.html#enable-security-ldapconfigcheck"></meta></p>
+<p><meta http-equiv="refresh" content="0; url=http://trafodion.apache.org/docs/provisioning_guide/target/site/index.html#enable-security-ldapconfigcheck"></meta></p>
diff --git a/docs/src/site/markdown/quickstart.md b/docs/src/site/markdown/quickstart.md
index 22fbc66..a2b138b 100644
--- a/docs/src/site/markdown/quickstart.md
+++ b/docs/src/site/markdown/quickstart.md
@@ -101,8 +101,8 @@
 
 # More Information
 
-For more information, refer to the [Trafodion Documentation Page] (documentation.html) for starting points.  There is also a lot more information on the [Trafodion website] (http://trafodion.incubator.apache.org) and [Trafodion Wiki] (https://cwiki.apache.org/confluence/display/TRAFODION/Apache+Trafodion+Home) that you might refer to as you explore Trafodion.
+For more information, refer to the [Trafodion Documentation Page](documentation.html) for starting points. There is also a lot more information on the [Trafodion website](http://trafodion.apache.org) and the [Trafodion Wiki](https://cwiki.apache.org/confluence/display/TRAFODION/Apache+Trafodion+Home) that you might refer to as you explore Trafodion.
 
-If you have questions or suggestions or just want to share what you've learned about Trafodion, you can contact a community of Trafodion users via the [Trafodion User Group mailing list](http://mail-archives.apache.org/mod_mbox/incubator-trafodion-user/) or other [Project Mailing Lists] (http://trafodion.apache.org/mail-lists.html)
+If you have questions or suggestions, or just want to share what you've learned about Trafodion, you can contact a community of Trafodion users via the [Trafodion User Group mailing list](http://mail-archives.apache.org/mod_mbox/trafodion-user/) or the other [Project Mailing Lists](http://trafodion.apache.org/mail-lists.html).
 
-Have fun with Trafodion!
\ No newline at end of file
+Have fun with Trafodion!
diff --git a/docs/src/site/markdown/release-notes-1-0-1.md b/docs/src/site/markdown/release-notes-1-0-1.md
index 83b2544..61c4e86 100644
--- a/docs/src/site/markdown/release-notes-1-0-1.md
+++ b/docs/src/site/markdown/release-notes-1-0-1.md
@@ -247,4 +247,4 @@
 
 **Cause:** This is a day-one issue.
 
-**Solution:** Retry the query after two minutes. Set <code>CQD HIST_NO_STATS_REFRESH_INTERVAL</code> to '<code>0</code>'. Run an <code>UPDATE STATISTICS</code> statement. Perform DML operations in a different session.
\ No newline at end of file
+**Solution:** Retry the query after two minutes. Set <code>CQD HIST_NO_STATS_REFRESH_INTERVAL</code> to '<code>0</code>'. Run an <code>UPDATE STATISTICS</code> statement. Perform DML operations in a different session.
diff --git a/docs/src/site/site.xml b/docs/src/site/site.xml
index 3b46546..7372f5c 100644
--- a/docs/src/site/site.xml
+++ b/docs/src/site/site.xml
@@ -35,7 +35,7 @@
     <!-- Apache Trafodion, but with "Trafodion" highlighted -->
     <name>
        <![CDATA[
-       <img src="images/logos/trafodion-logo.jpg" alt="Trafodion Logo" width="383"> <a href="http://incubator.apache.org" target="_blank"><span class="logo-right"><img id="incubator-logo" alt="Apache Incubator" src="images/logos/egg-logo.png" /></span></a>
+       <img src="images/logos/trafodion-logo.jpg" alt="Trafodion Logo" width="383">
        ]]>
     </name>
     <href>index.html</href>
@@ -51,13 +51,14 @@
       <breadcrumbs>false</breadcrumbs>   
       <imgLightbox>false</imgLightbox>
       <markPageHeader>false</markPageHeader>
+      <protocolRelativeURLs>true</protocolRelativeURLs>
       <smoothScroll>true</smoothScroll>
       <!--  Enable code highlighting -->
       <highlightJs>true</highlightJs>
       <brand>
         <!-- Brand text in top-left part of the site -->
         <name>
-          <![CDATA[<span class="brand-apache">Apache </span><span class="brand-trafodion">Trafodion</span> <span class="brand-apache">(incubating)</span> ]]>
+          <![CDATA[<span class="brand-apache">Apache </span><span class="brand-trafodion">Trafodion</span> <span class="brand-apache"></span> ]]>
         </name>
         <href>index.html</href>
       </brand>
@@ -251,7 +252,6 @@
       <item href="https://cwiki.apache.org/confluence/display/TRAFODION/Metadata+Cleanup" name="Metadata Cleanup Utility"/>
     </menu>
     <menu name="Apache">
-      <item href="http://incubator.apache.org/projects/trafodion.html" name="Project Status" target="_blank"/>
       <item href="http://www.apache.org/foundation/how-it-works.html" name="Apache Foundation" target="_blank"/>
       <item href="http://www.apache.org/licenses/" name="Apache License" target="_blank"/>
       <item href="http://www.apache.org/security/" name="Apache Security" target="_blank"/>
@@ -262,11 +262,8 @@
     <footer>
       <hr />
       <div class="row span16">
-        <span>Apache, Apache Maven, Apache Maven Fluido Skin, the Apache feather logo, the Apache Maven project logo and the Apache Incubator project logo 
+        <span>Apache, Apache Maven, Apache Maven Fluido Skin, the Apache feather logo, and the Apache Maven project logo
         are trademarks of the Apache Software Foundation.</span><br /><br />
-        <span>
-          <a href="http://incubator.apache.org" target="_blank"><img id="incubator-logo" alt="Apache Incubator" src="images/logos/egg-logo.png" /></a>
-        </span>
       </div>
     </footer>
   </body>
diff --git a/install/.gitignore b/install/.gitignore
index 2aa6b05..aafac2b 100644
--- a/install/.gitignore
+++ b/install/.gitignore
@@ -1,7 +1,6 @@
 installer-*.tar.gz
 LICENSE
 NOTICE
-DISCLAIMER
 ambari-installer/RPMROOT
 ambari-installer/traf-mpack.tar.gz
 ambari-installer/mpack-install/repo
diff --git a/install/Makefile b/install/Makefile
index c131f58..13bc734 100644
--- a/install/Makefile
+++ b/install/Makefile
@@ -14,7 +14,7 @@
 #  limitations under the License.
 #
 # @@@ END COPYRIGHT @@@
-RELEASE_VER ?= ${TRAFODION_VER}-incubating
+RELEASE_VER ?= ${TRAFODION_VER}
 RELEASE_TYPE ?= $(shell echo $(TRAFODION_VER_PROD)| sed -e 's/ /-/g')
 PYINSTALLER_TARNAME = $(shell echo ${RELEASE_TYPE}_pyinstaller-${RELEASE_VER}.tar.gz |tr '[A-Z]' '[a-z]')
 
@@ -26,7 +26,7 @@
 create-dir:
 	mkdir -p ../distribution
 
-pkg-pyinstaller: create-dir python-installer/LICENSE python-installer/NOTICE python-installer/DISCLAIMER
+pkg-pyinstaller: create-dir python-installer/LICENSE python-installer/NOTICE 
 	tar czf ${PYINSTALLER_TARNAME} python-installer
 	mv ${PYINSTALLER_TARNAME} ../distribution
 
@@ -39,9 +39,6 @@
 python-installer/NOTICE: ../NOTICE
 	cp -f $? $@
 
-python-installer/DISCLAIMER: ../DISCLAIMER
-	cp -f $? $@
-
 version:
 	@echo "$(RELEASE_VER)"
 
diff --git a/install/ambari-installer/Makefile b/install/ambari-installer/Makefile
index 0c6e7c6..02819af 100644
--- a/install/ambari-installer/Makefile
+++ b/install/ambari-installer/Makefile
@@ -36,7 +36,7 @@
 # select trafodion version for given HDP stack
 REPO_LIST= 2.1.0 $(TRAFODION_VER)
 
-$(SOURCEDIR)/ambari_rpm.tar.gz: mpack-install/LICENSE mpack-install/NOTICE mpack-install/DISCLAIMER repofiles traf-mpack/mpack.json
+$(SOURCEDIR)/ambari_rpm.tar.gz: mpack-install/LICENSE mpack-install/NOTICE repofiles traf-mpack/mpack.json
 	rm -rf $(RPMROOT)
 	mkdir -p $(SOURCEDIR)
 	tar czf $@ traf-mpack mpack-install
@@ -65,9 +65,6 @@
 mpack-install/NOTICE: ../../NOTICE
 	cp -f $? $@
 
-mpack-install/DISCLAIMER: ../../DISCLAIMER
-	cp -f $? $@
-
 rpmbuild: $(SOURCEDIR)/ambari_rpm.tar.gz
 	mkdir -p $(RPMDIR)
 	mkdir -p $(BUILDDIR)
@@ -89,7 +86,6 @@
 	rm -rf $(RPMROOT)
 	rm -rf mpack-install/LICENSE
 	rm -rf mpack-install/NOTICE
-	rm -rf mpack-install/DISCLAIMER
 	rm -rf traf-mpack/custom-services/TRAFODION/*/repos
 	rm -f traf-mpack/custom-services/TRAFODION/*/metainfo.xml
 	rm -rf traf-mpack/mpack.json
diff --git a/install/ambari-installer/traf_ambari.spec b/install/ambari-installer/traf_ambari.spec
index 1c1dcb9..e49b2e9 100644
--- a/install/ambari-installer/traf_ambari.spec
+++ b/install/ambari-installer/traf_ambari.spec
@@ -29,7 +29,7 @@
 Source0:        ambari_rpm.tar.gz
 BuildArch:	noarch
 BuildRoot:	%{_tmppath}/%{name}-%{version}-%{release}
-URL:            http://trafodion.incubator.apache.org
+URL:            http://trafodion.apache.org
 
 Requires: ambari-server
 
diff --git a/licenses/Makefile b/licenses/Makefile
index cc194b7..9004179 100644
--- a/licenses/Makefile
+++ b/licenses/Makefile
@@ -19,7 +19,7 @@
 #
 # @@@ END COPYRIGHT @@@
 
-all: LICENSE-src LICENSE-server LICENSE-pyinstall LICENSE-install LICENSE-clients NOTICE-server DISCLAIMER-server
+all: LICENSE-src LICENSE-server LICENSE-pyinstall LICENSE-install LICENSE-clients NOTICE-server 
 
 # All source code included in Trafodion source
 LICENSE-src:
@@ -33,9 +33,6 @@
 NOTICE-server:
 	cat ../NOTICE note-server-bin > $@
 
-DISCLAIMER-server:
-	cat ../DISCLAIMER > $@
-
 LICENSE-install:
 	cat Apache > $@
 
diff --git a/licenses/lic-server-src b/licenses/lic-server-src
index dbe56af..fe1cf85 100644
--- a/licenses/lic-server-src
+++ b/licenses/lic-server-src
@@ -4,8 +4,8 @@
 +++++++++++++++++++++++++++++
 
 BSD-3 clause for files:
-   incubator-trafodion/core/sql/common/swsprintf.cpp
-   incubator-trafodion/core/sql/common/swscanf.cpp
+   trafodion/core/sql/common/swsprintf.cpp
+   trafodion/core/sql/common/swscanf.cpp
 
 Copyright (c) 1990, 1993
    The Regents of the University of California.  All rights reserved.
@@ -39,7 +39,7 @@
 
 +++++++++++++++++++++++++++++
 
-BSD-3 clause for file incubator-trafodion/core/sql/parser/ulexer.h
+BSD-3 clause for file trafodion/core/sql/parser/ulexer.h
 
  Copyright (c) 1993 The Regents of the University of California.
 
@@ -59,7 +59,7 @@
 
 +++++++++++++++++++++++++++++
 
-MIT-Expat for files in: incubator-trafodion/core/sql/qmscommon/expat
+MIT-Expat for files in: trafodion/core/sql/qmscommon/expat
 
  Copyright (c) 1998, 1999, 2000 Thai Open Source Software Center Ltd
                                 and Clark Cooper
@@ -68,7 +68,7 @@
  Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Expat maintainers.
  Copyright 2000, Clark Cooper
 
- For details see incubator-trafodion/core/sql/qmscommon/expat/COPYING
+ For details see trafodion/core/sql/qmscommon/expat/COPYING
 
 +++++++++++++++++++++++++++++
 
@@ -79,18 +79,18 @@
 +++++++++++++++++++++++++++++
 
 BSD-3 clause for files in:  
-   incubator-trafodion/dcs/src/main/resources/dcs-webapps/master/datatables
+   trafodion/dcs/src/main/resources/dcs-webapps/master/datatables
 
  Copyright 2009 Allan Jardine. All Rights Reserved
  Copyright 2008-2012 Allan Jardine, all rights reserved.
 
 For details see:
-  incubator-trafodion/dcs/src/main/resources/dcs-webapps/master/datatables/license-bsd.txt
+  trafodion/dcs/src/main/resources/dcs-webapps/master/datatables/license-bsd.txt
 
 +++++++++++++++++++++++++++++
 
 MIT-Expat for files in: 
-   incubator-trafodion/dcs/src/main/resources/dcs-webapps/master/js/lib/jit.js
+   trafodion/dcs/src/main/resources/dcs-webapps/master/js/lib/jit.js
 
  Copyright (c) 2011 Sencha Inc. - Author: Nicolas Garcia Belmonte (http://philogb.github.com/)
  Copyright (c) 2006-2010 Valerio Proietti
@@ -116,8 +116,8 @@
 +++++++++++++++++++++++++++++
 
 MIT-Expat for files in:
-   incubator-trafodion/dcs/src/main/resources/dcs-webapps/master/jquery-ui
-   incubator-trafodion/dcs/src/main/resources/dcs-webapps/master/js/lib/jquery-1.11.0.js
+   trafodion/dcs/src/main/resources/dcs-webapps/master/jquery-ui
+   trafodion/dcs/src/main/resources/dcs-webapps/master/js/lib/jquery-1.11.0.js
 
  Copyright 2014 jQuery Foundation and other contributors
  (c) 2005, 2014 jQuery Foundation, Inc.
@@ -142,11 +142,11 @@
 ++++++++++++++++++++++++++++++
 
 The PostgreSQL Licence (PostgreSQL) for files in:
-	incubator-trafodion/core/sql/common/ComJSON.cpp
-	incubator-trafodion/core/sql/common/ComJSON.h
-	incubator-trafodion/core/sql/common/ComJSONFuncs.cpp
-	incubator-trafodion/core/sql/common/ComJSONStringInfo.cpp
-	incubator-trafodion/core/sql/common/ComJSONStringInfo.h
+	trafodion/core/sql/common/ComJSON.cpp
+	trafodion/core/sql/common/ComJSON.h
+	trafodion/core/sql/common/ComJSONFuncs.cpp
+	trafodion/core/sql/common/ComJSONStringInfo.cpp
+	trafodion/core/sql/common/ComJSONStringInfo.h
 	
 PostgreSQL License is a liberal Open Source license, similar to the BSD or MIT licenses.
 
@@ -167,4 +167,4 @@
 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED 
 HEREUNDER IS ON AN "AS IS" BASIS, AND $ORGANISATION HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, 
 SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
-++++++++++++++++++++++++++++++
\ No newline at end of file
+++++++++++++++++++++++++++++++
diff --git a/licenses/lic-test-src b/licenses/lic-test-src
index e60863d..a487c84 100644
--- a/licenses/lic-test-src
+++ b/licenses/lic-test-src
@@ -2,7 +2,7 @@
 Software used to test Apache Trafodion bundles source with BSD-like licenses:
 
 BSD-3 clause for files in:
-  incubator-trafodion/tests/phx/src/test/java/org/trafodion/phoenix/end2end
+  trafodion/tests/phx/src/test/java/org/trafodion/phoenix/end2end
 
 Apache Trafodion changed these files to work within its test environment
 
@@ -37,7 +37,7 @@
 Two pictures were donated for this purpose by Christophe LeRouzo.  He has
 formally given Apache Trafodion to permission to use these picturs.
 
- incubator-trafodion/core/sql/regress/executor/anoush.jpg
- incubator-trafodion/core/sql/regress/executor/deep.jpg
+ trafodion/core/sql/regress/executor/anoush.jpg
+ trafodion/core/sql/regress/executor/deep.jpg
 
 +++++++++++++++++++++++++++++
diff --git a/pom.xml b/pom.xml
index 4045b79..cff1a4e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -40,7 +40,7 @@
   <packaging>pom</packaging>
   <name>Apache Trafodion</name>
   <description>Trafodion is a webscale SQL-on-Hadoop solution enabling transactional or operational workloads on Hadoop.</description>
-  <url>http://trafodion.incubator.apache.org</url>
+  <url>http://trafodion.apache.org</url>
   <inceptionYear>2015</inceptionYear>
 
   <licenses>
@@ -63,9 +63,9 @@
   </issueManagement>
 
   <scm>
-    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</connection>
-    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/incubator-trafodion.git</developerConnection>
-    <url>https://git-wip-us.apache.org/repos/asf?p=incubator-trafodion.git</url>
+    <connection>scm:git:http://git-wip-us.apache.org/repos/asf/trafodion.git</connection>
+    <developerConnection>scm:git:https://git-wip-us.apache.org/repos/asf/trafodion.git</developerConnection>
+    <url>https://git-wip-us.apache.org/repos/asf?p=trafodion.git</url>
     <tag>HEAD</tag>
   </scm>
 
@@ -77,35 +77,35 @@
   <mailingLists>
     <mailingList>
       <name>User List</name>
-      <subscribe>user-subscribe@trafodion.incubator.apache.org</subscribe>
-      <unsubscribe>user-unsubscribe@trafodion.incubator.apache.org</unsubscribe>
-      <post>user@trafodion.incubator.apache.org</post>
-      <archive>http://mail-archives.apache.org/mod_mbox/incubator-trafodion-user/</archive>
+      <subscribe>user-subscribe@trafodion.apache.org</subscribe>
+      <unsubscribe>user-unsubscribe@trafodion.apache.org</unsubscribe>
+      <post>user@trafodion.apache.org</post>
+      <archive>http://mail-archives.apache.org/mod_mbox/trafodion-user/</archive>
     </mailingList>
     <mailingList>
       <name>Developer List</name>
-      <subscribe>dev-subscribe@trafodion.incubator.apache.org</subscribe>
-      <unsubscribe>dev-unsubscribe@trafodion.incubator.apache.org</unsubscribe>
-      <post>dev@trafodion.incubator.apache.org</post>
-      <archive>http://mail-archives.apache.org/mod_mbox/incubator-trafodion-dev/</archive>
+      <subscribe>dev-subscribe@trafodion.apache.org</subscribe>
+      <unsubscribe>dev-unsubscribe@trafodion.apache.org</unsubscribe>
+      <post>dev@trafodion.apache.org</post>
+      <archive>http://mail-archives.apache.org/mod_mbox/trafodion-dev/</archive>
     </mailingList>
     <mailingList>
       <name>GitHub Codereview List</name>
-      <subscribe>codereview-subscribe@trafodion.incubator.apache.org</subscribe>
-      <unsubscribe>codereview-unsubscribe@trafodion.incubator.apache.org</unsubscribe>
-      <archive>http://mail-archives.apache.org/mod_mbox/incubator-trafodion-codereview/</archive>
+      <subscribe>codereview-subscribe@trafodion.apache.org</subscribe>
+      <unsubscribe>codereview-unsubscribe@trafodion.apache.org</unsubscribe>
+      <archive>http://mail-archives.apache.org/mod_mbox/trafodion-codereview/</archive>
     </mailingList>
     <mailingList>
       <name>Commits List</name>
-      <subscribe>commits-subscribe@trafodion.incubator.apache.org</subscribe>
-      <unsubscribe>commits-unsubscribe@trafodion.incubator.apache.org</unsubscribe>
-      <archive>http://mail-archives.apache.org/mod_mbox/incubator-trafodion-commits/</archive>
+      <subscribe>commits-subscribe@trafodion.apache.org</subscribe>
+      <unsubscribe>commits-unsubscribe@trafodion.apache.org</unsubscribe>
+      <archive>http://mail-archives.apache.org/mod_mbox/trafodion-commits/</archive>
     </mailingList>
     <mailingList>
       <name>Issues List</name>
-      <subscribe>issues-subscribe@trafodion.incubator.apache.org</subscribe>
-      <unsubscribe>issues-unsubscribe@trafodion.incubator.apache.org</unsubscribe>
-      <archive>http://mail-archives.apache.org/mod_mbox/incubator-trafodion-issues/</archive>
+      <subscribe>issues-subscribe@trafodion.apache.org</subscribe>
+      <unsubscribe>issues-unsubscribe@trafodion.apache.org</unsubscribe>
+      <archive>http://mail-archives.apache.org/mod_mbox/trafodion-issues/</archive>
     </mailingList>
   </mailingLists>
 
@@ -113,7 +113,7 @@
     <developer>
       <id>TrafodionDeveloper</id>
       <name>See list of Trafodion developers</name>
-      <email>dev@trafodion.incubator.apache.org</email>
+      <email>dev@trafodion.apache.org</email>
       <url>https://cwiki.apache.org/confluence/display/TRAFODION/Contributors</url>
     </developer>
   </developers>
@@ -121,7 +121,7 @@
   <contributors>
     <contributor>
       <name>See list of Trafodion contributors</name>
-      <email>dev@trafodion.incubator.apache.org</email>
+      <email>dev@trafodion.apache.org</email>
       <url>https://cwiki.apache.org/confluence/display/TRAFODION/Contributors</url>
     </contributor>
   </contributors>
@@ -281,9 +281,9 @@
 
   <distributionManagement>
     <site>
-      <id>trafodion.incubator.apache.org</id>
-      <name>Trafodion Website at incubator.apache.org</name>
-      <!-- On why this is the tmp dir and not trafodion.incubator.apache.org, see
+      <id>trafodion.apache.org</id>
+      <name>Trafodion Website at apache.org</name>
+      <!-- On why this is the tmp dir and not trafodion.apache.org, see
       https://issues.apache.org/jira/browse/HBASE-7593?focusedCommentId=13555866&page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-13555866
       -->
       <url>file:///tmp</url>
diff --git a/tools/docker/Dockerfile b/tools/docker/Dockerfile
index be75b9b..c849072 100644
--- a/tools/docker/Dockerfile
+++ b/tools/docker/Dockerfile
@@ -15,9 +15,9 @@
 # limitations under the License.
 
 FROM centos:centos6.6
-MAINTAINER Trafodion Community <dev@trafodion.incubator.apache.org>
+MAINTAINER Trafodion Community <dev@trafodion.apache.org>
 
-LABEL Vendor="Apache Trafodion (incubating)"
+LABEL Vendor="Apache Trafodion"
 LABEL version=unstable
 
 # download and install environment dependencies
diff --git a/tools/docker/start-compile-docker.sh b/tools/docker/start-compile-docker.sh
index f9697ec..74e3661 100755
--- a/tools/docker/start-compile-docker.sh
+++ b/tools/docker/start-compile-docker.sh
@@ -42,7 +42,7 @@
 RUN cd /home/${USER_NAME} \
  && mkdir download \
  && mkdir trafodion-build-tools \
- && wget https://raw.githubusercontent.com/apache/incubator-trafodion/master/install/traf_tools_setup.sh \
+ && wget https://raw.githubusercontent.com/apache/trafodion/master/install/traf_tools_setup.sh \
  && chmod +x traf_tools_setup.sh \
  && ./traf_tools_setup.sh -d ~/download -i ~/trafodion-build-tools \
  && rm -fr ./download \
@@ -56,9 +56,9 @@
 
 docker run -i -t \
   --rm=true \
-  -w "/home/${USER_NAME}/incubator-trafodion" \
+  -w "/home/${USER_NAME}/trafodion" \
   -u "${USER_NAME}" \
-  -v "$PWD:/home/${USER_NAME}/incubator-trafodion" \
+  -v "$PWD:/home/${USER_NAME}/trafodion" \
   -v "$HOME/.m2:/home/${USER_NAME}/.m2" \
   --name TrafodionEnv \
   ${IMAGE_NAME}-${USER_NAME} \
diff --git a/win-odbc64/odbcclient/drvr35/cdesc.cpp b/win-odbc64/odbcclient/drvr35/cdesc.cpp
index a8b2d94..73b6326 100644
--- a/win-odbc64/odbcclient/drvr35/cdesc.cpp
+++ b/win-odbc64/odbcclient/drvr35/cdesc.cpp
@@ -2299,7 +2299,8 @@
 						pHandle->setDiagRec(DRIVER_ERROR, retCode, 0, (char *)errorMsg, NULL, RowNumber, ParamNumber);

 					else

 					{

-						sprintf((char *)errorMsg," Incorrect Format or Data.", RowNumber, ParamNumber);

+						sprintf((char *)errorMsg," Incorrect Format or Data [RowNumber: %d, ParamNumber:%d].",

+                                                        RowNumber, ParamNumber);

 						pHandle->setDiagRec(DRIVER_ERROR, retCode, 0, (char *)errorMsg, NULL,

 								RowNumber, ParamNumber);

 					}

diff --git a/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp b/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
index 2b1a39f..a4f53f3 100644
--- a/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
+++ b/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
@@ -2170,7 +2170,7 @@
 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second,

 								cTmpFraction);

 				else

-					DataLen = sprintf(cTmpBuf,"%02d",

+					DataLen = sprintf(cTmpBuf,"%02d:%02d:%02d",

 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second);

 				break;

 			case SQLDTCODE_YEAR_TO_HOUR:

@@ -2717,7 +2717,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)

 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);

 				else

-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

+					sprintf(cTmpBuf,"%ld %ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

 				break;

 			case SQL_INTERVAL_MINUTE:

 				if (intervalTmp->interval_sign == SQL_TRUE)

@@ -2779,7 +2779,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)

 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);

 				else

-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

+					sprintf(cTmpBuf,"%ld %ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

 				break;

 			case SQL_INTERVAL_MINUTE:

 				if (intervalTmp->interval_sign == SQL_TRUE)

diff --git a/win-odbc64/odbcclient/drvr35/sqltocconv.cpp b/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
index 918445a..e597135 100755
--- a/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
+++ b/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
@@ -36,6 +36,36 @@
 

 #define MAXCHARLEN 32768 //32K

 

+// Workaround: on Windows Server 2008 (build 7600), calling the CRT pow() can raise
+// STATUS_ILLEGAL_INSTRUCTION, so on that build compute the power of 10 by repeated
+// multiplication; on all other builds, delegate to the standard pow() and report
+// overflow (ERANGE) through *error as IDS_22_003.

+double pow(int base, short power, unsigned long *error)

+{

+	DWORD dwVersion = 0;

+	DWORD dwBuild = 0;

+

+	dwVersion = GetVersion();

+

+	// Get the build number.

+

+	if (dwVersion < 0x80000000)

+		dwBuild = (DWORD)(HIWORD(dwVersion));

+

+	double retValue = 1;

+	if (dwBuild == 7600)

+	{

+		for (int i = 0; i < power; i++)

+			retValue = retValue * 10;

+	}

+	else

+	{

+		errno = 0;

+		retValue = pow((double)base, power);

+		if (errno == ERANGE || retValue == 0)

+			*error = IDS_22_003;

+	}

+

+	return retValue;

+}

+

 extern short convDoItMxcs(char * source,

 						  long sourceLen,

 						  short sourceType,

@@ -1277,22 +1307,30 @@
 			case SQLTYPECODE_SMALLINT:

 				dTmp = *((SHORT *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_SMALLINT_UNSIGNED:

 				dTmp = *((USHORT *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_INTEGER:

 				dTmp = *((LONG *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_INTEGER_UNSIGNED:

 				dTmp = *((ULONG *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_LARGEINT:

 				tempVal64 = *((__int64 *)srcDataPtr);

@@ -1595,6 +1633,7 @@
 								return IDS_22_003;

 						}

 					}

+					DataLen = sizeof(DATE_STRUCT);

 				}

 			}

 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_DATE, 

@@ -1845,6 +1884,7 @@
 								return IDS_22_003;

 						}

 					}

+					DataLen = sizeof(TIME_STRUCT);

 				}

 			}

 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIME, 

@@ -2066,6 +2106,7 @@
 								return IDS_22_003;

 						}

 					}

+					DataLen = sizeof(TIMESTAMP_STRUCT);

 				}

 			}

 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIMESTAMP, 

@@ -2278,7 +2319,9 @@
 				if (srcPrecision > 0)

 				{

 					// SQL returns fraction of a second which has to be converted to nano seconds

-					dTmp = (*(UDWORD*)SQLTimestamp->fraction *  1000000000.0) / pow(10,srcPrecision);

+					dTmp = (*(UDWORD*)SQLTimestamp->fraction *  1000000000.0) / pow(10,srcPrecision,&retCode);

+					if (retCode == IDS_22_003)

+						return retCode;

 					ulFraction = dTmp;

 				}

 				else

@@ -2804,7 +2847,9 @@
 				if (srcPrecision > 0)

 				{

 					// SQL returns fraction of a second which has to be converted to nano seconds

-					dTmp = (*(UDWORD*)SQLTimestamp->fraction *  1000000000.0) / pow(10,srcPrecision);

+					dTmp = (*(UDWORD*)SQLTimestamp->fraction *  1000000000.0) / pow(10,srcPrecision,&retCode);

+					if (retCode == IDS_22_003)

+						return retCode;

 					ulFraction = dTmp;

 

 				}

@@ -3298,22 +3343,30 @@
 			case SQLTYPECODE_SMALLINT:

 				dTmp = *((SHORT *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_SMALLINT_UNSIGNED:

 				dTmp = *((USHORT *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_INTEGER:

 				dTmp = *((LONG *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_INTEGER_UNSIGNED:

 				dTmp = *((ULONG *)srcDataPtr);

 				if (srcScale > 0)

-					dTmp = dTmp / (long)pow(10,srcScale);

+					dTmp = dTmp / (long)pow(10,srcScale,&retCode);

+				if (retCode == IDS_22_003)

+					return retCode;

 				break;

 			case SQLTYPECODE_LARGEINT:

 				tempVal64 = *((__int64 *)srcDataPtr);

@@ -3661,7 +3714,7 @@
 SQLRETURN ODBC::ConvertNumericToChar(SQLSMALLINT SQLDataType, SQLPOINTER srcDataPtr, SQLSMALLINT srcScale, 

 			char *cTmpBuf, SQLINTEGER &DecimalPoint)

 {

-	

+	unsigned long retCode = SQL_SUCCESS;

 	long lTmp;

 	ldiv_t lDiv;

 	__int64 i64Tmp;

@@ -3672,7 +3725,9 @@
 			lTmp = *((short *)srcDataPtr);

 			if (srcScale > 0)

 			{

-				lDiv = ldiv(lTmp, (long)pow(10,srcScale));

+				lDiv = ldiv(lTmp, (long)pow(10,srcScale,&retCode));

+				if (retCode == IDS_22_003)

+					return retCode;

 				if (gDrvrGlobal.gSpecial_1 && lDiv.quot == 0)

 				{

 					if (lDiv.rem < 0)

@@ -3695,8 +3750,9 @@
 			lTmp = *((unsigned short *)srcDataPtr);

 			if (srcScale > 0)

 			{

-				lDiv = ldiv(lTmp, (long)pow(10,srcScale));

-				lDiv = ldiv(lTmp, (long)pow(10,srcScale));

+				lDiv = ldiv(lTmp, (long)pow(10,srcScale,&retCode));

+				if (retCode == IDS_22_003)

+					return retCode;

 				if (gDrvrGlobal.gSpecial_1 && lDiv.quot == 0)

 					sprintf(cTmpBuf, ".%0*ld", srcScale, abs(lDiv.rem));

 				else

@@ -3709,7 +3765,9 @@
 			lTmp = *((long *)srcDataPtr);

 			if (srcScale > 0)

 			{

-				lDiv = ldiv(lTmp, (long)pow(10,srcScale));

+				lDiv = ldiv(lTmp, (long)pow(10,srcScale,&retCode));

+				if (retCode == IDS_22_003)

+					return retCode;

 				if (gDrvrGlobal.gSpecial_1 && lDiv.quot == 0)

 				{

 					if (lDiv.rem < 0)

@@ -3779,13 +3837,13 @@
 				sprintf(cTmpBuf, "%I64d", i64Tmp);

 			break;

 		default:

-			return SQL_ERROR;

+			retCode = IDS_07_006;

 	}

 	if ((tmpPtr = strchr(cTmpBuf, '.')) != NULL)

 		DecimalPoint = tmpPtr - cTmpBuf;

 	else

 		DecimalPoint = 0;

-	return SQL_SUCCESS;

+	return retCode;

 }

 

 SQLRETURN ODBC::ConvertDecimalToChar(SQLSMALLINT SQLDataType, SQLPOINTER srcDataPtr, SQLINTEGER srcLength, 

@@ -3898,6 +3956,7 @@
 SQLRETURN ODBC::ConvertSoftDecimalToDouble(SQLSMALLINT SQLDataType, SQLPOINTER srcDataPtr, SQLINTEGER srcLength, 

 								SQLSMALLINT srcScale, double &dTmp)

 {

+	unsigned long retCode = SQL_SUCCESS;

 	char *stopStr;

 	char cTmpBuf[256];

 	double dTmp1;

@@ -3911,7 +3970,9 @@
 			for (i = 1; i < srcLength ; cTmpBuf[i++] += '0');

 			cTmpBuf[srcLength] =  '\0';

 			dTmp = strtod(cTmpBuf,&stopStr);

-			dTmp1 = pow(10, srcScale);

+			dTmp1 = pow(10, srcScale, &retCode);

+			if (retCode == IDS_22_003)

+				return retCode;

 			dTmp = dTmp / dTmp1;

 			break;

 		case SQLTYPECODE_DECIMAL_LARGE_UNSIGNED:

@@ -3920,13 +3981,15 @@
 			for (i = 0; i < srcLength ; cTmpBuf[i++] += '0');

 			cTmpBuf[srcLength] =  '\0';

 			dTmp = strtod(cTmpBuf,&stopStr);

-			dTmp1 = pow(10, srcScale);

+			dTmp1 = pow(10, srcScale, &retCode);

+			if (retCode == IDS_22_003)

+				return retCode;

 			dTmp = dTmp / dTmp1;

 			break;

 		default:

-			return SQL_ERROR;

+			retCode = IDS_07_006;

 	}

-	return SQL_SUCCESS;

+	return retCode;

 }

 

 unsigned long ODBC::ConvertSQLCharToNumeric(SQLPOINTER srcDataPtr, SQLINTEGER srcLength,

@@ -3987,7 +4050,8 @@
 						SQLINTEGER	srcLength,

 						SQLSMALLINT CDataType,

 						SQLPOINTER outValue)

-{						  

+{

+	unsigned long retCode = SQL_SUCCESS;

     char    in_value[50];

     short   datetime_parts[8];

     char    *token;

@@ -4058,7 +4122,9 @@
 	if (token != NULL)

     {

 		int exponent = 9 - strlen(token);

-		fraction_part = (exponent >= 0)? atol(token) * pow((double)10,exponent):atol(token) / pow((double)10,-exponent) ;

+		fraction_part = (exponent >= 0)? atol(token) * pow((double)10,exponent,&retCode):atol(token) / pow((double)10,-exponent,&retCode);

+		if (retCode == IDS_22_003)

+			return retCode;

 		datetime_parts[6] = (short)(fraction_part / 1000);

 		datetime_parts[7] = (short)(fraction_part % 1000);

     }