Merge [TRAFODION-2913] PR 1403 Tweak some MDAM-related heuristics
diff --git a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
index 876e831..ce7c980 100644
--- a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
+++ b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
@@ -541,7 +541,7 @@
 			stmtLabel = stmtLabel.toUpperCase();
 		}
 
-		TrafT4CallableStatement stmt;
+		TrafT4CallableStatement stmt = null;
 
 		clearWarnings();
 		if (_isClosed() == true) {
@@ -557,8 +557,11 @@
 					return stmt;
 				}
 			}
-
-			stmt = new TrafT4CallableStatement(this, sql, stmtLabel);
+            if (stmtLabel.equalsIgnoreCase("null")) {
+                stmt = new TrafT4CallableStatement(this, sql);
+            } else {
+                stmt = new TrafT4CallableStatement(this, sql, stmtLabel);
+            }
 			stmt.prepareCall(stmt.sql_, stmt.queryTimeout_, stmt.resultSetHoldability_);
 
 			if (isStatementCachingEnabled()) {
diff --git a/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp b/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
index 9b5e5e0..c716998 100644
--- a/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
+++ b/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
@@ -647,7 +647,7 @@
 	pBuffer += length;
 	if (sqlWarningOrErrorLength > 0) pBuffer  = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
 
-	tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+	tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
 	pBuffer    = writeTraceMsg(pBuffer, temp, tempStrlen);
 	if (outValuesLength > 0) pBuffer = printHex(pBuffer, outValues, outValuesLength);
 
@@ -1243,7 +1243,7 @@
   if (sqlWarningOrErrorLength > 0) 
     pBuffer  = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
 
-  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
   pBuffer    = writeTraceMsg(pBuffer, temp, tempStrlen);
   if (outValuesLength > 0)
      pBuffer = printHex(pBuffer, outValues, outValuesLength);
@@ -1318,7 +1318,7 @@
   if (sqlWarningOrErrorLength > 0) 
     pBuffer  = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
 
-  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+  tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
   pBuffer    = writeTraceMsg(pBuffer, temp, tempStrlen);
   if (outValuesLength > 0)
      pBuffer = printHex(pBuffer, outValues, outValuesLength);
diff --git a/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp b/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
index a0aaf26..3e15202 100644
--- a/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
+++ b/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
@@ -6286,7 +6286,7 @@
 				ControlQueryLen = ControlQueryLen + 4;
 				break;
 			case 6:
-				sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from  %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, verBuffer, verBuffer, atol(verBuffer), schemaToken, tableName);
+				sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from  %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName);
 				strcpy(HashTableInfo+ControlQueryLen, ";HE="); // HE means Guesstimated rowset size. Change 128 to HP soon.
 				ControlQueryLen = ControlQueryLen + 4;
 				break;
diff --git a/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp b/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
index ccbb689..465a224 100644
--- a/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
+++ b/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
@@ -972,7 +972,7 @@
 			{
 				char *RGWarningOrError;
 				RGWarningOrError = new char[256];
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 				delete RGWarningOrError;
@@ -999,7 +999,7 @@
 			{
 				char *RGWarningOrError;
 				RGWarningOrError = new char[256];
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 				delete RGWarningOrError;
@@ -1265,7 +1265,7 @@
 			{
 				char RGWarningOrError[256];
 
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 			}
@@ -1294,7 +1294,7 @@
 			{
 				char *RGWarningOrError;
 				RGWarningOrError = new char[256];
-				sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+				sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 				sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
 				GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
 				delete RGWarningOrError;
@@ -2233,7 +2233,7 @@
 		{
 			char RGWarningOrError[256];
 
-			sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+			sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 			sprintf( RGWarningOrError
                   , "The query's estimated cost: %.50s exceeded resource management attribute limit set."
                   , b
@@ -2260,7 +2260,7 @@
 			char *RGWarningOrError;
 
 			RGWarningOrError = new char[256];
-			sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+			sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
 			sprintf( RGWarningOrError
                   , "The query's estimated cost: %.50s exceeded resource management attribute limit set."
                   , b
@@ -5078,7 +5078,7 @@
                     "cast('%s' as varchar(128)) TABLE_CAT, "
                     "cast(trim(ob_table.SCHEMA_NAME) as varchar(128)) TABLE_SCHEM, "
                     "cast(trim(ob_table.OBJECT_NAME) as varchar(128)) TABLE_NAME, "
-                    "cast(idx.is_unique as smallint) NON_UNIQUE, "
+                    "cast(case when idx.is_unique = 1 then 0 else 1 end as smallint) NON_UNIQUE, "
                     "cast(NULL as varchar(128)) INDEX_QUALIFIER, " // not support
                     "cast(trim(ob.OBJECT_NAME) as varchar(128)) INDEX_NAME, "
                     "cast(3 as smallint) TYPE, " // SQL_INDEX_OTHER
diff --git a/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java b/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
index c3bb9f3..b6240d3 100644
--- a/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
+++ b/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
@@ -661,6 +661,7 @@
       
       try
       {
+         // If stmtName is null, string concatenation below yields the literal "null", which bypasses the driver's null check (if (stmtName == null) throw exception).
          cStmt = ((TrafT4Connection)conn).prepareCall(queryStr,"\"" + stmtName + "\"");
       } catch (NoSuchMethodError nsme)
       {
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
index bab28c5..c40fdd6 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
@@ -2307,7 +2307,7 @@
 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second,
 								cTmpFraction);
 				else
-					DataLen = sprintf(cTmpBuf,"%02d",
+					DataLen = sprintf(cTmpBuf,"%02d:%02d:%02d",
 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second);
 				break;
 			case SQLDTCODE_YEAR_TO_HOUR:
@@ -2861,7 +2861,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)
 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
 				else
-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+					sprintf(cTmpBuf,"%ld:%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
 				break;
 			case SQL_INTERVAL_MINUTE:
 				if (intervalTmp->interval_sign == SQL_TRUE)
@@ -2923,7 +2923,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)
 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
 				else
-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+					sprintf(cTmpBuf,"%ld:%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
 				break;
 			case SQL_INTERVAL_MINUTE:
 				if (intervalTmp->interval_sign == SQL_TRUE)
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
index a300bbd..b2c0091 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
@@ -1684,6 +1684,7 @@
 								return IDS_22_003;
 						}
 					}
+                    DataLen = sizeof(DATE_STRUCT);
 				}
 			}
 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_DATE, 
@@ -1930,6 +1931,7 @@
 								return IDS_22_003;
 						}
 					}
+                    DataLen = sizeof(TIME_STRUCT);
 				}
 			}
 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIME, 
@@ -2149,6 +2151,7 @@
 								return IDS_22_003;
 						}
 					}
+                    DataLen = sizeof(TIMESTAMP_STRUCT);
 				}
 			}
 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIMESTAMP, 
diff --git a/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh b/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
index d36f665..dd49e21 100755
--- a/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
+++ b/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
@@ -119,6 +119,15 @@
   esac
 done
 
+# Check HBase compatibility to determine whether TrafExportSnapshot can be used
+java org.trafodion.utility.backuprestore.TrafExportSnapshot -t
+if [[ $? -ne 0 ]]; then
+  echo 'not able to use TrafExportSnapshot'
+  mr_limit=0
+else
+  echo 'able to use TrafExportSnapshot'
+fi
+
 echo "logging output to: ${log_file}"
 
 #create tmp and log folders if they don't exist
diff --git a/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java b/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
index 3abf8ac..e38c0f1 100644
--- a/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
+++ b/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
@@ -1103,9 +1103,22 @@
   static int innerMain(final Configuration conf, final String [] args) throws Exception {
     return ToolRunner.run(conf, new TrafExportSnapshot(), args);
   }
-
+  private void testMobAvail()  {
+      MobUtils.getMobHome(HBaseConfiguration.create());
+  }
   public static void main(String[] args) throws Exception {
 	  LOG.info("Trafodion Export Snapshot Utility");
+    if(args.length == 1)
+    {
+      String cmd = args[0];
+      if (cmd.equals("-t") || cmd.equals("--test")) {
+        //test if MobUtils is available in this system
+        //doing something meaningless
+        TrafExportSnapshot ti = new TrafExportSnapshot(); 
+        ti.testMobAvail();
+        System.exit(0); //normal
+      }
+    } 
     System.exit(innerMain(HBaseConfiguration.create(), args));
   }
 }
diff --git a/core/sqf/src/seabed/src/sqstatepi.cpp b/core/sqf/src/seabed/src/sqstatepi.cpp
index 383a13a..4f1a267 100644
--- a/core/sqf/src/seabed/src/sqstatepi.cpp
+++ b/core/sqf/src/seabed/src/sqstatepi.cpp
@@ -289,18 +289,23 @@
     lp_prog = basename(pp_proc->program);
 #ifdef SQ_PHANDLE_VERIFIER
     sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d" PFVY ", prog=%s",
-#else
-    sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
-#endif
             pp_proc->process_name,
             pp_proc->type,
             lp_proc_t,
             pp_proc->nid,
             pp_proc->pid,
-#ifdef SQ_PHANDLE_VERIFIER
             pp_proc->verifier,
-#endif
             lp_prog);
+#else
+    sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
+            pp_proc->process_name,
+            pp_proc->type,
+            lp_proc_t,
+            pp_proc->nid,
+            pp_proc->pid,
+            lp_prog);
+#endif
+
     if (pv_str)
         lp_op = "sb_ic_get_openers";
     else
@@ -339,18 +344,22 @@
     lp_prog = basename(pp_proc->program);
 #ifdef SQ_PHANDLE_VERIFIER
     sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d" PFVY ", prog=%s",
-#else
-    sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
-#endif
             pp_proc->process_name,
             pp_proc->type,
             lp_proc_t,
             pp_proc->nid,
             pp_proc->pid,
-#ifdef SQ_PHANDLE_VERIFIER
             pp_proc->verifier,
-#endif
             lp_prog);
+#else
+    sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
+            pp_proc->process_name,
+            pp_proc->type,
+            lp_proc_t,
+            pp_proc->nid,
+            pp_proc->pid,
+            lp_prog);
+#endif
     if (pv_str)
         lp_op = "sb_ic_get_opens";
     else
diff --git a/core/sql/generator/GenRelMisc.cpp b/core/sql/generator/GenRelMisc.cpp
index d2a0c24..737bf4e 100644
--- a/core/sql/generator/GenRelMisc.cpp
+++ b/core/sql/generator/GenRelMisc.cpp
@@ -4014,7 +4014,16 @@
 	    tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
             setInUpdateOrInsert(bindWA, NULL);
 	    childNode = tmpAssign->getSource().getItemExpr();
-
+            //don't allow LOB insert in a tuple list
+            if (childNode->getOperatorType() == ITM_LOBINSERT)
+              {                                                          
+                // cannot have this function in a values list with
+                // multiple tuples. Use a single tuple.
+                *CmpCommon::diags() << DgSqlCode(-4483);
+                GenExit();
+                return -1;
+                        
+              }
             castNode->child(0) = childNode;
           }
           else
diff --git a/core/sql/langman/Measure.cpp b/core/sql/langman/Measure.cpp
index 9dfe206..3d57f31 100644
--- a/core/sql/langman/Measure.cpp
+++ b/core/sql/langman/Measure.cpp
@@ -57,7 +57,7 @@
   char fileName[256];
   char *logFileEnv = 0;
   if((logFileEnv = getenv("MEASURE_LOG_FILE")) != NULL){
-    sprintf(fileName, "%s.%d", pid);
+    sprintf(fileName, "%s.%d", logFileEnv, pid);
     logFile = fopen(fileName, "w+t");
     if(logFile == 0){
       logFile = stderr;
diff --git a/core/sql/optimizer/MJVIndexBuilder.cpp b/core/sql/optimizer/MJVIndexBuilder.cpp
index 3fe08fc..99ba778 100644
--- a/core/sql/optimizer/MJVIndexBuilder.cpp
+++ b/core/sql/optimizer/MJVIndexBuilder.cpp
@@ -534,7 +534,7 @@
   for (size_t i = 0; i < size; i++) {
     at(i)->out();
     if (i < size-1) {
-      printf(" --> ", i);
+      printf(" --> ");
     }
   }
 };
@@ -853,7 +853,7 @@
 void PrintColIndList (const IndexList& toPrint, char* name)
 {
   printf ("\n%s:\n",name);
-  printf ("--------------------------\n",name);
+  printf ("--------------------------\n");
   for (size_t i = 0; i < toPrint.entries(); i++) 
   {
     ColIndList currList = toPrint.at(i);	
diff --git a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
index 1cab0b0..2a4163b 100644
--- a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
+++ b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
@@ -87,7 +87,7 @@
 		IndexInfo[] expIndexInfo = {

 				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, (String)null, (short)0, (short)0, "C1", 0, 3, (short)0, (String)null),

 				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, (String)null, (short)0, (short)0, "C2", 0, 3, (short)0, (String)null),

-				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, INDEX_C1_NAME, (short)3, (short)0, "", 0, 0, (short)0, (String)null)

+				new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, true, (String)null, INDEX_C1_NAME, (short)3, (short)0, "", 0, 0, (short)0, (String)null)

 		};

 		

 		try {

diff --git a/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc b/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc
index 91ff943..4ec556d 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/olap_functions.adoc
@@ -260,7 +260,7 @@
 

 * `_inline-window-specification_`

 +

-specifies_the_window_over_which_the_avg_is_computed. The

+specifies the window over which the AVG is computed. The

 _inline-window-specification_ can contain an optional partition by

 clause, an optional ORDER BY clause and an optional window frame clause.

 The PARTITION BY clause specifies how the intermediate result is

@@ -483,7 +483,7 @@
 [[first_value_window_function]]

 == FIRST_VALUE Window Function

 

-The FIRST_VALUE window function returns the first value from a sorted partition.
+The FIRST_VALUE window function returns the first value from a sorted partition.

 

 `FIRST_VALUE (expression)`

 

diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
index 364211a..0d5c3c7 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
@@ -1492,7 +1492,7 @@
 
 <<<
 [[concat_function]]
-=== CONCAT Function
+== CONCAT Function
 
 The CONCAT function returns the concatenation of two character value
 expressions as a character string value. You can also use the
@@ -1534,7 +1534,7 @@
 === Considerations for CONCAT
 
 [[operands]]
-=== Operands
+==== Operands
 
 
 A string value can be specified by any character value expression, such
@@ -1547,7 +1547,7 @@
 
 
 [[sql-parameters]]
-=== SQL Parameters
+==== SQL Parameters
 
 You can concatenate an SQL parameter and a character value expression.
 The concatenated parameter takes on the data type attributes of the
@@ -6338,6 +6338,302 @@
 ```
 
 <<<
+[[rollup_function]]
+== ROLLUP Function
+
+The ROLLUP function calculates multiple levels of subtotals aggregating from right to left through the comma-separated list of columns, and provides a grand total. 
+
+ROLLUP is an extension to the 'GROUP BY' clause. Related features such as the GROUPING function can be used with 'ORDER BY' to control the placement of summary results.
+
+```
+SELECT…GROUP BY ROLLUP (column 1, [column 2,]…[column n])
+```
+
+ROLLUP generates n+1 levels of subtotals, including a grand total, where n is the number of the selected column(s).
+
+For example, a query that contains three rollup columns returns the following rows:
+
+* First-level: the usual aggregate values as calculated by GROUP BY clause without using ROLLUP.
+* Second-level: subtotals aggregating across column 3 for each combination of column 1 and column 2.
+* Third-level: subtotals aggregating across column 2 and column 3 for each column 1.
+* Fourth-level: the grand total row.
+
+NOTE: Trafodion does not support the CUBE function, which works slightly differently from ROLLUP.
+
+[[considerations_for_rollup]]
+=== Considerations for ROLLUP
+
+[[null_in_result_sets]]
+==== NULL in Result Sets
+
+* In super-aggregate rows representing subtotals or the grand total, lower level grouping columns are replaced by NULLs.
+* The NULLs in selected columns are considered equal and sorted into one NULL group in result sets.
+
+[[using_rollup_with_the_column_order_reversed]]
+==== Using ROLLUP with the Column Order Reversed
+
+ROLLUP removes the right-most column at each step, therefore the result sets vary with the column order specified in the comma-separated list. 
+
+[cols="50%,50%"]
+|===
+| If the column order is _country_, _state_, _city_ and _name_, ROLLUP returns following groupings. 
+| If the column order is _name_, _city_, _state_ and _country_, ROLLUP returns following groupings.
+| _country_, _state_, _city_ and _name_      | _name_, _city_, _state_ and _country_
+| _country_, _state_ and _city_              | _name_, _city_ and _state_
+| _country_ and _state_                      | _name_ and _city_
+| _country_                                  | _name_
+| grand total                                | grand total
+|===
+
+[[examples_of_rollup]]
+=== Examples of ROLLUP
+
+[[examples_of_grouping_by_one_or_multiple_rollup_columns]]
+==== Examples of Grouping By One or Multiple Rollup Columns
+
+Suppose that we have a _sales1_ table like this:
+
+```
+SELECT * FROM sales1;
+
+DELIVERY_YEAR REGION PRODUCT                          REVENUE    
+------------- ------ -------------------------------- -----------
+         2016 A      Dress                                    100
+         2016 A      Dress                                    200
+         2016 A      Pullover                                 300
+         2016 B      Dress                                    400
+         2017 A      Pullover                                 500
+         2017 B      Dress                                    600
+         2017 B      Pullover                                 700
+         2017 B      Pullover                                 800
+
+--- 8 row(s) selected.
+```
+
+* This is an example of grouping by one rollup column.
++
+```
+SELECT delivery_year, SUM (revenue) AS total_revenue 
+FROM sales1
+GROUP BY ROLLUP (delivery_year);
+```
+
++
+```
+DELIVERY_YEAR TOTAL_REVENUE       
+------------- --------------------
+         2016                 1000
+         2017                 2600
+         NULL                 3600
+
+--- 3 row(s) selected.
+```
+
+* This is an example of grouping by two rollup columns.
++ 
+ROLLUP firstly aggregates at the lowest level (_region_) and then rolls up those aggregations to the next
+level (_delivery_year_), finally it produces a grand total across these two levels.
+
++
+```
+SELECT delivery_year, region, SUM (revenue) AS total_revenue 
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region);
+```
+
++
+```
+DELIVERY_YEAR REGION TOTAL_REVENUE       
+------------- ------ --------------------
+         2016 A                       600
+         2016 B                       400
+         2016 NULL                   1000
+         2017 A                       500
+         2017 B                      2100
+         2017 NULL                   2600
+         NULL NULL                   3600
+
+--- 7 row(s) selected.
+```
++
+
+* This is an example of grouping by three rollup columns.
++
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue 
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
++
+.Grouping By Three Rollup Columns
+image::grouping-by-three-rollup-columns.jpg[700,700]
+
++
+** First-level: the rows marked in *blue* are the total revenue for each year (_2016_ and _2017_), each region (_A_ and _B_) and each product (_Dress_ and _Pullover_), they are calculated by GROUP BY instead of ROLLUP.
+
++
+** Second-level: the rows marked in *red* provide the total revenue for the given _delivery_year_ and _region_ by _product_.
++ 
+These rows have the _product_ columns set to NULL.
+
++
+** Third-level: the rows marked in *yellow* show the total revenue in each year (_2016_ and _2017_).
++ 
+These rows have the _region_ and _product_ columns set to NULL.
+
++
+** Fourth-level: the row marked in *purple* aggregates over all rows in the _delivery_year_, _region_ and _product_ columns.
++ 
+This row has the _delivery_year_, _region_ and _product_ columns set to NULL. 
+ 
+[[examples_of_null]]
+=== Examples of NULL
+
+The example below demonstrates how ROLLUP treats NULLs in the selected columns and generates NULLs for super-aggregate rows.
+
+Suppose that we have a _sales2_ table like this:
+
+```
+SELECT * FROM sales2;
+
+DELIVERY_YEAR REGION PRODUCT                          REVENUE    
+------------- ------ -------------------------------- -----------
+         NULL A      Dress                                    100
+         NULL A      Dress                                    200
+         2016 A      Pullover                                 300
+         2016 B      Dress                                    400
+         2017 A      Pullover                                 500
+         2017 B      Dress                                    600
+         NULL B      Pullover                                 700
+         NULL B      Pullover                                 800
+
+--- 8 row(s) selected.
+```
+
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue 
+FROM sales2
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
+```
+DELIVERY_YEAR REGION PRODUCT                          TOTAL_REVENUE       
+------------- ------ -------------------------------- --------------------
+         2016 A      Pullover                                          300
+         2016 A      NULL                                              300
+         2016 B      Dress                                             400
+         2016 B      NULL                                              400
+         2016 NULL   NULL                                              700
+         2017 A      Pullover                                          500
+         2017 A      NULL                                              500
+         2017 B      Dress                                             600
+         2017 B      NULL                                              600
+         2017 NULL   NULL                                             1100
+         NULL A      Dress                                             300
+         NULL A      NULL                                              300
+         NULL B      Pullover                                         1500
+         NULL B      NULL                                             1500
+         NULL NULL   NULL                                             1800
+         NULL NULL   NULL                                             3600
+
+--- 16 row(s) selected.
+```
+
+[[examples_of_using_rollup_with_the_column_order_reversed]]
+==== Examples of Using ROLLUP with the Column Order Reversed
+
+Suppose that we have the same _sales1_ table as shown in the <<examples_of_grouping_by_one_or_multiple_rollup_columns,Examples of Grouping By One or Multiple Rollup Columns>>.
+
+* The column order of the example below is _delivery_year_, _region_ and _product_.
+
++
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
++
+```
+DELIVERY_YEAR REGION PRODUCT                          TOTAL_REVENUE       
+------------- ------ -------------------------------- --------------------
+         2016 A      Dress                                             300
+         2016 A      Pullover                                          300
+         2016 A      NULL                                              600
+         2016 B      Dress                                             400
+         2016 B      NULL                                              400
+         2016 NULL   NULL                                             1000
+         2017 A      Pullover                                          500
+         2017 A      NULL                                              500
+         2017 B      Dress                                             600
+         2017 B      Pullover                                         1500
+         2017 B      NULL                                             2100
+         2017 NULL   NULL                                             2600
+         NULL NULL   NULL                                             3600
+
+--- 13 row(s) selected.
+```
+
+* The column order of the example below is _product_, _region_ and _delivery_year_, the output is different from the result sets above. 
+
++
+```
+SELECT product, region, delivery_year, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (product, region, delivery_year);
+```
+
++
+```
+PRODUCT                          REGION DELIVERY_YEAR TOTAL_REVENUE       
+-------------------------------- ------ ------------- --------------------
+Dress                            A               2016                  300
+Dress                            A               NULL                  300
+Dress                            B               2016                  400
+Dress                            B               2017                  600
+Dress                            B               NULL                 1000
+Dress                            NULL            NULL                 1300
+Pullover                         A               2016                  300
+Pullover                         A               2017                  500
+Pullover                         A               NULL                  800
+Pullover                         B               2017                 1500
+Pullover                         B               NULL                 1500
+Pullover                         NULL            NULL                 2300
+NULL                             NULL            NULL                 3600
+
+--- 13 row(s) selected.
+```
+
+[[examples_of_using_rollup_with_order_by]]
+==== Examples of Using ROLLUP with ORDER BY
+
+Suppose that we have the same _sales1_ table as shown in the <<examples_of_grouping_by_one_or_multiple_rollup_columns,Examples of Grouping By One or Multiple Rollup Columns>>.
+
+This example uses ROLLUP with the ORDER BY clause to sort the results.
+
+```
+SELECT delivery_year, product, SUM (revenue) AS total_revenue 
+FROM sales1 
+GROUP BY ROLLUP (delivery_year, product)
+ORDER BY total_revenue;
+```
+
+```
+DELIVERY_YEAR PRODUCT                          TOTAL_REVENUE       
+------------- -------------------------------- --------------------
+         2016 Pullover                                          300
+         2017 Dress                                             600
+         2016 Dress                                             700
+         2016 NULL                                             1000
+         2017 Pullover                                         2000
+         2017 NULL                                             2600
+         NULL NULL                                             3600
+
+--- 7 row(s) selected.
+```
+
+<<<
 [[round_function]]
 == ROUND Function
 
diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc
index d7f1266..8f3408e 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_utilities.adoc
@@ -784,7 +784,7 @@
 Task:  COMPLETION      Status: Ended      Elapsed Time:    00:00:00.381

 --- SQL operation complete.

 

-SQL>select * from target_table1;  

+SQL>SELECT * FROM target_table1;  

 A           B   

 ----------- ----

           4 dd  

@@ -832,8 +832,8 @@
 

 Suppose that we have two same tables (_source_table_ and _target_table1_) as shown in the <<continue_on_error_examples,Examples of `CONTINUE ON ERROR`>>.

 

-* The examples below illustrate how the LOAD Statement behaves depending on the different `num`. 

-+

+The examples below illustrate how the LOAD Statement behaves depending on the different `num`. 

+

 ```

 SQL>LOAD WITH STOP AFTER 2 ERROR ROWS INTO target_table1 SELECT * FROM source_table;

 

@@ -851,7 +851,7 @@
 SQL>SELECT * FROM target_table1;

 --- 0 row(s) selected.

 ```

-+

+

 ```

 SQL>LOAD WITH STOP AFTER 3 ERROR ROWS INTO target_table1 SELECT * FROM source_table;

 

@@ -930,10 +930,10 @@
 

 _target_table2_:

 ```

-SQL>select * from target_table2;

+SQL>SELECT * FROM target_table2;

 --- 0 row(s) selected.

 

-SQL>showddl target_table2;

+SQL>SHOWDDL target_table2;

 CREATE TABLE TRAFODION.SEABASE.TARGET_TABLE2

   (

     A                                INT NO DEFAULT NOT NULL NOT DROPPABLE NOT

@@ -995,10 +995,10 @@
 SQL>LOAD WITH INDEX TABLE ONLY INTO TABLE(INDEX_TABLE index_target_table1) SELECT b,a FROM source_table;

 --- SQL operation complete.

 

-SQL>select * from target_table1;

+SQL>SELECT * FROM target_table1;

 --- 0 row(s) selected.

 

-select * from table(index_table index_target_table1);  

+SELECT * FROM table(index_table index_target_table1);  

 B@    A              

 ----  --------------------

 aaaa                     1

@@ -1033,7 +1033,7 @@
           6  fff 

 --- 6 row(s) selected.

 

-SQL>select * from table(index_table index_target_table1);  

+SQL>SELECT * FROM table(index_table index_target_table1);  

 B@    A              

 ----  --------------------

 aaaa                     1

@@ -1125,7 +1125,7 @@
 ** The _target_table4_ has columns A (index column) and B defined, in this case, populate the index table with column A and syskey from the _source_table_.

 +

 ```

-SQL> create index index_target_table4 on target_table4(a) no populate;

+SQL> CREATE INDEX index_target_table4 ON target_table4(a) NO POPULATE;

 --- SQL operation complete.

 

 SQL>SET PARSERFLAGS 1;

@@ -1173,6 +1173,8 @@
 SQL> DROP INDEX index_target_table4;

 --- SQL operation complete.

 ```

+[[loading_data_from_hive_table_examples]]

+==== Examples of Loading Data from a Hive Table

 

 * For customer demographics data residing in

 `/hive/tpcds/customer_demographics`, create an external Hive table using

diff --git a/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg b/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg
new file mode 100644
index 0000000..a3ea166
--- /dev/null
+++ b/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg
Binary files differ
diff --git a/install/traf_checkset_env.sh b/install/traf_checkset_env.sh
new file mode 100755
index 0000000..58ae454
--- /dev/null
+++ b/install/traf_checkset_env.sh
@@ -0,0 +1,311 @@
+#!/bin/bash
+#
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+#
+# Verify that this machine's environment can build the Trafodion code.
+# The yum repo must be configured correctly before executing this script.
+# Run this script as a normal user; that user must have sudo permission.
+
# Print the command syntax and a summary of everything this script changes.
function usage {

cat <<EOF

Syntax: $0 [ -y ] [ -h | -help | --help | -v | -version | --version ]
Usage:

    1. You can create a build environment with this script.
    2. You must have sudo privileges before running this script.
    3. This script will change the following environment:
        a) Checking and installing basic commands (such as yum, lsb_release, awk, cut, uname).
        b) Checking the os version.
        c) Installing all libraries on which Trafodion depends.
        d) Removing pdsh and qt-dev if they are installed.
        e) Checking and setting the java environment.
        f) Checking and setting the locale (modify LANG and LC_LANG to en_US.UTF-8).
        g) Changing $HOME/.bashrc (add JAVA_HOME, LANG, LC_LANG and TOOLSDIR, modify PATH).
        h) Calling the script traf_tools_setup.sh to install the required tools.
    4. Getting more information from the link:
        https://cwiki.apache.org/confluence/display/TRAFODION/Create+Build+Environment
    5. It's best to download all the necessary software and set the environment variable "MY_SW_HOME" before running this script.
    6. You can set optional environment variables as follows:
        a) MY_JVM_PATH: set the specific JVM path
        b) MY_JAVA_VER: set the specific JDK version name
        c) MY_SUDO: set the specific sudo command
        d) MY_YUM: set the command for installing packages
        e) MY_LOCAL_SW_DIST: set the directory that holds locally downloaded tool sources
    7. This script may create the directory "${MY_DOWNLOAD_SW_DIST}" if it does not exist.

EOF
}
+
# Default paths; each variable can be pre-set in the environment to override.
MY_JVM_PATH=${MY_JVM_PATH-"/usr/lib/jvm"}
MY_JAVA_VER=${MY_JAVA_VER-"java-1.7.0-openjdk"}
MY_SUDO=${MY_SUDO-"sudo"}
MY_YUM=${MY_YUM-"${MY_SUDO} yum -y"}

# Shared locations for the build tools consumed by traf_tools_setup.sh.
MY_SW_HOME=${MY_SW_HOME-"/add_your_local_shared_folder_here"}
MY_LOCAL_SW_DIST=${MY_LOCAL_SW_DIST-${MY_SW_HOME}/local_software_tools}
MY_INSTALL_SW_DIST=${MY_INSTALL_SW_DIST-${MY_SW_HOME}/installed_software_tools}
MY_DOWNLOAD_SW_DIST=${MY_DOWNLOAD_SW_DIST-${MY_SW_HOME}/download_software_tools}

# Parse command-line options.
MY_IMPLICIT_Y="n"
while [ $# -gt 0 ];
do
  case $1 in
      -y) MY_IMPLICIT_Y="y"
          ;;
      -h|-help|--help)
          usage
          exit 0
          ;;
      -v|-version|--version)
          echo "version 0.0.0.1"
          exit 0
          ;;
      *)  echo "ERROR: Unexpected argument $1"
          usage
          exit 1
          ;;
  esac
  shift
done

# BUGFIX: the rest of the script redirects output to ${LOGFILE}, but only
# CHECKLOG was being defined, leaving LOGFILE possibly unset. Define LOGFILE
# itself (overridable from the environment) and keep CHECKLOG as an alias.
LOGFILE=${LOGFILE-$(pwd)/$0.log}
CHECKLOG=${LOGFILE}

# Tool names and the canonical download URL for each (kept in the same order).
local_sws=(udis llvm mpich bison icu zookeeper thrift apache-maven protobuf apache-log4cxx hadoop)
http_sws=(
http://sourceforge.net/projects/udis86/files/udis86/1.7/udis86-1.7.2.tar.gz
http://llvm.org/releases/3.2/llvm-3.2.src.tar.gz
http://www.mpich.org/static/downloads/3.0.4/mpich-3.0.4.tar.gz
http://ftp.gnu.org/gnu/bison/bison-3.0.tar.gz
http://download.icu-project.org/files/icu4c/4.4/icu4c-4_4-src.tgz
https://archive.apache.org/dist/zookeeper/zookeeper-3.4.5/zookeeper-3.4.5.tar.gz
http://archive.apache.org/dist/thrift/0.9.0/thrift-0.9.0.tar.gz
http://archive.apache.org/dist/maven/maven-3/3.3.3/binaries/apache-maven-3.3.3-bin.tar.gz
https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz
https://dist.apache.org/repos/dist/release/logging/log4cxx/0.10.0/apache-log4cxx-0.10.0.tar.gz
http://archive.apache.org/dist/hadoop/common/hadoop-2.6.0/hadoop-2.6.0.tar.gz
)
+
# Check whether the local software directory is present; if not, ask the user
# (unless -y was given) whether to fall back to downloading everything.
if [ ! -d ${MY_LOCAL_SW_DIST} ]; then
    echo "WARNING: Local software tools aren't present. Will download all tools from the internet. This will be very slow. If you do have the local software tools present, set the environment variable MY_LOCAL_SW_DIST to point to them and run this script again. The default local software directory is [${MY_LOCAL_SW_DIST}]. Do you want to continue? Enter y/n (default: n):"
    if [ "x$MY_IMPLICIT_Y" = "xn" ]; then
        read YN
        case ${YN} in
            Y|y|Yes|YES)
                ;;
            *)
                # User declined: list what would have been downloaded and stop.
                echo "Downloading the following build tools from the internet:"
                for i in `seq ${#local_sws[@]}`
                do
                    printf "%2d.%15s: %s\n" ${i} ${local_sws[${i}-1]} ${http_sws[${i}-1]}
                done
                exit 1
                ;;
        esac
    fi
else
    # Warn about each expected tool source missing from the local directory.
    for local_sw in ${local_sws[@]}
    do
        local_file=`ls ${MY_LOCAL_SW_DIST} | grep ${local_sw}`
        if [ "x${local_file}" = "x" ]; then
            echo "WARNING: [${local_sw}] source file does not exist in directory [${MY_LOCAL_SW_DIST}]"
        fi
    done
fi

# Verify that passwordless sudo works.
# BUGFIX: ">>${LOGFILE}2>&1" lacked a space, so stderr was never redirected
# and the log file name was mangled; it must be ">>${LOGFILE} 2>&1".
(${MY_SUDO} touch /etc/trafpermise) >>${LOGFILE} 2>&1
if [ "x$?" != "x0" ]; then
    echo "ERROR: you must run this script with sudo without password permission."
    exit 1
fi
+
# Make sure every basic command used by the rest of the script is available.
basecmds=(yum lsb_release awk cut uname)
for basecmd in ${basecmds[@]}
do
    basecmdpath=`which ${basecmd}`
    if [ "x$?" != "x0" ]; then
        case ${basecmd} in
            yum)
                echo "ERROR: You must first install yum."
                exit 1
                ;;
            lsb_release)
                # lsb_release can be installed on the fly from the yum repo.
                # BUGFIX: fixed the redirection spacing AND the success test --
                # the original used '= "x0"', reporting an error when the
                # install actually SUCCEEDED and continuing when it failed.
                (${MY_YUM} install redhat-lsb) >>${LOGFILE} 2>&1
                if [ "x$?" != "x0" ]; then
                    echo "ERROR: yum repo server has an error when running command [${MY_YUM} install redhat-lsb]."
                    exit 1
                fi
                ;;
            *)
                echo "ERROR: command [${basecmd}] does not exist. Make sure you have installed it, and have added it to the command path."
                exit 1
        esac
    fi
    echo "INFO: command ${basecmd} exists"
done
+
# Report platform details and verify the distribution/version is supported.
osname=`uname -o`
cpuarch=`uname -m`
echo "INFO: os is [${osname}], cpu architecture is [${cpuarch}]"

osdistributor=`lsb_release -i | cut -d : -f 2-`
case ${osdistributor} in
    *RedHat*|*CentOS*)
        echo "INFO: os distributor id is [${osdistributor}]"
        ;;
    *)
        echo "ERROR: The OS distribution is [${osdistributor}], but Trafodion only supports RedHat and CentOS presently."
        exit 1
esac

osver=`lsb_release -r | cut -d : -f 2-`
osvermajor=`echo $osver | cut -d . -f 1-1`
osverminor=`echo $osver | cut -d . -f 2-2`
case ${osvermajor} in
    6|7)
        echo "INFO: os version is [$osver]"
        ;;
    *)
        # BUGFIX: the case above accepts both 6.x and 7.x, but the message
        # claimed only 6.x is supported; make the message match.
        echo "ERROR: OS version is [$osver]. Only 6.x and 7.x versions are presently supported."
        exit 1
esac
+
# Install every library the Trafodion build depends on, recording failures
# so the script can report them all at once at the end.
dependlibs=(
	    epel-release alsa-lib-devel ant ant-nodeps apr-devel	\
	    apr-util-devel boost-devel cmake device-mapper-multipath	\
	    dhcp doxygen flex gcc-c++ gd git glibc-devel		\
	    glibc-devel.i686 graphviz-perl gzip ${MY_JAVA_VER}-devel	\
	    libX11-devel libXau-devel libaio-devel libcurl-devel	\
	    libibcm.i686 libibumad-devel libibumad-devel.i686 libiodbc	\
	    libiodbc-devel librdmacm-devel librdmacm-devel.i686		\
	    libxml2-devel lua-devel lzo-minilzo net-snmp-devel		\
	    net-snmp-perl openldap-clients openldap-devel		\
	    openldap-devel.i686 openmotif openssl-devel			\
	    openssl-devel.i686 perl-Config-IniFiles perl-Config-Tiny	\
	    perl-DBD-SQLite perl-Expect perl-IO-Tty perl-Math-Calc-Units \
	    perl-Params-Validate perl-Parse-RecDescent perl-TermReadKey	\
	    perl-Time-HiRes protobuf-compiler protobuf-devel		\
	    readline-devel rpm-build saslwrapper sqlite-devel unixODBC	\
	    unixODBC-devel uuid-perl wget xerces-c-devel xinetd		\
	    ncurses-devel)
faillibs=()
echo "INFO: install dependent library start"
for dependlib in ${dependlibs[@]}
do
    # BUGFIX: added the missing space in ">>${LOGFILE} 2>&1" (stderr was not
    # being captured in the log).
    (${MY_YUM} install ${dependlib}) >>${LOGFILE} 2>&1
    if [ "x$?" != "x0" ]; then
        faillibs=("${faillibs[@]}" "${dependlib}")
        echo "ERROR: install dependent library [${dependlib}] fail"
    else
        echo "INFO: install dependent library [${dependlib}] success"
    fi
done
echo "INFO: install dependent library finish"

# pdsh and qt-dev are known to cause problems with trafodion scripts.
echo "INFO: remove pdsh and qt-dev commands to avoid problems with trafodion scripts"
(${MY_YUM} erase pdsh) >>${LOGFILE} 2>&1
(${MY_YUM} erase qt-dev) >>${LOGFILE} 2>&1
+
# Check the JDK installation and record JAVA_HOME in ~/.bashrc if absent.
echo "INFO: check and set java environment"
javadir=`\ls -L ${MY_JVM_PATH} | grep "${MY_JAVA_VER}-" | head -n 1`
javapath="${MY_JVM_PATH}/${javadir}"

if [ ! -d ${javapath} ]; then
     echo "ERROR: java dir [${javapath}] isn't right"
     exit 1
fi

javahome=`grep JAVA_HOME ~/.bashrc | wc -l`
if [ "x${javahome}" = "x0" ]; then
    echo -en "\n# Added by traf_checkset_env.sh of trafodion\n" >> $HOME/.bashrc
    echo -en "export JAVA_HOME=${javapath}\n" >> $HOME/.bashrc
    echo -en "export PATH=\$PATH:\$JAVA_HOME/bin\n" >> $HOME/.bashrc
else
    # BUGFIX: JAVA_HOME may appear in ~/.bashrc without being exported in the
    # current (non-login) shell; fall back to the detected ${javapath} so the
    # version probe does not run "/bin/java" by accident.
    java_version=`${JAVA_HOME:-${javapath}}/bin/java -version 2>&1 | awk 'NR==1{ gsub(/"/,""); print $3}'`
    case ${java_version} in
        1.7.*)
            echo "INFO: java version is [$java_version]"
            ;;
        *)
            echo "ERROR: java version is [$java_version]. Only 1.7.x versions are presently supported."
            exit 1
    esac
fi
+
# Make sure LANG and LC_ALL are en_US.UTF-8 for future shells.
local_locale=`locale | grep -v "en_US.UTF-8" | head -n 1`
if [ "x${local_locale}" != "x" ]; then
    # BUGFIX: the exports below are written with quotes (LANG="en_US.UTF-8"),
    # so the quotes must be stripped before comparing, otherwise the test
    # never matches and a duplicate export is appended on every run.
    lang_set=`grep LANG ~/.bashrc | awk -F= '{ gsub(/"/,""); print $2 }'`
    if [ "x${lang_set}" != "xen_US.UTF-8" ]; then
        echo -en "\n# Added by traf_checkset_env.sh of trafodion\n" >> $HOME/.bashrc
        echo -en "export LANG=\"en_US.UTF-8\"\n" >> $HOME/.bashrc
    fi
    # BUGFIX: the script writes LC_ALL below, so it must search for LC_ALL
    # here; grepping for LC_TIME could never find the line written earlier.
    lc_all_set=`grep LC_ALL ~/.bashrc | awk -F= '{ gsub(/"/,""); print $2 }'`
    if [ "x${lc_all_set}" != "xen_US.UTF-8" ]; then
        echo -en "\n# Added by traf_checkset_env.sh of trafodion\n" >> $HOME/.bashrc
        echo -en "export LC_ALL=\"en_US.UTF-8\"\n" >> $HOME/.bashrc
    fi
fi
+
# Abort if any dependency failed to install earlier.
if (("${#faillibs[@]}" > "0")); then
    echo "ERROR: libs [${faillibs[@]}] aren't found in your repo server."
    exit 1;
fi

source $HOME/.bashrc
echo $'\n'"Build environment is created SUCCESS!"

# Download/copy and build the remaining tools via traf_tools_setup.sh.
echo $'\n'"Install the required Build tools start!"
export MY_LOCAL_SW_DIST
if [ ! -e ${MY_DOWNLOAD_SW_DIST} ]; then
    echo "INFO: mkdir [${MY_DOWNLOAD_SW_DIST}]"
    # -p also creates any missing parent directories.
    mkdir -p ${MY_DOWNLOAD_SW_DIST}
fi
# BUGFIX: removed the stray extra "]" from the reported command line.
echo "INFO: install tools with command:"\
"    [./traf_tools_setup.sh -d ${MY_DOWNLOAD_SW_DIST} -i ${MY_INSTALL_SW_DIST}]"
./traf_tools_setup.sh -d ${MY_DOWNLOAD_SW_DIST} -i ${MY_INSTALL_SW_DIST}
if [ "x$?" = "x0" ]; then
    echo $'\n'"Install the required Build tools SUCCESS!"
else
    echo "ERROR: Install the required Build tools FAIL.
    the execute command:
     [./traf_tools_setup.sh -d ${MY_DOWNLOAD_SW_DIST} -i ${MY_INSTALL_SW_DIST}]"
fi

# Record TOOLSDIR (and maven on PATH) for future shells.
tooldirexist=`grep TOOLSDIR ~/.bashrc | wc -l`
if [ "x${tooldirexist}" = "x0" ]; then
    echo -en "\n# Added by traf_checkset_env.sh of trafodion\n" >> $HOME/.bashrc
    echo -en "export TOOLSDIR=${MY_INSTALL_SW_DIST}\n" >> $HOME/.bashrc
    echo -en "export PATH=\$PATH:\$TOOLSDIR/apache-maven-3.3.3/bin\n" >> $HOME/.bashrc
fi
diff --git a/install/traf_tools_setup.sh b/install/traf_tools_setup.sh
index 1a5db79..69012b7 100755
--- a/install/traf_tools_setup.sh
+++ b/install/traf_tools_setup.sh
@@ -51,6 +51,27 @@
 # May need root or SUDO access to install tools in desired location
 # ----------------------------------------------------------------------------- 
 
+##############################################################################
+# Environment variables - optional
+#
+# MY_LOCAL_SW_DIST - shared location on the local network for the build-tool tarballs, etc.
+# - udis
+# - llvm
+# - mpich
+# - bison
+# - icu
+# - zookeeper
+# - thrift
+# - apache-maven
+# - protobuf
+# - apache-log4cxx
+# - hadoop
+# also set http_proxy and ftp_proxy if necessary, to download
+# files from repositories on the Internet
+#
+##############################################################################
+MY_LOCAL_SW_DIST=${MY_LOCAL_SW_DIST-""}
+
 function Usage {
    echo
    echo "Usage: $0 -d <downloaddir> -i <installdir>"
@@ -76,6 +97,13 @@
   SRCDIR="$2"
   TARFILE="${URL##*/}"
 
+  if [ "x${MY_LOCAL_SW_DIST}" != "x" ]; then
+      echo "INFO:   copy file ${TARFILE} from [${MY_LOCAL_SW_DIST}/${TARFILE}]"
+      if [ -f ${MY_LOCAL_SW_DIST}/${TARFILE} ]; then
+	  cp ${MY_LOCAL_SW_DIST}/${TARFILE} ${BASEDIR}/${TARFILE}
+      fi
+  fi
+
   if [ ! -e $BASEDIR/$TARFILE ]; then
     wget $URL  >>$LOGFILE 2>&1
     echo "INFO:   downloaded tar file: $TARFILE " | tee -a $LOGFILE
diff --git a/win-odbc64/odbcclient/drvr35/cdesc.cpp b/win-odbc64/odbcclient/drvr35/cdesc.cpp
index a8b2d94..73b6326 100644
--- a/win-odbc64/odbcclient/drvr35/cdesc.cpp
+++ b/win-odbc64/odbcclient/drvr35/cdesc.cpp
@@ -2299,7 +2299,8 @@
 						pHandle->setDiagRec(DRIVER_ERROR, retCode, 0, (char *)errorMsg, NULL, RowNumber, ParamNumber);

 					else

 					{

-						sprintf((char *)errorMsg," Incorrect Format or Data.", RowNumber, ParamNumber);

+						sprintf((char *)errorMsg," Incorrect Format or Data [RowNumber: %d, ParamNumber:%d].",

+                                                        RowNumber, ParamNumber);

 						pHandle->setDiagRec(DRIVER_ERROR, retCode, 0, (char *)errorMsg, NULL,

 								RowNumber, ParamNumber);

 					}

diff --git a/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp b/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
index 2b1a39f..a4f53f3 100644
--- a/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
+++ b/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
@@ -2170,7 +2170,7 @@
 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second,

 								cTmpFraction);

 				else

-					DataLen = sprintf(cTmpBuf,"%02d",

+					DataLen = sprintf(cTmpBuf,"%02d:%02d:%02d",

 								pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second);

 				break;

 			case SQLDTCODE_YEAR_TO_HOUR:

@@ -2717,7 +2717,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)

 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);

 				else

-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

+					sprintf(cTmpBuf,"%ld %ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

 				break;

 			case SQL_INTERVAL_MINUTE:

 				if (intervalTmp->interval_sign == SQL_TRUE)

@@ -2779,7 +2779,7 @@
 				if (intervalTmp->interval_sign == SQL_TRUE)

 					sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);

 				else

-					sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

+					sprintf(cTmpBuf,"%ld %ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);

 				break;

 			case SQL_INTERVAL_MINUTE:

 				if (intervalTmp->interval_sign == SQL_TRUE)

diff --git a/win-odbc64/odbcclient/drvr35/sqltocconv.cpp b/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
index 89c0329..e597135 100755
--- a/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
+++ b/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
@@ -1633,6 +1633,7 @@
 								return IDS_22_003;

 						}

 					}

+					DataLen = sizeof(DATE_STRUCT);

 				}

 			}

 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_DATE, 

@@ -1883,6 +1884,7 @@
 								return IDS_22_003;

 						}

 					}

+					DataLen = sizeof(TIME_STRUCT);

 				}

 			}

 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIME, 

@@ -2104,6 +2106,7 @@
 								return IDS_22_003;

 						}

 					}

+					DataLen = sizeof(TIMESTAMP_STRUCT);

 				}

 			}

 			if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIMESTAMP,