Merge [TRAFODION-2910] PR 1400 Add LOAD details in SQL Reference Manual
diff --git a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
index 876e831..ce7c980 100644
--- a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
+++ b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4Connection.java
@@ -541,7 +541,7 @@
stmtLabel = stmtLabel.toUpperCase();
}
- TrafT4CallableStatement stmt;
+ TrafT4CallableStatement stmt = null;
clearWarnings();
if (_isClosed() == true) {
@@ -557,8 +557,11 @@
return stmt;
}
}
-
- stmt = new TrafT4CallableStatement(this, sql, stmtLabel);
+ if (stmtLabel.equalsIgnoreCase("null")) {
+ stmt = new TrafT4CallableStatement(this, sql);
+ } else {
+ stmt = new TrafT4CallableStatement(this, sql, stmtLabel);
+ }
stmt.prepareCall(stmt.sql_, stmt.queryTimeout_, stmt.resultSetHoldability_);
if (isStatementCachingEnabled()) {
diff --git a/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp b/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
index 9b5e5e0..c716998 100644
--- a/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
+++ b/core/conn/odbc/src/odbc/Common/ODBCMXTraceMsgs.cpp
@@ -647,7 +647,7 @@
pBuffer += length;
if (sqlWarningOrErrorLength > 0) pBuffer = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
- tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+ tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
pBuffer = writeTraceMsg(pBuffer, temp, tempStrlen);
if (outValuesLength > 0) pBuffer = printHex(pBuffer, outValues, outValuesLength);
@@ -1243,7 +1243,7 @@
if (sqlWarningOrErrorLength > 0)
pBuffer = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
- tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+ tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
pBuffer = writeTraceMsg(pBuffer, temp, tempStrlen);
if (outValuesLength > 0)
pBuffer = printHex(pBuffer, outValues, outValuesLength);
@@ -1318,7 +1318,7 @@
if (sqlWarningOrErrorLength > 0)
pBuffer = printHex(pBuffer, sqlWarningOrError, sqlWarningOrErrorLength);
- tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:");
+ tempStrlen = sprintf(temp, "RowsAffected:%d OutValues:", rowsAffected);
pBuffer = writeTraceMsg(pBuffer, temp, tempStrlen);
if (outValuesLength > 0)
pBuffer = printHex(pBuffer, outValues, outValuesLength);
diff --git a/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp b/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
index a0aaf26..3e15202 100644
--- a/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
+++ b/core/conn/odbc/src/odbc/nsksrvr/SrvrConnect.cpp
@@ -6286,7 +6286,7 @@
ControlQueryLen = ControlQueryLen + 4;
break;
case 6:
- sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, verBuffer, verBuffer, atol(verBuffer), schemaToken, tableName);
+ sprintf(ControlQuery,"select cast(cast((52 * 1024 * 128) / (sum(co.column_size)) as integer) as varchar(10) character set ISO88591) from %s.SYSTEM_SCHEMA.SCHEMATA sc, NEO.HP_DEFINITION_SCHEMA.OBJECTS ob, NEO.HP_DEFINITION_SCHEMA.COLS co where sc.SCHEMA_NAME = '%s' and ob.OBJECT_NAME = '%s' and sc.SCHEMA_UID = ob.SCHEMA_UID and ob.OBJECT_UID = co.OBJECT_UID and ob.OBJECT_TYPE = 'BT' FOR READ UNCOMMITTED ACCESS", srvrGlobal->SystemCatalog, schemaToken, tableName);
strcpy(HashTableInfo+ControlQueryLen, ";HE="); // HE means Guesstimated rowset size. Change 128 to HP soon.
ControlQueryLen = ControlQueryLen + 4;
break;
diff --git a/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp b/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
index ccbb689..465a224 100644
--- a/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
+++ b/core/conn/odbc/src/odbc/nsksrvrcore/srvrothers.cpp
@@ -972,7 +972,7 @@
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
- sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+ sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
delete RGWarningOrError;
@@ -999,7 +999,7 @@
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
- sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+ sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
delete RGWarningOrError;
@@ -1265,7 +1265,7 @@
{
char RGWarningOrError[256];
- sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+ sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(1, "01000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
}
@@ -1294,7 +1294,7 @@
{
char *RGWarningOrError;
RGWarningOrError = new char[256];
- sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+ sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf(RGWarningOrError, "The query's estimated cost: %.50s exceeded resource management attribute limit set.", b);
GETMXCSWARNINGORERROR(-1, "HY000", RGWarningOrError, sqlWarningOrErrorLength, sqlWarningOrError);
delete RGWarningOrError;
@@ -2233,7 +2233,7 @@
{
char RGWarningOrError[256];
- sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+ sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf( RGWarningOrError
, "The query's estimated cost: %.50s exceeded resource management attribute limit set."
, b
@@ -2260,7 +2260,7 @@
char *RGWarningOrError;
RGWarningOrError = new char[256];
- sprintf(b,"lf",pSrvrStmt->cost_info.totalTime);
+ sprintf(b,"%lf",pSrvrStmt->cost_info.totalTime);
sprintf( RGWarningOrError
, "The query's estimated cost: %.50s exceeded resource management attribute limit set."
, b
@@ -5078,7 +5078,7 @@
"cast('%s' as varchar(128)) TABLE_CAT, "
"cast(trim(ob_table.SCHEMA_NAME) as varchar(128)) TABLE_SCHEM, "
"cast(trim(ob_table.OBJECT_NAME) as varchar(128)) TABLE_NAME, "
- "cast(idx.is_unique as smallint) NON_UNIQUE, "
+ "cast(case when idx.is_unique = 1 then 0 else 1 end as smallint) NON_UNIQUE, "
"cast(NULL as varchar(128)) INDEX_QUALIFIER, " // not support
"cast(trim(ob.OBJECT_NAME) as varchar(128)) INDEX_NAME, "
"cast(3 as smallint) TYPE, " // SQL_INDEX_OTHER
diff --git a/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java b/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
index c3bb9f3..b6240d3 100644
--- a/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
+++ b/core/conn/trafci/src/main/java/org/trafodion/ci/DatabaseQuery.java
@@ -661,6 +661,7 @@
try
{
+ // If stmtName is null, the concatenation below passes the literal string "null", which bypasses the driver's null check (if (stmtName == null) throw exception).
cStmt = ((TrafT4Connection)conn).prepareCall(queryStr,"\"" + stmtName + "\"");
} catch (NoSuchMethodError nsme)
{
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
index bab28c5..c40fdd6 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/ctosqlconv.cpp
@@ -2307,7 +2307,7 @@
pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second,
cTmpFraction);
else
- DataLen = sprintf(cTmpBuf,"%02d",
+ DataLen = sprintf(cTmpBuf,"%02d:%02d:%02d",
pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second);
break;
case SQLDTCODE_YEAR_TO_HOUR:
@@ -2861,7 +2861,7 @@
if (intervalTmp->interval_sign == SQL_TRUE)
sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
else
- sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+ sprintf(cTmpBuf,"%ld:%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
break;
case SQL_INTERVAL_MINUTE:
if (intervalTmp->interval_sign == SQL_TRUE)
@@ -2923,7 +2923,7 @@
if (intervalTmp->interval_sign == SQL_TRUE)
sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
else
- sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+ sprintf(cTmpBuf,"%ld:%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
break;
case SQL_INTERVAL_MINUTE:
if (intervalTmp->interval_sign == SQL_TRUE)
diff --git a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
index a300bbd..b2c0091 100644
--- a/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
+++ b/core/conn/unixodbc/odbc/odbcclient/unixcli/cli/sqltocconv.cpp
@@ -1684,6 +1684,7 @@
return IDS_22_003;
}
}
+ DataLen = sizeof(DATE_STRUCT);
}
}
if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_DATE,
@@ -1930,6 +1931,7 @@
return IDS_22_003;
}
}
+ DataLen = sizeof(TIME_STRUCT);
}
}
if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIME,
@@ -2149,6 +2151,7 @@
return IDS_22_003;
}
}
+ DataLen = sizeof(TIMESTAMP_STRUCT);
}
}
if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIMESTAMP,
diff --git a/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh b/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
index d36f665..dd49e21 100755
--- a/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
+++ b/core/sqf/hbase_utilities/backup_and_restore/run_full_trafodion_backup.sh
@@ -119,6 +119,15 @@
esac
done
+#check HBase compatibility to determine whether TrafExportSnapshot can be used
+java org.trafodion.utility.backuprestore.TrafExportSnapshot -t
+if [[ $? -ne 0 ]]; then
+ echo 'not able to use TrafExportSnapshot'
+ mr_limit=0
+else
+ echo 'able to use TrafExportSnapshot'
+fi
+
echo "logging output to: ${log_file}"
#create tmp and log folders if they don't exist
diff --git a/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java b/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
index 3abf8ac..e38c0f1 100644
--- a/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
+++ b/core/sqf/hbase_utilities/src/main/java/org/trafodion/utility/backuprestore/TrafExportSnapshot.java
@@ -1103,9 +1103,22 @@
static int innerMain(final Configuration conf, final String [] args) throws Exception {
return ToolRunner.run(conf, new TrafExportSnapshot(), args);
}
-
+ private void testMobAvail() {
+ MobUtils.getMobHome(HBaseConfiguration.create());
+ }
public static void main(String[] args) throws Exception {
LOG.info("Trafodion Export Snapshot Utility");
+ if(args.length == 1)
+ {
+ String cmd = args[0];
+ if (cmd.equals("-t") || cmd.equals("--test")) {
+ //test whether MobUtils is available on this system
+ //the call itself does nothing useful; it only verifies the class loads
+ TrafExportSnapshot ti = new TrafExportSnapshot();
+ ti.testMobAvail();
+ System.exit(0); //normal
+ }
+ }
System.exit(innerMain(HBaseConfiguration.create(), args));
}
}
diff --git a/core/sqf/src/seabed/src/sqstatepi.cpp b/core/sqf/src/seabed/src/sqstatepi.cpp
index 383a13a..4f1a267 100644
--- a/core/sqf/src/seabed/src/sqstatepi.cpp
+++ b/core/sqf/src/seabed/src/sqstatepi.cpp
@@ -289,18 +289,23 @@
lp_prog = basename(pp_proc->program);
#ifdef SQ_PHANDLE_VERIFIER
sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d" PFVY ", prog=%s",
-#else
- sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
-#endif
pp_proc->process_name,
pp_proc->type,
lp_proc_t,
pp_proc->nid,
pp_proc->pid,
-#ifdef SQ_PHANDLE_VERIFIER
pp_proc->verifier,
-#endif
lp_prog);
+#else
+ sprintf(la_title, "sb-openers for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
+ pp_proc->process_name,
+ pp_proc->type,
+ lp_proc_t,
+ pp_proc->nid,
+ pp_proc->pid,
+ lp_prog);
+#endif
+
if (pv_str)
lp_op = "sb_ic_get_openers";
else
@@ -339,18 +344,22 @@
lp_prog = basename(pp_proc->program);
#ifdef SQ_PHANDLE_VERIFIER
sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d" PFVY ", prog=%s",
-#else
- sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
-#endif
pp_proc->process_name,
pp_proc->type,
lp_proc_t,
pp_proc->nid,
pp_proc->pid,
-#ifdef SQ_PHANDLE_VERIFIER
pp_proc->verifier,
-#endif
lp_prog);
+#else
+ sprintf(la_title, "sb-opens for process=%s, type=%d(%s), p-id=%d/%d, prog=%s",
+ pp_proc->process_name,
+ pp_proc->type,
+ lp_proc_t,
+ pp_proc->nid,
+ pp_proc->pid,
+ lp_prog);
+#endif
if (pv_str)
lp_op = "sb_ic_get_opens";
else
diff --git a/core/sql/generator/GenRelMisc.cpp b/core/sql/generator/GenRelMisc.cpp
index d2a0c24..737bf4e 100644
--- a/core/sql/generator/GenRelMisc.cpp
+++ b/core/sql/generator/GenRelMisc.cpp
@@ -4014,7 +4014,16 @@
tmpAssign = (Assign *)tmpAssign->bindNode(bindWA);
setInUpdateOrInsert(bindWA, NULL);
childNode = tmpAssign->getSource().getItemExpr();
-
+ //don't allow LOB insert in a tuple list
+ if (childNode->getOperatorType() == ITM_LOBINSERT)
+ {
+ // cannot have this function in a values list with
+ // multiple tuples. Use a single tuple.
+ *CmpCommon::diags() << DgSqlCode(-4483);
+ GenExit();
+ return -1;
+
+ }
castNode->child(0) = childNode;
}
else
diff --git a/core/sql/langman/Measure.cpp b/core/sql/langman/Measure.cpp
index 9dfe206..3d57f31 100644
--- a/core/sql/langman/Measure.cpp
+++ b/core/sql/langman/Measure.cpp
@@ -57,7 +57,7 @@
char fileName[256];
char *logFileEnv = 0;
if((logFileEnv = getenv("MEASURE_LOG_FILE")) != NULL){
- sprintf(fileName, "%s.%d", pid);
+ sprintf(fileName, "%s.%d", logFileEnv, pid);
logFile = fopen(fileName, "w+t");
if(logFile == 0){
logFile = stderr;
diff --git a/core/sql/optimizer/BindItemExpr.cpp b/core/sql/optimizer/BindItemExpr.cpp
index a4f296b..3b71061 100644
--- a/core/sql/optimizer/BindItemExpr.cpp
+++ b/core/sql/optimizer/BindItemExpr.cpp
@@ -3654,12 +3654,10 @@
else if (convType == 2)
{
Parser parser(bindWA->currentCmpContext());
- char buf[1000];
+ char buf[128];
- // right justify the string representation of numeric operand
- // and then do the concat
- sprintf(buf, "CAST(SPACE(%d - CHAR_LENGTH(CAST(@A1 AS VARCHAR(%d)))) || CAST(@A1 AS VARCHAR(%d)) AS VARCHAR(%d))",
- dLen, dLen, dLen, dLen);
+ sprintf(buf, "CAST(CAST(@A1 AS VARCHAR(%d)) AS VARCHAR(%d))",
+ dLen, dLen);
newChild =
parser.getItemExprTree(buf, strlen(buf), BINDITEMEXPR_STMTCHARSET, 1, child(srcChildIndex));
diff --git a/core/sql/optimizer/MJVIndexBuilder.cpp b/core/sql/optimizer/MJVIndexBuilder.cpp
index 3fe08fc..99ba778 100644
--- a/core/sql/optimizer/MJVIndexBuilder.cpp
+++ b/core/sql/optimizer/MJVIndexBuilder.cpp
@@ -534,7 +534,7 @@
for (size_t i = 0; i < size; i++) {
at(i)->out();
if (i < size-1) {
- printf(" --> ", i);
+ printf(" --> ");
}
}
};
@@ -853,7 +853,7 @@
void PrintColIndList (const IndexList& toPrint, char* name)
{
printf ("\n%s:\n",name);
- printf ("--------------------------\n",name);
+ printf ("--------------------------\n");
for (size_t i = 0; i < toPrint.entries(); i++)
{
ColIndList currList = toPrint.at(i);
diff --git a/core/sql/regress/seabase/EXPECTED032 b/core/sql/regress/seabase/EXPECTED032
index 5a4aa97..b975865 100644
--- a/core/sql/regress/seabase/EXPECTED032
+++ b/core/sql/regress/seabase/EXPECTED032
@@ -333,8 +333,8 @@
(EXPR)
------------
- 11
- 21
+11
+21
--- 2 row(s) selected.
>>insert into t032t1 values ('3', 3, 3, date '2016-08-15', time '10:11:12',
diff --git a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
index 1cab0b0..2a4163b 100644
--- a/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
+++ b/dcs/src/test/jdbc_test/src/test/java/org/trafodion/jdbc_test/TestGetIndexInfo.java
@@ -87,7 +87,7 @@
IndexInfo[] expIndexInfo = {
new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, (String)null, (short)0, (short)0, "C1", 0, 3, (short)0, (String)null),
new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, (String)null, (short)0, (short)0, "C2", 0, 3, (short)0, (String)null),
- new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, false, (String)null, INDEX_C1_NAME, (short)3, (short)0, "", 0, 0, (short)0, (String)null)
+ new IndexInfo("TRAFODION", "SEABASE", INDEX_INFO_TEST_TABLE, true, (String)null, INDEX_C1_NAME, (short)3, (short)0, "", 0, 0, (short)0, (String)null)
};
try {
diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
index d6d2e70..0d5c3c7 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
@@ -6338,6 +6338,302 @@
```
<<<
+[[rollup_function]]
+== ROLLUP Function
+
+The ROLLUP function calculates multiple levels of subtotals aggregating from right to left through the comma-separated list of columns, and provides a grand total.
+
+ROLLUP is an extension to the 'GROUP BY' clause. Related features such as the GROUPING function can be used with 'ORDER BY' to control the placement of summary results.
+
+```
+SELECT…GROUP BY ROLLUP (column 1, [column 2,]…[column n])
+```
+
+ROLLUP generates n+1 levels of subtotals, including a grand total, where n is the number of the selected column(s).
+
+For example, a query that contains three rollup columns returns the following rows:
+
+* First-level: the usual aggregate values as calculated by GROUP BY clause without using ROLLUP.
+* Second-level: subtotals aggregating across column 3 for each combination of column 1 and column 2.
+* Third-level: subtotals aggregating across column 2 and column 3 for each column 1.
+* Fourth-level: the grand total row.
+
+NOTE: Trafodion does not support CUBE function which works slightly differently from ROLLUP.
+
+[[considerations_for_rollup]]
+=== Considerations for ROLLUP
+
+[[null_in_result_sets]]
+==== NULL in Result Sets
+
+* In super-aggregate rows representing subtotals or the grand total, lower level grouping columns are replaced by NULLs.
+* The NULLs in selected columns are considered equal and sorted into one NULL group in result sets.
+
+[[using_rollup_with_the_column_order_reversed]]
+==== Using ROLLUP with the Column Order Reversed
+
+ROLLUP removes the right-most column at each step, therefore the result sets vary with the column order specified in the comma-separated list.
+
+[cols="50%,50%"]
+|===
+| If the column order is _country_, _state_, _city_ and _name_, ROLLUP returns following groupings.
+| If the column order is _name_, _city_, _state_ and _country_, ROLLUP returns following groupings.
+| _country_, _state_, _city_ and _name_ | _name_, _city_, _state_ and _country_
+| _country_, _state_ and _city_ | _name_, _city_ and _state_
+| _country_ and _state_ | _name_ and _city_
+| _country_ | _name_
+| grand total | grand total
+|===
+
+[[examples_of_rollup]]
+=== Examples of ROLLUP
+
+[[examples_of_grouping_by_one_or_multiple_rollup_columns]]
+==== Examples of Grouping By One or Multiple Rollup Columns
+
+Suppose that we have a _sales1_ table like this:
+
+```
+SELECT * FROM sales1;
+
+DELIVERY_YEAR REGION PRODUCT REVENUE
+------------- ------ -------------------------------- -----------
+ 2016 A Dress 100
+ 2016 A Dress 200
+ 2016 A Pullover 300
+ 2016 B Dress 400
+ 2017 A Pullover 500
+ 2017 B Dress 600
+ 2017 B Pullover 700
+ 2017 B Pullover 800
+
+--- 8 row(s) selected.
+```
+
+* This is an example of grouping by one rollup column.
++
+```
+SELECT delivery_year, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year);
+```
+
++
+```
+DELIVERY_YEAR TOTAL_REVENUE
+------------- --------------------
+ 2016 1000
+ 2017 2600
+ NULL 3600
+
+--- 3 row(s) selected.
+```
+
+* This is an example of grouping by two rollup columns.
++
+ROLLUP firstly aggregates at the lowest level (_region_) and then rolls up those aggregations to the next
+level (_delivery_year_), finally it produces a grand total across these two levels.
+
++
+```
+SELECT delivery_year, region, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region);
+```
+
++
+```
+DELIVERY_YEAR REGION TOTAL_REVENUE
+------------- ------ --------------------
+ 2016 A 600
+ 2016 B 400
+ 2016 NULL 1000
+ 2017 A 500
+ 2017 B 2100
+ 2017 NULL 2600
+ NULL NULL 3600
+
+--- 7 row(s) selected.
+```
++
+
+* This is an example of grouping by three rollup columns.
++
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
++
+.Grouping By Three Rollup Columns
+image::grouping-by-three-rollup-columns.jpg[700,700]
+
++
+** First-level: the rows marked in *blue* are the total revenue for each year (_2016_ and _2017_), each region (_A_ and _B_) and each product (_Dress_ and _Pullover_), they are calculated by GROUP BY instead of ROLLUP.
+
++
+** Second-level: the rows marked in *red* provide the total revenue for the given _delivery_year_ and _region_ by _product_.
++
+These rows have the _product_ columns set to NULL.
+
++
+** Third-level: the rows marked in *yellow* show the total revenue in each year (_2016_ and _2017_).
++
+These rows have the _region_ and _product_ columns set to NULL.
+
++
+** Fourth-level: the row marked in *purple* aggregates over all rows in the _delivery_year_, _region_ and _product_ columns.
++
+This row has the _delivery_year_, _region_ and _product_ columns set to NULL.
+
+[[examples_of_null]]
+==== Examples of NULL
+
+The example below demonstrates how ROLLUP treats NULLs in the selected columns and generates NULLs for super-aggregate rows.
+
+Suppose that we have a _sales2_ table like this:
+
+```
+SELECT * FROM sales2;
+
+DELIVERY_YEAR REGION PRODUCT REVENUE
+------------- ------ -------------------------------- -----------
+ NULL A Dress 100
+ NULL A Dress 200
+ 2016 A Pullover 300
+ 2016 B Dress 400
+ 2017 A Pullover 500
+ 2017 B Dress 600
+ NULL B Pullover 700
+ NULL B Pullover 800
+
+--- 8 row(s) selected.
+```
+
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue
+FROM sales2
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
+```
+DELIVERY_YEAR REGION PRODUCT TOTAL_REVENUE
+------------- ------ -------------------------------- --------------------
+ 2016 A Pullover 300
+ 2016 A NULL 300
+ 2016 B Dress 400
+ 2016 B NULL 400
+ 2016 NULL NULL 700
+ 2017 A Pullover 500
+ 2017 A NULL 500
+ 2017 B Dress 600
+ 2017 B NULL 600
+ 2017 NULL NULL 1100
+ NULL A Dress 300
+ NULL A NULL 300
+ NULL B Pullover 1500
+ NULL B NULL 1500
+ NULL NULL NULL 1800
+ NULL NULL NULL 3600
+
+--- 16 row(s) selected.
+```
+
+[[examples_of_using_rollup_with_the_column_order_reversed]]
+==== Examples of Using ROLLUP with the Column Order Reversed
+
+Suppose that we have the same _sales1_ table as shown in the <<examples_of_grouping_by_one_or_multiple_rollup_columns,Examples of Grouping By One or Multiple Rollup Columns>>.
+
+* The column order of the example below is _delivery_year_, _region_ and _product_.
+
++
+```
+SELECT delivery_year, region, product, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year, region, product);
+```
+
++
+```
+DELIVERY_YEAR REGION PRODUCT TOTAL_REVENUE
+------------- ------ -------------------------------- --------------------
+ 2016 A Dress 300
+ 2016 A Pullover 300
+ 2016 A NULL 600
+ 2016 B Dress 400
+ 2016 B NULL 400
+ 2016 NULL NULL 1000
+ 2017 A Pullover 500
+ 2017 A NULL 500
+ 2017 B Dress 600
+ 2017 B Pullover 1500
+ 2017 B NULL 2100
+ 2017 NULL NULL 2600
+ NULL NULL NULL 3600
+
+--- 13 row(s) selected.
+```
+
+* The column order of the example below is _product_, _region_ and _delivery_year_, so the output is different from the result sets above.
+
++
+```
+SELECT product, region, delivery_year, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (product, region, delivery_year);
+```
+
++
+```
+PRODUCT REGION DELIVERY_YEAR TOTAL_REVENUE
+-------------------------------- ------ ------------- --------------------
+Dress A 2016 300
+Dress A NULL 300
+Dress B 2016 400
+Dress B 2017 600
+Dress B NULL 1000
+Dress NULL NULL 1300
+Pullover A 2016 300
+Pullover A 2017 500
+Pullover A NULL 800
+Pullover B 2017 1500
+Pullover B NULL 1500
+Pullover NULL NULL 2300
+NULL NULL NULL 3600
+
+--- 13 row(s) selected.
+```
+
+[[examples_of_using_rollup_with_order_by]]
+==== Examples of Using ROLLUP with ORDER BY
+
+Suppose that we have the same _sales1_ table as shown in the <<examples_of_grouping_by_one_or_multiple_rollup_columns,Examples of Grouping By One or Multiple Rollup Columns>>.
+
+This example uses ROLLUP with the ORDER BY clause to sort the results.
+
+```
+SELECT delivery_year, product, SUM (revenue) AS total_revenue
+FROM sales1
+GROUP BY ROLLUP (delivery_year, product)
+ORDER BY total_revenue;
+```
+
+```
+DELIVERY_YEAR PRODUCT TOTAL_REVENUE
+------------- -------------------------------- --------------------
+ 2016 Pullover 300
+ 2017 Dress 600
+ 2016 Dress 700
+ 2016 NULL 1000
+ 2017 Pullover 2000
+ 2017 NULL 2600
+ NULL NULL 3600
+
+--- 7 row(s) selected.
+```
+
+<<<
[[round_function]]
== ROUND Function
diff --git a/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg b/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg
new file mode 100644
index 0000000..a3ea166
--- /dev/null
+++ b/docs/sql_reference/src/images/grouping-by-three-rollup-columns.jpg
Binary files differ
diff --git a/win-odbc64/odbcclient/drvr35/cdesc.cpp b/win-odbc64/odbcclient/drvr35/cdesc.cpp
index a8b2d94..73b6326 100644
--- a/win-odbc64/odbcclient/drvr35/cdesc.cpp
+++ b/win-odbc64/odbcclient/drvr35/cdesc.cpp
@@ -2299,7 +2299,8 @@
pHandle->setDiagRec(DRIVER_ERROR, retCode, 0, (char *)errorMsg, NULL, RowNumber, ParamNumber);
else
{
- sprintf((char *)errorMsg," Incorrect Format or Data.", RowNumber, ParamNumber);
+ sprintf((char *)errorMsg," Incorrect Format or Data [RowNumber: %d, ParamNumber:%d].",
+ RowNumber, ParamNumber);
pHandle->setDiagRec(DRIVER_ERROR, retCode, 0, (char *)errorMsg, NULL,
RowNumber, ParamNumber);
}
diff --git a/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp b/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
index 2b1a39f..a4f53f3 100644
--- a/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
+++ b/win-odbc64/odbcclient/drvr35/ctosqlconv.cpp
@@ -2170,7 +2170,7 @@
pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second,
cTmpFraction);
else
- DataLen = sprintf(cTmpBuf,"%02d",
+ DataLen = sprintf(cTmpBuf,"%02d:%02d:%02d",
pSQLTimestamp->hour,pSQLTimestamp->minute,pSQLTimestamp->second);
break;
case SQLDTCODE_YEAR_TO_HOUR:
@@ -2717,7 +2717,7 @@
if (intervalTmp->interval_sign == SQL_TRUE)
sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
else
- sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+ sprintf(cTmpBuf,"%ld %ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
break;
case SQL_INTERVAL_MINUTE:
if (intervalTmp->interval_sign == SQL_TRUE)
@@ -2779,7 +2779,7 @@
if (intervalTmp->interval_sign == SQL_TRUE)
sprintf(cTmpBuf,"-%ld",intervalTmp->intval.day_second.hour);
else
- sprintf(cTmpBuf,"%ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
+ sprintf(cTmpBuf,"%ld %ld",intervalTmp->intval.day_second.hour,intervalTmp->intval.day_second.minute);
break;
case SQL_INTERVAL_MINUTE:
if (intervalTmp->interval_sign == SQL_TRUE)
diff --git a/win-odbc64/odbcclient/drvr35/sqltocconv.cpp b/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
index 89c0329..e597135 100755
--- a/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
+++ b/win-odbc64/odbcclient/drvr35/sqltocconv.cpp
@@ -1633,6 +1633,7 @@
return IDS_22_003;
}
}
+ DataLen = sizeof(DATE_STRUCT);
}
}
if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_DATE,
@@ -1883,6 +1884,7 @@
return IDS_22_003;
}
}
+ DataLen = sizeof(TIME_STRUCT);
}
}
if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIME,
@@ -2104,6 +2106,7 @@
return IDS_22_003;
}
}
+ DataLen = sizeof(TIMESTAMP_STRUCT);
}
}
if ((retCode = ConvertSQLCharToDate(ODBCDataType, cTmpBuf, srcLength, SQL_C_TIMESTAMP,