Merge [TRAFODION-2998] PR 1483 Fix sleep exp code to compile on CentOS 7
diff --git a/.rat-excludes b/.rat-excludes
index 48d90fe..32c8646 100644
--- a/.rat-excludes
+++ b/.rat-excludes
@@ -44,9 +44,8 @@
 # SQL messages file
 SqlciErrors.txt
 # DCS config files
-backup-masters
 servers
-master
+masters
 # jquery
 jquery-ui.css*
 jquery-ui.js*
diff --git a/RAT_README b/RAT_README
index 66a448f..1799dd1 100644
--- a/RAT_README
+++ b/RAT_README
@@ -59,8 +59,7 @@
                  -> this file does not handle comments
 
 dcs/conf/servers 
-dcs/conf/backup-masters 
-dcs/conf/master
+dcs/conf/masters
                  :  Apache Trafodion DCS default configuration file
                  -> configuration file do not handle comments
 
diff --git a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java
index ee1268d..0016510 100644
--- a/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java
+++ b/core/conn/jdbcT4/src/main/java/org/trafodion/jdbc/t4/TrafT4PreparedStatement.java
@@ -1244,6 +1244,10 @@
 			case Types.CLOB:
 				setString(parameterIndex, x.toString());
 				break;
+			case Types.NCHAR:
+			case Types.NVARCHAR:
+			    setNString(parameterIndex, x.toString());
+			    break;
 			case Types.VARBINARY:
 			case Types.BINARY:
 			case Types.LONGVARBINARY:
@@ -2517,8 +2521,23 @@
 
 	public void setNString(int parameterIndex, String value)
 			throws SQLException {
-		// TODO Auto-generated method stub
+	    if (connection_.props_.t4Logger_.isLoggable(Level.FINE) == true) {
+            Object p[] = T4LoggingUtilities.makeParams(connection_.props_, parameterIndex, value);
+            connection_.props_.t4Logger_.logp(Level.FINE, "TrafT4PreparedStatement", "setNString", "", p);
+        }
 
+        validateSetInvocation(parameterIndex);
+        int dataType = inputDesc_[parameterIndex - 1].dataType_;
+
+        switch (dataType) {
+        case Types.CHAR:
+        case Types.VARCHAR:
+            addParamValue(parameterIndex, value);
+            break;
+        default:
+            throw TrafT4Messages.createSQLException(connection_.props_, connection_.getLocale(),
+                    "fetch_output_inconsistent", null);
+        }
 	}
 
 	public void setNCharacterStream(int parameterIndex, Reader value,
diff --git a/core/sqf/export/include/common/evl_sqlog_eventnum.h b/core/sqf/export/include/common/evl_sqlog_eventnum.h
index 96c3df9..0418c70 100644
--- a/core/sqf/export/include/common/evl_sqlog_eventnum.h
+++ b/core/sqf/export/include/common/evl_sqlog_eventnum.h
@@ -255,6 +255,12 @@
 #define MON_MONITOR_MAIN_9                  101020109
 #define MON_MONITOR_MAIN_10                 101020110
 #define MON_MONITOR_MAIN_11                 101020111
+#define MON_MONITOR_MAIN_12                 101020112
+#define MON_MONITOR_MAIN_13                 101020113
+#define MON_MONITOR_MAIN_14                 101020114
+#define MON_MONITOR_MAIN_15                 101020115
+#define MON_MONITOR_MAIN_16                 101020116
+#define MON_MONITOR_MAIN_17                 101020117
 #define MON_MONITOR_TMLEADER_1              101020201
 #define MON_MONITOR_TMLEADER_2              101020202
 #define MON_MONITOR_DEATH_HANDLER_1         101020301
@@ -895,6 +901,14 @@
 #define MON_ZCLIENT_ISZNODEEXPIRED_2        101371802
 #define MON_ZCLIENT_CHECKMYZNODE_1          101371901
 #define MON_ZCLIENT_CHECKMYZNODE_2          101371902
+#define MON_ZCLIENT_AMICONFIGUREDMASTER_1   101372101
+#define MON_ZCLIENT_AMICONFIGUREDMASTER_2   101372102
+#define MON_ZCLIENT_WAITFORANDRETURNMASTER  101372103
+#define MON_ZCLIENT_CREATEMASTERZNODE       101372104
+#define MON_ZCLIENT_WATCHMASTERNODEDELETE_1 101372105
+#define MON_ZCLIENT_WATCHMASTERNODEDELETE_2 101372106
+#define MON_ZCLIENT_WATCHMASTERNODEDELETE_3 101372107
+#define MON_ZCLIENT_CREATEORSETMASTERWATCH  101372108
 
 /* Module: zconfig.cxx = 38 */
 #define ZCONFIG_ZCONFIG_1                   101380101
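
The event numbers added above follow the encoding already visible in this header: apparently a fixed layout of the prefix 101 followed by two-digit module, function, and instance fields (e.g. MON_ZCLIENT_CREATEORSETMASTERWATCH = 101372108 is module 37, function 21, instance 08, consistent with the "Module: zconfig.cxx = 38" comment below). A minimal C++ sketch of that inferred layout; MonEventNum is a hypothetical helper, not part of the patch:

    // Layout inferred from evl_sqlog_eventnum.h constants: 101 MM FF II.
    constexpr int MonEventNum( int module, int function, int instance )
    {
        return 101000000 + module * 10000 + function * 100 + instance;
    }

    static_assert( MonEventNum( 37, 21,  8 ) == 101372108, "MON_ZCLIENT_CREATEORSETMASTERWATCH" );
    static_assert( MonEventNum(  2,  1, 12 ) == 101020112, "MON_MONITOR_MAIN_12" );
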
diff --git a/core/sqf/monitor/linux/cluster.cxx b/core/sqf/monitor/linux/cluster.cxx
index c22e8ea..83ea923 100644
--- a/core/sqf/monitor/linux/cluster.cxx
+++ b/core/sqf/monitor/linux/cluster.cxx
@@ -67,6 +67,11 @@
 extern bool IAmIntegrating;
 extern bool IAmIntegrated;
 extern bool IsRealCluster;
+extern bool IsAgentMode;
+extern bool IsMaster;
+extern bool IsMPIChild;
+extern char MasterMonitorName[MAX_PROCESS_PATH];
+extern char Node_name[MPI_MAX_PROCESSOR_NAME];
 extern bool ZClientEnabled;
 extern char IntegratingMonitorPort[MPI_MAX_PORT_NAME];
 extern char MyCommPort[MPI_MAX_PORT_NAME];
@@ -289,11 +294,12 @@
     
     if (trace_settings & (TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
     {
-        trace_printf( "%s@%d - TmReady, nid=%d, tm count=%d, soft node down=%d\n"
+        trace_printf( "%s@%d - TmReady, nid=%d, tm count=%d, soft node down=%d, LNodesCount=%d\n"
                     , method_name, __LINE__
                     , nid
                     , tmReadyCount_
-                    , MyNode->IsSoftNodeDown() );
+                    , MyNode->IsSoftNodeDown()
+                    , MyNode->GetLNodesCount() );
     }
 
     MyNode->StartPStartDPersistentDTM( nid );
@@ -352,7 +358,133 @@
     TRACE_EXIT;
 }
 
-// Assigns a new TMLeader if given pnid is same as TmLeaderNid 
+// Assign leaders as required
+// Current leaders are TM Leader and Monitor Leader
+void CCluster::AssignLeaders( int pnid, bool checkProcess )
+{
+    const char method_name[] = "CCluster::AssignLeaders";
+    TRACE_ENTRY;
+    
+    AssignTmLeader ( pnid, checkProcess );
+    AssignMonitorLeader ( pnid );
+    
+    TRACE_EXIT;
+}
+
+// Assign monitor leader in the case of failure
+void CCluster::AssignMonitorLeader( int pnid )
+{
+    const char method_name[] = "CCluster::AssignMonitorLeader";
+    TRACE_ENTRY;
+     
+    int i = 0;
+    int rc = 0;
+    
+    int monitorLeaderPNid = monitorLeaderPNid_;
+    CNode *node = NULL;
+
+    if (monitorLeaderPNid_ != pnid) 
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
+        {
+            trace_printf( "%s@%d" " - (MasterMonitor) returning, pnid %d != monitorLead %d\n"
+                        , method_name, __LINE__, pnid, monitorLeaderPNid_ );
+        }
+        TRACE_EXIT;
+        return;
+    }
+
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
+    {
+        trace_printf( "%s@%d" " - (MasterMonitor) Node "  "%d" " MonitorLeader failed!\n"
+                    , method_name, __LINE__, monitorLeaderPNid_ );
+    }
+
+    for (i=0; i<GetConfigPNodesMax(); i++)
+    {
+        monitorLeaderPNid++;
+
+        if (monitorLeaderPNid == GetConfigPNodesMax())
+        {
+            monitorLeaderPNid = 0; // restart with nid 0
+        }
+
+        if (monitorLeaderPNid == pnid)
+        {
+            continue; // this is the node that is going down, skip it
+        }
+
+        if (Node[monitorLeaderPNid] == NULL)
+        {
+            continue;
+        }
+
+        node = Node[monitorLeaderPNid];
+
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
+        {
+            trace_printf( "%s@%d - Node pnid=%d (%s), phase=%s, isSoftNodeDown=%d\n"
+                        , method_name, __LINE__
+                        , node->GetPNid()
+                        , node->GetName()
+                        , NodePhaseString(node->GetPhase())
+                        , node->IsSoftNodeDown());
+        }
+
+        if ( node->IsSpareNode() ||
+             node->IsSoftNodeDown() ||
+             node->GetState() != State_Up ||
+             node->GetPhase() != Phase_Ready )
+        {
+            continue; // skip this node for any of the above reasons 
+        }  
+
+        monitorLeaderPNid_ = node->GetPNid();
+
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
+        {
+            trace_printf("%s@%d" " - Node "  "%d" " is the new monitorLeaderPNid_." "\n", method_name, __LINE__, monitorLeaderPNid_);
+        }
+
+        if (ZClientEnabled)
+        {
+            rc = ZClient->CreateMasterZNode ( node->GetName() );  
+            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
+            {
+                trace_printf("%s@%d" " (MasterMonitor) AssignMonitorLeader CreateMasterZNode with rc = %d\n", method_name, __LINE__, rc);
+            }
+            if ( (rc == ZOK) || (rc == ZNODEEXISTS) )
+            {
+                if ( IsAgentMode )
+                {
+                    rc = ZClient->WatchMasterNode( node->GetName( ) );
+                    if ( trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC) )
+                    {
+                        trace_printf( "%s@%d" " (MasterMonitor) AssignMonitorLeader WatchMasterNode with rc = %d\n", method_name, __LINE__, rc );
+                    }
+                }
+            }
+            else
+            {
+                 if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
+                 {
+                     trace_printf("%s@%d" " (MasterMonitor) AssignMonitorLeader Unable to create or set watch\n", method_name, __LINE__);
+                 }
+                 char    buf[MON_STRING_BUF_SIZE];
+                 snprintf( buf, sizeof(buf)
+                           , "[%s], Unable to create or set watch on master node %s\n"
+                           , method_name, node->GetName() );
+                 mon_log_write(MON_ZCLIENT_CREATEORSETMASTERWATCH, SQ_LOG_ERR, buf);
+            }
+        }
+
+        break;
+    }
+    
+    TRACE_EXIT;
+}
+
+// Assigns a new TMLeader if given pnid is same as tmLeaderNid_ 
 // TmLeader is a logical node num. 
 // pnid has gone down, so if that node was previously the TM leader, a new one needs to be chosen.
 void CCluster::AssignTmLeader( int pnid, bool checkProcess )
@@ -364,15 +496,15 @@
     CNode *node = NULL;
     CProcess *process = NULL;
 
-    int TmLeaderPNid = LNode[TmLeaderNid]->GetNode()->GetPNid();
+    int TmLeaderPNid = LNode[tmLeaderNid_]->GetNode()->GetPNid();
 
     if (TmLeaderPNid != pnid) 
     {
-        node = LNode[TmLeaderNid]->GetNode();
+        node = LNode[tmLeaderNid_]->GetNode();
 
         if (checkProcess)
         {
-            process = LNode[TmLeaderNid]->GetProcessLByType( ProcessType_DTM );
+            process = LNode[tmLeaderNid_]->GetProcessLByType( ProcessType_DTM );
             if (process)
             {
                 if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
@@ -413,7 +545,7 @@
     if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
     {
         trace_printf( "%s@%d" " - Node "  "%d" " TmLeader failed! (checkProcess=%d)\n"
-                    , method_name, __LINE__, TmLeaderNid, checkProcess );
+                    , method_name, __LINE__, tmLeaderNid_, checkProcess );
     }
 
     for (i=0; i<GetConfigPNodesMax(); i++)
@@ -455,11 +587,11 @@
             continue; // skip this node for any of the above reasons 
         }  
 
-        TmLeaderNid = node->GetFirstLNode()->GetNid();
+        tmLeaderNid_ = node->GetFirstLNode()->GetNid();
 
         if (checkProcess)
         {
-            process = LNode[TmLeaderNid]->GetProcessLByType( ProcessType_DTM );
+            process = LNode[tmLeaderNid_]->GetProcessLByType( ProcessType_DTM );
             if (!process)
             {
                 continue; // skip this node no DTM process exists
@@ -468,7 +600,7 @@
 
         if (trace_settings & (TRACE_INIT | TRACE_RECOVERY | TRACE_REQUEST | TRACE_SYNC | TRACE_TMSYNC))
         {
-            trace_printf("%s@%d" " - Node "  "%d" " is the new TmLeader." "\n", method_name, __LINE__, TmLeaderNid);
+            trace_printf("%s@%d" " - Node "  "%d" " is the new TmLeader." "\n", method_name, __LINE__, tmLeaderNid_);
         }
 
         break;
@@ -487,13 +619,13 @@
       ,epollFD_(-1),
       Node (NULL),
       LNode (NULL),
-      TmSyncPNid (-1),
-      CurNodes (0),
-      CurProcs (0),
+      tmSyncPNid_ (-1),
+      currentNodes_ (0),
       configPNodesCount_ (-1),
       configPNodesMax_ (-1),
-      NodeMap (NULL),
-      TmLeaderNid (-1),
+      nodeMap_ (NULL),
+      tmLeaderNid_ (-1),
+      monitorLeaderPNid_ (-1),
       tmReadyCount_(0),
       minRecvCount_(4096),
       recvBuffer_(NULL),
@@ -529,6 +661,7 @@
     const char method_name[] = "CCluster::CCluster";
     TRACE_ENTRY;
 
+    configMaster_ = -1;
     MPI_Comm_set_errhandler(MPI_COMM_WORLD,MPI_ERRORS_RETURN);
 
     char *env = getenv("SQ_MON_CHECK_SEQNUM");
@@ -548,6 +681,9 @@
     CClusterConfig *clusterConfig = Nodes->GetClusterConfig();
     configPNodesMax_ = clusterConfig->GetPNodesConfigMax();
 
+    // get master from CClusterConfig
+    configMaster_ = clusterConfig->GetConfigMaster();
+
     // Compute minimum "sync cycles" per second.   The minimum is 1/10
     // the expected number, assuming "next_test_delay" cycles per second (where
     // next_test_delay is in microseconds).
@@ -640,14 +776,29 @@
     const char method_name[] = "CCluster::~CCluster";
     TRACE_ENTRY;
 
+    if (epollFD_ != -1)
+    {
+        close( epollFD_ );
+    }
+
+    if (commSock_ != -1)
+    {
+        close( commSock_ );
+    }
+
+    if (syncSock_ != -1)
+    {
+        close( syncSock_ );
+    }
+
     delete [] comms_;
     delete [] otherMonRank_;
     delete [] socks_;
     delete [] sockPorts_;
-    if (NodeMap)
+    if (nodeMap_)
     {
-        delete [] NodeMap;
-        NodeMap = NULL;
+        delete [] nodeMap_;
+        nodeMap_ = NULL;
     }
 
     delete [] recvBuffer2_;
@@ -677,26 +828,26 @@
 
     unsigned long long seqNum = 0;
 
-    for (int i = 0; i < GetConfigPNodesMax(); i++)
+    for (int i = 0; i < GetConfigPNodesCount(); i++)
     {
         if (trace_settings & TRACE_RECOVERY)
         {
-            trace_printf("%s@%d nodestate[%d].seq_num=%lld, seqNum=%lld\n", method_name, __LINE__, i, nodestate[i].seq_num, seqNum );
+            trace_printf("%s@%d nodestate[%d].seq_num=%lld, seqNum=%lld\n", method_name, __LINE__, i, nodestate[indexToPnid_[i]].seq_num, seqNum );
         }
-        if (nodestate[i].seq_num > 1)
+        if (nodestate[indexToPnid_[i]].seq_num > 1)
         {
             if (seqNum == 0) 
             {
-                seqNum = nodestate[i].seq_num;
+                seqNum = nodestate[indexToPnid_[i]].seq_num;
             }
             else
             {
-                assert(nodestate[i].seq_num == seqNum);
+                assert(nodestate[indexToPnid_[i]].seq_num == seqNum);
             }
         }
         if (trace_settings & TRACE_RECOVERY)
         {
-            trace_printf("%s@%d nodestate[%d].seq_num=%lld, seqNum=%lld\n", method_name, __LINE__, i, nodestate[i].seq_num, seqNum );
+            trace_printf("%s@%d nodestate[%d].seq_num=%lld, seqNum=%lld\n", method_name, __LINE__, i, nodestate[indexToPnid_[i]].seq_num, seqNum );
         }
     }
 
@@ -857,6 +1008,7 @@
             if ( ZClientEnabled )
             {
                 ZClient->WatchNodeDelete( node->GetName() );
+                ZClient->WatchNodeMasterDelete( node->GetName() );
             }
         }
     }
@@ -875,7 +1027,7 @@
     if ( Emulate_Down )
     {
         IAmIntegrated = false;
-        AssignTmLeader(pnid, false);
+        AssignLeaders(pnid, false);
     }
 
     TRACE_EXIT;
@@ -976,7 +1128,7 @@
     }
 
     IAmIntegrated = false;
-    AssignTmLeader(pnid, false);
+    AssignLeaders(pnid, false);
 
     TRACE_EXIT;
 }
@@ -1237,8 +1389,8 @@
     TRACE_ENTRY;
 
     if (trace_settings & (TRACE_REQUEST | TRACE_INIT | TRACE_RECOVERY))
-       trace_printf( "%s@%d - pnid=%d, name=%s\n"
-                   , method_name, __LINE__, pnid, node_name );
+       trace_printf( "%s@%d - pnid=%d, name=%s (MyPNID = %d)\n"
+                   , method_name, __LINE__, pnid, node_name, MyPNID );
 
     if ( pnid == -1 )
     {
@@ -2252,7 +2404,7 @@
         {
         case SyncType_TmData:
             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
-                trace_printf("%s@%d - TMSYNC(TmData) on Node %s (pnid=%d)\n", method_name, __LINE__, Node[pnid]->GetName(), pnid);
+                trace_printf("%s@%d - TMSYNC(TmData) on Node %s (pnid=%d), (phase=%d)\n", method_name, __LINE__, Node[pnid]->GetName(), pnid, MyNode->GetPhase());
             if ( ! MyNode->IsSpareNode() && MyNode->GetPhase() != Phase_Ready )
             {
                 MyNode->CheckActivationPhase();
@@ -2264,7 +2416,7 @@
                     // Begin a Slave Sync Start
                     if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                         trace_printf("%s@%d - Slave Sync Start on Node %s (pnid=%d)\n", method_name, __LINE__, Node[pnid]->GetName(), pnid);
-                    TmSyncPNid = pnid;
+                    tmSyncPNid_ = pnid;
                     Node[pnid]->SetTmSyncState( recv_msg->u.sync.state );
                     if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                     {
@@ -2278,12 +2430,12 @@
                         trace_printf("%s@%d - Sync State Collision! Node %s (pnid=%d) TmSyncState=(%d)(%s)\n", method_name, __LINE__, MyNode->GetName(), MyPNID, MyNode->GetTmSyncState(), SyncStateString( MyNode->GetTmSyncState()) );
                     if ( MyNode->GetTmSyncState() == SyncState_Continue )
                     {
-                        if ( pnid > TmSyncPNid ) 
+                        if ( pnid > tmSyncPNid_ ) 
                             // highest node id will continue
                         {
                             // They take priority ... we abort
                             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
-                                trace_printf("%s@%d - Aborting Slave Sync Start on node %s (pnid=%d)\n", method_name, __LINE__, Node[Monitor->TmSyncPNid]->GetName(), Monitor->TmSyncPNid);
+                                trace_printf("%s@%d - Aborting Slave Sync Start on node %s (pnid=%d)\n", method_name, __LINE__, Node[Monitor->tmSyncPNid_]->GetName(), Monitor->tmSyncPNid_);
                             MyNode->SetTmSyncState( SyncState_Null );
                             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                                 trace_printf("%s@%d - Node %s (pnid=%d) TmSyncState updated (%d)(%s)\n", method_name, __LINE__, MyNode->GetName(), MyPNID, MyNode->GetTmSyncState(), SyncStateString( MyNode->GetTmSyncState() ) );
@@ -2291,7 +2443,7 @@
                             // Continue with other node's Slave TmSync Start request
                             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                                 trace_printf("%s@%d - Slave Sync Start on node %s (pnid=%d)\n", method_name, __LINE__, Node[pnid]->GetName(), pnid);
-                            TmSyncPNid = pnid;
+                            tmSyncPNid_ = pnid;
                             Node[pnid]->SetTmSyncState( recv_msg->u.sync.state );
                             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                             {
@@ -2315,7 +2467,7 @@
                             // Continue with other node's Slave TmSync Start request
                             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                                 trace_printf("%s@%d - Slave Sync Start on node %s (pnid=%d)\n", method_name, __LINE__, Node[pnid]->GetName(), pnid);
-                            TmSyncPNid = pnid;
+                            tmSyncPNid_ = pnid;
                             Node[pnid]->SetTmSyncState( recv_msg->u.sync.state );
                             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                             {
@@ -2631,9 +2783,9 @@
         case SyncType_TmData:
             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
                 trace_printf("%s@%d    - TMSYNC(TmData) on Node %s (pnid=%d)\n", method_name, __LINE__, Node[MyPNID]->GetName(), MyPNID);
-            TmSyncPNid = MyPNID;
+            tmSyncPNid_ = MyPNID;
             if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
-                trace_printf("%s@%d    - Sync communicated, TmSyncPNid=%d\n", method_name, __LINE__, TmSyncPNid);
+                trace_printf("%s@%d    - Sync communicated, tmSyncPNid_=%d\n", method_name, __LINE__, tmSyncPNid_);
             if ( ! MyNode->IsSpareNode() && MyNode->GetPhase() != Phase_Ready )
             {
                 MyNode->CheckActivationPhase();
@@ -2822,7 +2974,7 @@
     int rankToPnid[worldSize];
     CClusterConfig *clusterConfig = Nodes->GetClusterConfig();
     
-    CurNodes = worldSize;
+    currentNodes_ = worldSize;
 
     if ( IsRealCluster )
     {
@@ -2871,16 +3023,49 @@
         InitServerSock();
     }
 
-    // The new monitor in a real cluster initializes all 
-    // existing nodes to a down state.
-    // ReIntegrate() will set the state to up when communication is established.
-    if ( IAmIntegrating )
+    if (trace_settings & TRACE_INIT)
     {
+        trace_printf( "%s@%d (MasterMonitor) IAmIntegrating=%d,"
+                      " IsAgentMode=%d, IsMaster=%d,"
+                      " MasterMonitorName=%s, Node_name=%s\n"
+                    , method_name, __LINE__
+                    , IAmIntegrating
+                    , IsAgentMode, IsMaster, MasterMonitorName, Node_name );
+    }
+
+    if (IAmIntegrating || IsAgentMode)
+    {
+        int TmLeaderPNid = -1;
+        if (IsMaster)
+        {
+            tmLeaderNid_ = Nodes->GetFirstNid();
+            TmLeaderPNid = LNode[tmLeaderNid_]->GetNode()->GetPNid();
+        }
+        // Monitor processes in AGENT mode in a real cluster initialize all
+        // remote nodes to a down state. The master monitor and the joining
+        // monitors will set the joining node state to up as part of the node
+        // re-integration processing as monitor processes join the cluster
+        // through the master.
         for (int i=0; i < clusterConfig->GetPNodesCount(); i++)
         {
-            if ( Node[indexToPnid_[i]] && Node[indexToPnid_[i]]->GetPNid() != MyPNID )
+            if (Node[indexToPnid_[i]])
             {
-                Node[indexToPnid_[i]]->SetState( State_Down );
+                if (Node[indexToPnid_[i]]->GetPNid() == MyPNID)
+                { // Set bit indicating node is up
+                    upNodes_.upNodes[indexToPnid_[i]/MAX_NODE_BITMASK] |= 
+                        (1ull << (indexToPnid_[i]%MAX_NODE_BITMASK));
+                }
+                else
+                { // Set node state to down   
+                    Node[indexToPnid_[i]]->SetState( State_Down );
+                    if (IsMaster)
+                    {
+                        if (TmLeaderPNid == indexToPnid_[i]) 
+                        {
+                            AssignTmLeader(indexToPnid_[i], false);
+                        }
+                    }
+                }
             }
         }
     }
@@ -2994,8 +3179,8 @@
             delete [] commPortNums;
             delete [] syncPortNums;
 
-            TmLeaderNid = Nodes->GetFirstNid();
-            int TmLeaderPNid = LNode[TmLeaderNid]->GetNode()->GetPNid();
+            tmLeaderNid_ = Nodes->GetFirstNid();
+            int TmLeaderPNid = LNode[tmLeaderNid_]->GetNode()->GetPNid();
 
             // Any nodes not in the initial MPI_COMM_WORLD are down.
             for (int i=0; i<GetConfigPNodesCount(); ++i)
@@ -3035,7 +3220,7 @@
         }
         else
         {
-            TmLeaderNid = 0;
+            tmLeaderNid_ = 0;
         }
 
         // Initialize communicators for point-to-point communications
@@ -3060,6 +3245,23 @@
         if (nodeNames) delete [] nodeNames;
     }
 
+    if ( CommType == CommType_Sockets )
+    {
+        // Allgather() cluster sockets are established as remote
+        // monitor processes join the cluster
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            for ( int i =0; i < clusterConfig->GetPNodesCount() ; i++ )
+            {
+                trace_printf( "%s@%d %s (%d), state=%s, socks_[%d]=%d\n"
+                            , method_name, __LINE__
+                            , Node[indexToPnid_[i]]->GetName()
+                            , Node[indexToPnid_[i]]->GetPNid()
+                            , StateString(Node[indexToPnid_[i]]->GetState())
+                            , indexToPnid_[i], socks_[indexToPnid_[i]]);
+            }
+        }
+    }
     if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
     {
         for ( int i =0; i < MAX_NODE_MASKS ; i++ )
@@ -3072,7 +3274,10 @@
 
     // Kill the MPICH hydra_pmi_proxy to prevent it from killing all
     // processes in cluster when mpirun or monitor processes are killed
-    kill( getppid(), SIGKILL );
+    if (!IsAgentMode  || (IsAgentMode && IsMPIChild))
+    {
+        kill( getppid(), SIGKILL );
+    }
 
     TRACE_EXIT;
 }
@@ -3623,7 +3828,7 @@
         {   // Already connected to creator monitor
             comms_[i] = intraCommCreatorMon;
             otherMonRank_[i] = 0;
-            ++CurNodes;
+            ++currentNodes_;
 
             // Set bit indicating node is up
             upNodes_.upNodes[i/MAX_NODE_BITMASK] |= (1ull << (i%MAX_NODE_BITMASK));
@@ -3717,7 +3922,7 @@
 
             comms_[i] = intraComm;
             otherMonRank_[i] = 0;
-            ++CurNodes;
+            ++currentNodes_;
             Node[i]->SetSyncPort( nodeInfo[i].syncPort );
             Node[i]->SetState( State_Up );
 
@@ -3807,10 +4012,31 @@
     TEST_POINT( TP010_NODE_UP );
 
     // Connect with my creator monitor
-    joinSock_ = Monitor->Connect( IntegratingMonitorPort );
-    if ( joinSock_ < 0 )
+    bool lv_done = false;
+    bool lv_did_not_connect_in_first_attempt = false;
+    while ( ! lv_done )
     {
-        HandleReintegrateError( joinSock_, Reintegrate_Err1, -1, NULL, true );
+        joinSock_ = Monitor->Connect( IntegratingMonitorPort );
+        if ( joinSock_ < 0 )
+        {
+            if ( IsAgentMode )
+            {
+                lv_did_not_connect_in_first_attempt = true;
+                sleep( 15 );
+            }
+            else
+            {
+                HandleReintegrateError( joinSock_, Reintegrate_Err1, -1, NULL, true );
+            }
+        }
+        else
+        {
+            if ( lv_did_not_connect_in_first_attempt )
+            {
+                sleep( 10 );
+            }
+            lv_done = true;
+        }
     }
 
     mem_log_write(CMonLog::MON_REINTEGRATE_4, MyPNID);
@@ -3937,7 +4163,7 @@
             }
 
             otherMonRank_[nodeInfo[i].pnid] = 0;
-            ++CurNodes;
+            ++currentNodes_;
 
             // Store port numbers for the node
             strncpy(commPort, nodeInfo[i].commPort, MPI_MAX_PORT_NAME);
@@ -4084,7 +4310,7 @@
             }
 
             otherMonRank_[nodeInfo[i].pnid] = 0;
-            ++CurNodes;
+            ++currentNodes_;
 
             // Store port numbers for the node
             strncpy(commPort, nodeInfo[i].commPort, MPI_MAX_PORT_NAME);
@@ -4166,8 +4392,8 @@
     {
         for (int i=0; i<pnodeCount; i++)
         {
-            if (Node[nodeInfo[i].pnid] == NULL) continue;
             if (nodeInfo[i].pnid == -1) continue;
+            if (Node[nodeInfo[i].pnid] == NULL) continue;
             trace_printf( "%s@%d - Node info for pnid=%d (%s)\n"
                           "        Node[%d] commPort=%s\n"
                           "        Node[%d] syncPort=%s\n"
@@ -4181,6 +4407,7 @@
         }
         for ( int i =0; i < pnodeCount; i++ )
         {
+            if (nodeInfo[i].pnid == -1) continue;
             trace_printf( "%s@%d socks_[%d]=%d, sockPorts_[%d]=%d\n"
                         , method_name, __LINE__
                         , nodeInfo[i].pnid, socks_[nodeInfo[i].pnid]
@@ -4280,8 +4507,6 @@
     TRACE_ENTRY;
 
     integratingPNid_ = pnid;
-    // Indicate to the commAcceptor thread to stop accepting connections
-    CommAccept.stopAccepting();
 
     TRACE_EXIT;
 }
@@ -4358,7 +4583,7 @@
                     close( socks_[pnid] );
                     socks_[pnid] = -1;
                 }
-                --CurNodes;
+                --currentNodes_;
             }
 
             if (trace_settings & TRACE_RECOVERY)
@@ -4370,7 +4595,7 @@
 
             comms_[it->pnid] = it->comm;
             otherMonRank_[it->pnid] = it->otherRank;
-            ++CurNodes;
+            ++currentNodes_;
             // Set bit indicating node is up
             upNodes_.upNodes[it->pnid/MAX_NODE_BITMASK] |= (1ull << (it->pnid%MAX_NODE_BITMASK));
 
@@ -4461,14 +4686,14 @@
                 shutdown( socks_[pnid], SHUT_RDWR);
                 close( socks_[pnid] );
                 socks_[pnid] = -1;
-                --CurNodes;
+                --currentNodes_;
             }
 
             CNode *node= Nodes->GetNode( it->pnid );
             socks_[it->pnid] = it->socket;
             sockPorts_[it->pnid] = node->GetSyncSocketPort();
             otherMonRank_[it->pnid] = it->otherRank;
-            ++CurNodes;
+            ++currentNodes_;
 
             if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
             {
@@ -5815,7 +6040,7 @@
             // Evaluate each active (up) node in the cluster
             int pnodesCount = 0;
             for (int index = 0;
-                 index < GetConfigPNodesMax() && pnodesCount < CurNodes;
+                 index < GetConfigPNodesMax() && pnodesCount < currentNodes_;
                  ++index)
             {
                 if ( nodestate[index].seq_num != 0 )
@@ -5879,11 +6104,11 @@
 
             if (trace_settings & (TRACE_SYNC | TRACE_RECOVERY | TRACE_INIT))
             {
-                trace_printf("%s@%d concurringNodes=%d, CurNodes=%d\n",
-                             method_name, __LINE__, concurringNodes, CurNodes);
+                trace_printf("%s@%d concurringNodes=%d, currentNodes_=%d\n",
+                             method_name, __LINE__, concurringNodes, currentNodes_);
             }
 
-            if (concurringNodes == CurNodes)
+            if (concurringNodes == currentNodes_)
             {   // General agreement that node is down, proceed to mark it down
 
                 CNode *downNode = Nodes->GetNode( it->exitedPnid );
@@ -5924,7 +6149,7 @@
                           "%d but only %d of %d nodes also lost the "
                           "connection.  See up: %s.  See down: %s.  So node "
                           "%d is going down (at seq #%lld).\n", method_name,
-                          it->exitedPnid, concurringNodes, CurNodes,
+                          it->exitedPnid, concurringNodes, currentNodes_,
                           setSeesUp.c_str(), setSeesDown.c_str(),
                           MyPNID, seqNum_ );
                 mon_log_write(MON_CLUSTER_VALIDATE_STATE_2, SQ_LOG_ERR, buf);
@@ -5973,7 +6198,7 @@
 
         int pnodesCount2 = 0;
         for (int remIndex = 0;
-             remIndex < GetConfigPNodesMax() && pnodesCount2 < CurNodes;
+             remIndex < GetConfigPNodesMax() && pnodesCount2 < currentNodes_;
              ++remIndex)
         {
             bool someExited = false;
@@ -6023,7 +6248,7 @@
             {  // This remote node sees node pnid as up
                 int pnodesCount3 = 0;
                 for (int exitedPNid = 0;
-                     exitedPNid < GetConfigPNodesMax() && pnodesCount3 < CurNodes;
+                     exitedPNid < GetConfigPNodesMax() && pnodesCount3 < currentNodes_;
                      ++exitedPNid)
                 {
                     CNode *exitedNode = Nodes->GetNode( /*indexToPnid_[remIndex]*/exitedPNid );
@@ -6180,8 +6405,8 @@
     if (trace_settings & TRACE_INIT)
         trace_printf("%s@%d - Added down node to list, pnid=%d, name=(%s)\n", method_name, __LINE__, downNode->GetPNid(), downNode->GetName());
 
-    // assign new TmLeader if TMLeader node is dead.
-    AssignTmLeader(pnid, false);
+    // assign new leaders if needed
+    AssignLeaders(pnid, false);
 
     // Build available list of spare nodes
     CNode *spareNode;
@@ -6441,7 +6666,7 @@
                     abort();
             }
             Node[index]->SetState( State_Down );
-            --CurNodes;
+            --currentNodes_;
             // Clear bit in set of "up nodes"
             upNodes_.upNodes[index/MAX_NODE_BITMASK] &= ~(1ull << (index%MAX_NODE_BITMASK));
         }
@@ -6513,7 +6738,7 @@
                         // Programmer bonehead!
                         abort();
                 }
-                --CurNodes;
+                --currentNodes_;
 
                 // Clear bit in set of "up nodes"
                 upNodes_.upNodes[index/MAX_NODE_BITMASK] &= ~(1ull << (index%MAX_NODE_BITMASK));
@@ -6843,14 +7068,14 @@
 
     if (trace_settings & TRACE_SYNC_DETAIL)
         trace_printf("%s@%d - Node %d shutdown level=%d, state=%s.  Process "
-                     "count=%d, internal state=%d, CurNodes=%d, "
+                     "count=%d, internal state=%d, currentNodes_=%d, "
                      "local process count=%d\n",
                      method_name, __LINE__, MyNode->GetPNid(),
                      MyNode->GetShutdownLevel(),
                      StateString(MyNode->GetState()),
                      Nodes->ProcessCount(),
                      MyNode->getInternalState(),
-                     CurNodes, MyNode->GetNumProcs());
+                     currentNodes_, MyNode->GetNumProcs());
 
     // Check if we are also done
     if (( MyNode->GetState() != State_Down    ) &&
@@ -6869,7 +7094,7 @@
                 return false;
             }
             else if ( (Nodes->ProcessCount() <=
-                      (CurNodes*MAX_PRIMITIVES))        // only WDGs alive
+                      (currentNodes_*MAX_PRIMITIVES))        // only WDGs alive
                       && !MyNode->isInQuiesceState()    // post-quiescing will
                                                         // expire WDG (cluster)
                       && !waitForWatchdogExit_ )        // WDG not yet exiting
@@ -8275,6 +8500,19 @@
         return ( -1 );
     }
 
+    int    reuse = 1;   // sockopt reuse option
+    if ( setsockopt( sock, SOL_SOCKET, SO_REUSEADDR, (char *) &reuse, sizeof(int) ) )
+    {
+        char la_buf[MON_STRING_BUF_SIZE];
+        int err = errno;
+        sprintf( la_buf, "[%s], setsockopt(SO_REUSEADDR) failed! errno=%d (%s)\n"
+               , method_name, err, strerror( err ));
+        mon_log_write(MON_CLUSTER_MKSRVSOCK_4, SQ_LOG_ERR, la_buf); 
+        close( sock );
+        return ( -1 );
+    }
+
+
     // Bind socket.
     size = sizeof(sockinfo);
     memset( (char *) &sockinfo, 0, size );
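
The AssignMonitorLeader() function added in this file picks a new monitor leader with a wrap-around scan over the configured pnids, skipping the failed node and any node that is a spare, soft-down, not up, or not in Phase_Ready. A minimal standalone sketch of that selection loop; NextLeader, nodeCount, and isEligible are illustrative names only, not part of the patch:

    // Wrap-around scan: start one past the current leader, skip the failed
    // node and ineligible nodes, return -1 when no candidate is found.
    int NextLeader( int current, int failedPnid, int nodeCount,
                    bool (*isEligible)(int) )
    {
        int candidate = current;
        for (int i = 0; i < nodeCount; i++)
        {
            candidate++;
            if (candidate == nodeCount) candidate = 0; // restart with pnid 0
            if (candidate == failedPnid) continue;     // node going down
            if (!isEligible( candidate )) continue;    // spare/down/not ready
            return candidate;
        }
        return -1;
    }
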
diff --git a/core/sqf/monitor/linux/cluster.h b/core/sqf/monitor/linux/cluster.h
index 58d3540..ff49e56 100644
--- a/core/sqf/monitor/linux/cluster.h
+++ b/core/sqf/monitor/linux/cluster.h
@@ -113,7 +113,9 @@
 #ifndef USE_BARRIER
     void ArmWakeUpSignal (void);
 #endif
+    void AssignLeaders( int pnid, bool checkProcess );
     void AssignTmLeader( int pnid, bool checkProcess );
+    void AssignMonitorLeader( int pnid );
     void stats();
     void CompleteSyncCycle()
         { syncCycle_.lock(); syncCycle_.wait(); syncCycle_.unlock(); }
@@ -122,10 +124,12 @@
 
     void DoDeviceReq(char * ldevname);
     void ExpediteDown( void );
-    inline int  GetTmLeader( void ) { return( TmLeaderNid); }
-    inline void SetTmLeader( int tmLeaderNid ) { TmLeaderNid = tmLeaderNid; } 
+    inline int  GetTmLeader( void ) { return( tmLeaderNid_ ); }
+    inline void SetTmLeader( int tmLeaderNid ) { tmLeaderNid_ = tmLeaderNid; } 
+    inline int  GetMonitorLeader( void ) { return( monitorLeaderPNid_); }
+    inline void SetMonitorLeader( int monitorLeaderPNid ) { monitorLeaderPNid_ = monitorLeaderPNid; } 
     int  GetDownedNid( void );
-    inline int GetTmSyncPNid( void ) { return( TmSyncPNid ); } // Physical Node ID of current TmSync operations master
+    inline int GetTmSyncPNid( void ) { return( tmSyncPNid_ ); } // Physical Node ID of current TmSync operations master
     void InitClusterComm(int worldSize, int myRank, int *rankToPnid);
     void addNewComm(int nid, int otherRank, MPI_Comm comm);
     void addNewSock(int nid, int otherRank, int sockFd );
@@ -177,6 +181,7 @@
     bool ReinitializeConfigCluster( bool nodeAdded, int pnid );
 
     int incrGetVerifierNum();
+    int getConfigMaster() { return configMaster_; }
 
     enum { SYNC_MAX_RESPONSIVE = 1 }; // Max seconds before sync thread is "stuck"
 
@@ -201,10 +206,11 @@
     int            syncSock_;
     int            epollFD_;
     int           *indexToPnid_;
+    int            configMaster_;
 
     CNode  **Node;           // array of nodes
     CLNode **LNode;          // array of logical nodes
-    int      TmSyncPNid;     // Physical Node ID of current TmSync operations master
+    int      tmSyncPNid_;    // Physical Node ID of current TmSync operations master
 
 
     void AddTmsyncMsg( struct sync_buffer_def *tmSyncBuffer
@@ -223,14 +229,14 @@
     CLock syncCycle_;
 
 private:
-    int     CurNodes;       // Current # of nodes in the cluster
-    int     CurProcs;       // Current # if processes alive in MPI_COMM_WORLD
+    int     currentNodes_;      // Current # of nodes in the cluster
     int     configPNodesCount_; // # of physical nodes configured
     int     configPNodesMax_;   // max # of physical nodes that can be configured
-    int    *NodeMap;        // Mapping of Node ranks to COMM_WORLD ranks
-    int     TmLeaderNid;    // Nid of currently assigned TM Leader node
-    int     tmReadyCount_;  // # of DTM processes ready for transactions
-    size_t  minRecvCount_;  // minimum size of receive buffer for allgather
+    int    *nodeMap_;           // Mapping of Node ranks to COMM_WORLD ranks
+    int     tmLeaderNid_;       // Nid of currently assigned TM Leader node
+    int     monitorLeaderPNid_; // PNid of currently assigned Monitor leader node
+    int     tmReadyCount_;      // # of DTM processes ready for transactions
+    size_t  minRecvCount_;      // minimum size of receive buffer for allgather
 
     // Pointer to array of "sync_buffer_def" structures.  Used by
     // ShareWithPeers in "Allgather" operation.
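
Several cluster.cxx hunks above set or clear per-node bits in the upNodes_ set with expressions like upNodes_.upNodes[i/MAX_NODE_BITMASK] |= (1ull << (i%MAX_NODE_BITMASK)). A minimal sketch of that idiom, assuming MAX_NODE_BITMASK is the bit width of one 64-bit array element; the sizes and names here are illustrative:

    #include <cstdint>

    enum { MAX_NODE_BITMASK = 64, MAX_NODE_MASKS = 4 }; // illustrative sizes

    struct UpNodes { uint64_t upNodes[MAX_NODE_MASKS]; };

    inline void SetUp( UpNodes &s, int pnid )   // mark node up
    { s.upNodes[pnid / MAX_NODE_BITMASK] |=  (1ull << (pnid % MAX_NODE_BITMASK)); }

    inline void SetDown( UpNodes &s, int pnid ) // mark node down
    { s.upNodes[pnid / MAX_NODE_BITMASK] &= ~(1ull << (pnid % MAX_NODE_BITMASK)); }

    inline bool IsUp( const UpNodes &s, int pnid )
    { return ( s.upNodes[pnid / MAX_NODE_BITMASK] >> (pnid % MAX_NODE_BITMASK) ) & 1ull; }
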
diff --git a/core/sqf/monitor/linux/commaccept.cxx b/core/sqf/monitor/linux/commaccept.cxx
index 21b30a6..11c12d7 100644
--- a/core/sqf/monitor/linux/commaccept.cxx
+++ b/core/sqf/monitor/linux/commaccept.cxx
@@ -556,6 +556,25 @@
 
     node= Nodes->GetNode( nodeId.nodeName );
 
+    if ( node == NULL )
+    {
+        close( joinFd );
+
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], got connection from unknown "
+                  "node %d (%s). Ignoring it.\n"
+                , method_name
+                , nodeId.pnid
+                , nodeId.nodeName);
+        mon_log_write(MON_COMMACCEPT_9, SQ_LOG_ERR, buf);
+
+        // Request is complete, begin accepting connections again
+        CommAccept.startAccepting();
+
+        return;
+    }
+
     if ( nodeId.ping )
     {
         // Reply with my node info
@@ -595,6 +614,10 @@
                     , method_name, node?node->GetName():"", ErrorMsg(rc));
             mon_log_write(MON_COMMACCEPT_19, SQ_LOG_ERR, buf);    
         }
+
+        // Request is complete, begin accepting connections again
+        CommAccept.startAccepting();
+
         return;
     }
     
@@ -607,53 +630,6 @@
                           , nodeId.creatorShellVerifier );
     }
     
-    int pnid = -1;
-    if ( node != NULL )
-    {   // Store port numbers for the node
-        char commPort[MPI_MAX_PORT_NAME];
-        char syncPort[MPI_MAX_PORT_NAME];
-        strncpy(commPort, nodeId.commPort, MPI_MAX_PORT_NAME);
-        strncpy(syncPort, nodeId.syncPort, MPI_MAX_PORT_NAME);
-        char *pch1;
-        char *pch2;
-        pnid = nodeId.pnid;
-
-        node->SetCommPort( commPort );
-        pch1 = strtok (commPort,":");
-        pch1 = strtok (NULL,":");
-        node->SetCommSocketPort( atoi(pch1) );
-
-        node->SetSyncPort( syncPort );
-        pch2 = strtok (syncPort,":");
-        pch2 = strtok (NULL,":");
-        node->SetSyncSocketPort( atoi(pch2) );
-
-        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
-        {
-            trace_printf( "%s@%d - Setting node %d (%s), commPort=%s(%d), syncPort=%s(%d)\n"
-                        , method_name, __LINE__
-                        , node->GetPNid()
-                        , node->GetName()
-                        , pch1, atoi(pch1)
-                        , pch2, atoi(pch2) );
-        }
-    }
-    else
-    {
-        close( joinFd );
-
-        char buf[MON_STRING_BUF_SIZE];
-        snprintf( buf, sizeof(buf)
-                , "[%s], got connection from unknown "
-                  "node %d (%s). Ignoring it.\n"
-                , method_name
-                , nodeId.pnid
-                , nodeId.nodeName);
-        mon_log_write(MON_COMMACCEPT_9, SQ_LOG_ERR, buf);
-
-        return;
-    }
-
     // Sanity check, re-integrating node must be down
     if ( node->GetState() != State_Down )
     {
@@ -672,9 +648,43 @@
                 , StateString(node->GetState()));
         mon_log_write(MON_COMMACCEPT_10, SQ_LOG_ERR, buf);
 
+        // Request is complete, begin accepting connections again
+        CommAccept.startAccepting();
+
         return;
     }
 
+    int pnid = -1;
+
+    // Store port numbers for the node
+    char commPort[MPI_MAX_PORT_NAME];
+    char syncPort[MPI_MAX_PORT_NAME];
+    strncpy(commPort, nodeId.commPort, MPI_MAX_PORT_NAME);
+    strncpy(syncPort, nodeId.syncPort, MPI_MAX_PORT_NAME);
+    char *pch1;
+    char *pch2;
+    pnid = nodeId.pnid;
+
+    node->SetCommPort( commPort );
+    pch1 = strtok (commPort,":");
+    pch1 = strtok (NULL,":");
+    node->SetCommSocketPort( atoi(pch1) );
+
+    node->SetSyncPort( syncPort );
+    pch2 = strtok (syncPort,":");
+    pch2 = strtok (NULL,":");
+    node->SetSyncSocketPort( atoi(pch2) );
+
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+    {
+        trace_printf( "%s@%d - Setting node %d (%s), commPort=%s(%d), syncPort=%s(%d)\n"
+                    , method_name, __LINE__
+                    , node->GetPNid()
+                    , node->GetName()
+                    , pch1, atoi(pch1)
+                    , pch2, atoi(pch2) );
+    }
+
     mem_log_write(CMonLog::MON_CONNTONEWMON_4, pnid);
 
     if ( MyNode->IsCreator() )
@@ -916,6 +926,8 @@
             interComm = MPI_COMM_NULL;
             rc = MPI_Comm_accept( MyCommPort, MPI_INFO_NULL, 0, MPI_COMM_SELF,
                                   &interComm );
+            // Stop accepting connections until this request completes
+            CommAccept.stopAccepting();
         }
         else
         {
@@ -988,6 +1000,8 @@
     
             mem_log_write(CMonLog::MON_CONNTONEWMON_1);
             joinFd = Monitor->AcceptCommSock();
+            // Stop accepting connections until this request completes
+            CommAccept.stopAccepting();
         }
         else
         {
diff --git a/core/sqf/monitor/linux/mlio.cxx b/core/sqf/monitor/linux/mlio.cxx
index 61803f8..b62cd46 100644
--- a/core/sqf/monitor/linux/mlio.cxx
+++ b/core/sqf/monitor/linux/mlio.cxx
@@ -1261,7 +1261,12 @@
   if (cmid == -1)
   {
       if (trace_settings & TRACE_INIT)
-         trace_printf("%s@%d" " failed shmget("  "%d" "), errno="  "%d" "\n", method_name, __LINE__, (shsize), errno);
+      {
+          int err = errno;
+          trace_printf( "%s@%d" " failed shmget(%d), errno=%d (%s)\n"
+                      , method_name, __LINE__
+                      , (shsize), err, strerror(err) );
+      }
       if ( errno == EEXIST)
       {
           // and try getting it with a smaller size
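
The mlio.cxx change applies the usual errno-capture idiom: copy errno into a local immediately after the failing call, since later calls (including the tracing itself) may overwrite it. A minimal standalone sketch of the idiom (TryShmget is a hypothetical name):

    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <sys/shm.h>

    void TryShmget( key_t key, size_t size )
    {
        int id = shmget( key, size, 0640 );
        if ( id == -1 )
        {
            int err = errno; // capture before any call that might clobber it
            fprintf( stderr, "shmget(%zu) failed, errno=%d (%s)\n",
                     size, err, strerror( err ) );
        }
    }
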
diff --git a/core/sqf/monitor/linux/monitor.cxx b/core/sqf/monitor/linux/monitor.cxx
index c5dd28b..124b1ff 100755
--- a/core/sqf/monitor/linux/monitor.cxx
+++ b/core/sqf/monitor/linux/monitor.cxx
@@ -53,6 +53,7 @@
 #include "tmsync.h"
 #include "cluster.h"
 #include "monitor.h"
+#include "props.h"
 
 #ifdef DMALLOC
 #include "dm.h"
@@ -99,12 +100,16 @@
 char Node_name[MPI_MAX_PROCESSOR_NAME] = {'\0'};
 sigset_t SigSet;
 bool Emulate_Down = false;
-long next_test_delay = 10000; // in usec.
-
+long next_test_delay = 100000; // in usec. (default 100 msec)
+CClusterConfig *ClusterConfig = NULL;
 bool IAmIntegrating = false;
 bool IAmIntegrated = false;
 char IntegratingMonitorPort[MPI_MAX_PORT_NAME] = {'\0'};
 bool IsRealCluster = true;
+bool IsAgentMode = false;
+bool IsMaster = false;
+bool IsMPIChild = false;
+char MasterMonitorName[MAX_PROCESS_PATH]= {'\0'};
 CommType_t CommType = CommType_Undefined;
 bool SMSIntegrating = false;
 int  CreatorShellPid = -1;
@@ -524,6 +529,7 @@
     const char method_name[] = "CMonitor::ProcCopy";
     TRACE_ENTRY;
 
+    int  stringDataLen = 0;
     struct clone_def *procObj = (struct clone_def *)bufPtr;
 
     procObj->nid = process->GetNid();
@@ -555,36 +561,94 @@
                         , process->GetPid()
                         , process->GetVerifier() );
 
-    char * stringData = &procObj->stringData;
+    char *stringData = &procObj->stringData;
 
-    // Copy the program name
-    procObj->nameLen = strlen(process->GetName()) + 1;
-    memcpy(stringData, process->GetName(),  procObj->nameLen );
-    stringData += procObj->nameLen;
+    if (strlen(process->GetName()))
+    {
+        // Copy the program name
+        procObj->nameLen = strlen(process->GetName()) + 1;
+        memcpy(stringData, process->GetName(),  procObj->nameLen );
+        stringData += procObj->nameLen;
+        stringDataLen = procObj->nameLen;
+    }
+    else
+    {
+        procObj->nameLen = 0;
+    }
 
-    // Copy the port
-    procObj->portLen = strlen(process->GetPort()) + 1;
-    memcpy(stringData, process->GetPort(),  procObj->portLen );
-    stringData += procObj->portLen;
+    if (strlen(process->GetPort()))
+    {
+        // Copy the port
+        procObj->portLen = strlen(process->GetPort()) + 1;
+        memcpy(stringData, process->GetPort(),  procObj->portLen );
+        stringData += procObj->portLen;
+        stringDataLen += procObj->portLen;
+    }
+    else
+    {
+        procObj->portLen = 0;
+    }
 
     if (process->IsPersistent())
     {
-        // Copy the standard in file name
-        procObj->infileLen = strlen(process->infile()) + 1;
-        memcpy(stringData, process->infile(), procObj->infileLen);
-        stringData += procObj->infileLen;
+        if (strlen(process->infile()))
+        {
+            // Copy the standard in file name
+            procObj->infileLen = strlen(process->infile()) + 1;
+            memcpy(stringData, process->infile(), procObj->infileLen);
+            stringData += procObj->infileLen;
+            stringDataLen += procObj->infileLen;
+        }
+        else
+        {
+            procObj->infileLen = 0;
+        }
 
-        // Copy the standard out file name
-        procObj->outfileLen = strlen(process->outfile()) + 1;
-        memcpy(stringData, process->outfile(),  procObj->outfileLen );
-        stringData += procObj->outfileLen;
+        if (strlen(process->outfile()))
+        {
+            // Copy the standard out file name
+            procObj->outfileLen = strlen(process->outfile()) + 1;
+            memcpy(stringData, process->outfile(),  procObj->outfileLen );
+            stringData += procObj->outfileLen;
+            stringDataLen += procObj->outfileLen;
+        }
+        else
+        {
+            procObj->outfileLen = 0;
+        }
 
-        // Copy the program argument strings
         procObj->argvLen =  process->userArgvLen();
-        memcpy(stringData, process->userArgv(), procObj->argvLen);
-        stringData += procObj->argvLen;
+        if (procObj->argvLen)
+        {
+            // Copy the program argument strings
+            memcpy(stringData, process->userArgv(), procObj->argvLen);
+            stringData += procObj->argvLen;
+            stringDataLen += procObj->argvLen;
+        }
 
-        procObj->persistent = true; 
+        procObj->persistent = true;
+
+        if (trace_settings & (TRACE_REQUEST | TRACE_INIT | TRACE_RECOVERY))
+                trace_printf( "%s@%d - Packing process string data:\n"
+                              "        name(%d)       =%s\n"
+                              "        port(%d)       =%s\n"
+                              "        infile(%d)     =%s\n"
+                              "        outfile(%d)    =%s\n"
+                              "        userArgv(%d)   =%s\n"
+                              "        stringData(%d) =%s\n"
+                            , method_name, __LINE__
+                            , procObj->nameLen
+                            , process->GetName()
+                            , procObj->portLen
+                            , process->GetPort()
+                            , procObj->infileLen
+                            , process->infile()
+                            , procObj->outfileLen
+                            , process->outfile()
+                            , procObj->argvLen
+                            , procObj->argvLen?process->userArgv():"" 
+                            , stringDataLen
+                            , stringDataLen?&procObj->stringData:"" );
     }
     else
     {
@@ -658,6 +722,9 @@
 
     CNode * node = NULL;
     CProcess * process = NULL;
+    int  stringDataLen;
+    char *name = NULL;
+    char *port = NULL;
     char *infile = NULL;
     char *outfile = NULL;
     char *userargv = NULL;
@@ -671,45 +738,72 @@
     {
         procObj = (struct clone_def *)buffer;
 
+        stringDataLen = 0;
         stringData = &procObj->stringData;
   
         node = Nodes->GetLNode (procObj->nid)->GetNode();
 
+        if (procObj->nameLen)
+        {
+            name = &procObj->stringData;
+            stringDataLen += procObj->nameLen;
+        }
+          
+        if (procObj->portLen)
+        {
+            port = &stringData[stringDataLen];
+            stringDataLen += procObj->portLen;
+        }
+          
         if (procObj->infileLen)
         {
-            infile = &stringData[procObj->nameLen + procObj->portLen];
-        }
-        else
-        {
-            infile = NULL;
+            infile = &stringData[stringDataLen];
+            stringDataLen += procObj->infileLen;
         }
           
         if (procObj->outfileLen)
         {
-            outfile = &stringData[procObj->nameLen + procObj->portLen + procObj->infileLen];
-        }
-        else
-        {
-            outfile = NULL;
+            outfile = &stringData[stringDataLen];
+            stringDataLen += procObj->outfileLen;
         }
 
         if (procObj->argvLen)
         {
-            userargv = &stringData[procObj->nameLen + procObj->portLen 
-                                    + procObj->infileLen + procObj->outfileLen];
+            userargv = &stringData[stringDataLen];
+            stringDataLen += procObj->argvLen;
         }
-        else
-        {
-            userargv = NULL;
-        }
+
+        if (trace_settings & (TRACE_REQUEST | TRACE_INIT | TRACE_RECOVERY))
+                trace_printf( "%s@%d - Unpacking process string data:\n"
+                              "        stringData(%d) =%s\n"
+                              "        name(%d)       =%s\n"
+                              "        port(%d)       =%s\n"
+                              "        infile(%d)     =%s\n"
+                              "        outfile(%d)    =%s\n"
+                              "        userArgc       =%d\n"
+                              "        userArgv(%d)   =%s\n"
+                            , method_name, __LINE__
+                            , stringDataLen
+                            , stringDataLen?&procObj->stringData:""
+                            , procObj->nameLen
+                            , procObj->nameLen?name:""
+                            , procObj->portLen
+                            , procObj->portLen?port:""
+                            , procObj->infileLen
+                            , procObj->infileLen?infile:""
+                            , procObj->outfileLen
+                            , procObj->outfileLen?outfile:""
+                            , procObj->argc
+                            , procObj->argvLen
+                            , procObj->argvLen?userargv:"" );
 
         process = node->CloneProcess (procObj->nid,
                                       procObj->type,
                                       procObj->priority,
                                       procObj->backup,
                                       procObj->unhooked,
-                                      &stringData[0], // process name
-                                      &stringData[procObj->nameLen],  // port
+                                      procObj->nameLen?name:(char *)"",
+                                      procObj->portLen?port:(char *)"",
                                       procObj->os_pid,
                                       procObj->verifier, 
                                       procObj->parent_nid,
@@ -720,8 +814,8 @@
                                       procObj->pathStrId,
                                       procObj->ldpathStrId,
                                       procObj->programStrId,
-                                      infile, 
-                                      outfile,
+                                      procObj->infileLen?infile:(char *)"",
+                                      procObj->outfileLen?outfile:(char *)"",
                                       &procObj->creation_time);
 
         if ( process && procObj->argvLen )
@@ -734,8 +828,7 @@
             process->SetPersistent(true);
         }
 
-        buffer = &stringData[procObj->nameLen + procObj->portLen + procObj->infileLen 
-                              + procObj->outfileLen + procObj->argvLen];
+        buffer = &stringData[stringDataLen];
     }
 
     TRACE_EXIT;
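
The ProcCopy/clone-unpack changes above fix the string packing protocol: the old code assumed every field (name, port, infile, outfile, argv) was always present, while the new code writes a length of 0 for an absent field and both sides advance a single running offset. A minimal sketch of the scheme with hypothetical helper names (PackField/UnpackField):

    #include <cstring>

    // Pack: copy s (including its NUL) only when non-empty; record the
    // length (0 means "field absent") and return the bytes consumed.
    int PackField( char *dst, const char *s, int &lenField )
    {
        lenField = ( s && s[0] ) ? (int) strlen( s ) + 1 : 0;
        if ( lenField ) memcpy( dst, s, lenField );
        return lenField;
    }

    // Unpack: return the field ("" when absent) and advance the offset.
    const char *UnpackField( const char *stringData, int lenField, int &offset )
    {
        const char *s = lenField ? &stringData[offset] : "";
        offset += lenField;
        return s;
    }
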
@@ -777,9 +870,9 @@
     TRACE_EXIT;
 }
 
-void CMonitor::CreateZookeeperClient( void )
+void CreateZookeeperClient( void )
 {
-    const char method_name[] = "CMonitor::CreateZookeeperClient";
+    const char method_name[] = "CreateZookeeperClient";
     TRACE_ENTRY;
 
     if ( ZClientEnabled )
@@ -873,9 +966,9 @@
     TRACE_EXIT;
 }
 
-void CMonitor::StartZookeeperClient( void )
+void StartZookeeperClient( void )
 {
-    const char method_name[] = "CMonitor::StartZookeeperClient";
+    const char method_name[] = "StartZookeeperClient";
     TRACE_ENTRY;
 
     int rc = -1;
@@ -955,19 +1048,71 @@
     char temp_fname[MAX_PROCESS_PATH];
     char buf[MON_STRING_BUF_SIZE];
     unsigned int initSleepTime = 1; // 1 second
+
     mallopt(M_ARENA_MAX, 4); // call to limit the number of arena's of  monitor to 4.This call doesn't seem to have any effect !
  
     CALL_COMP_DOVERS(monitor, argc, argv);
 
     const char method_name[] = "main";
 
+    if (argc < 2) {
+      printf("error: monitor needs an argument...exiting...\n");
+      exit(0);
+    }
+
+    int lv_arg_index = 1;
+    while ( lv_arg_index < argc )
+    {
+        // In installations like Cloudera Manager, the monitor is started in AGENT mode
+        if ( strcmp( argv[lv_arg_index], "COLD_AGENT" ) == 0 )
+        {
+            IsAgentMode = true;
+        }
+
+        lv_arg_index++;
+    }
+
     // Set flag to indicate whether we are operating in a real cluster
     // or a virtual cluster.   This is used throughout the monitor when
     // behavior differs for a real vs. virtual cluster environment.
-    if ( getenv("SQ_VIRTUAL_NODES") )
+    if ( !IsAgentMode )
     {
-        IsRealCluster = false;
-        Emulate_Down = true;
+        if ( getenv( "SQ_VIRTUAL_NODES" ) )
+        {
+            IsRealCluster = false;
+            Emulate_Down = true;
+        }
+        if (IsRealCluster)
+        {
+            // The monitor processes may be started by MPIrun utility
+            env = getenv("SQ_MON_CREATOR");
+            if ( env != NULL && strcmp(env, "MPIRUN") == 0 )
+            {
+                IsMPIChild = true;
+            }
+            // The monitor can be set to run in AGENT mode
+            env = getenv("SQ_MON_RUN_MODE");
+            if ( env != NULL && strcmp(env, "AGENT") == 0 )
+            {
+                IsAgentMode = true;
+            }
+        }
+    }
+
+    if ( IsAgentMode )
+    {
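+        // In agent mode, read the monitor.env properties file and export
+        // each key/value pair into the monitor's environment.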
+        MON_Props xprops( true );
+        xprops.load( "monitor.env" );
+        MON_Smap_Enum xenum( &xprops );
+        while ( xenum.more( ) )
+        {
+            char *xkey = xenum.next( );
+            const char *xvalue = xprops.get( xkey );
+            if ( xkey && xkey[0] && xvalue )
+            {
+                setenv( xkey, xvalue, 1 );
+            }
+        }
     }
 
     MonLog = new CMonLog( "log4cxx.monitor.mon.config", "MON", "alt.mon", -1, -1, getpid(), "$MONITOR" );
@@ -1152,7 +1297,7 @@
             abort();
     }
 
-    if (argc > 3 && strcmp (argv[2], "-integrate") == 0)
+    if ((!IsAgentMode) && (argc > 3 && strcmp (argv[2], "-integrate") == 0))
     {
         switch( CommType )
         {
@@ -1169,13 +1314,13 @@
                 }
                 break;
             case CommType_Sockets:
-                if ( isdigit (*argv[3]) )
+                if ( IsAgentMode || isdigit (*argv[3]) )
                 {
                     // In agent mode and when re-integrating (node up), all
                     // monitors processes start as a cluster of 1 and join to the 
                     // creator monitor to establish the real cluster.
-                    // Therefore, MyPNID will always be zero when in and
-                    // it is necessary to use the node name to obtain the correct
+                    // Therefore, MyPNID will always be zero, and it is
+                    // necessary to use the node name to obtain the correct
                     // <pnid> from the configuration which occurs when creating the
                     // CMonitor object down below. By setting MyPNID to -1, when the 
                     // CCluster::InitializeConfigCluster() invoked during the creation
@@ -1218,8 +1363,15 @@
         // Trace cannot be specified on startup command but need to
         // check for trace environment variable settings.
         MonTrace->mon_trace_init("0", NULL);
+
     }
-    else 
+
+    if (IsAgentMode)
+    {
+        CreatorShellPid = 1000; // per monitor.sh
+        CreatorShellVerifier = 0;
+    }
+
     if (argc == 3 && isdigit(*argv[2]) )
     {
         MonTrace->mon_trace_init(argv[2], "STDOUT");
@@ -1310,8 +1462,12 @@
        MonStats->MonitorBusyIncr();
 
     snprintf(buf, sizeof(buf),
-                 "[CMonitor::main], %s, Started! CommType: %s\n"
-                , CALL_COMP_GETVERS2(monitor), CommTypeString( CommType ));
+                 "[CMonitor::main], %s, Started! CommType: %s (%s%s%s)\n"
+                , CALL_COMP_GETVERS2(monitor)
+                , CommTypeString( CommType )
+                , IsRealCluster?"RealCluster":"VirtualCluster"
+                , IsAgentMode?"/AgentMode":""
+                , IsMPIChild?"/MPIChild":"" );
     mon_log_write(MON_MONITOR_MAIN_3, SQ_LOG_INFO, buf);
        
 #ifdef DMALLOC
@@ -1332,11 +1488,230 @@
         // Create thread for monitoring redirected i/o.
         // This is also used for monitor logs, so start it early. 
         Redirector.start();
+        
+        // Create global configuration now
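+        // (moved here from CNodeContainer::LoadConfig so the configuration
+        // is available before the node container is created; see the
+        // pnode.cxx change below)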
+        ClusterConfig = new CClusterConfig();
+        if (ClusterConfig)
+        {
+            bool traceEnabled = (trace_settings & TRACE_TRAFCONFIG) ? true : false;
+            if (ClusterConfig->Initialize( traceEnabled, MonTrace->getTraceFileName()))
+            {
+                if (!ClusterConfig->LoadConfig())
+                {
+                    char la_buf[MON_STRING_BUF_SIZE];
+                    sprintf(la_buf, "[%s], Failed to load cluster configuration.\n", method_name);
+                    mon_log_write(MON_MONITOR_MAIN_12, SQ_LOG_CRIT, la_buf);
+
+                    abort();
+                }
+            }
+            else
+            {
+                char la_buf[MON_STRING_BUF_SIZE];
+                sprintf(la_buf, "[%s], Failed to open cluster configuration.\n", method_name);
+                mon_log_write(MON_MONITOR_MAIN_13, SQ_LOG_CRIT, la_buf);
+
+                abort();
+            }
+        }
+        else
+        {
+            char la_buf[MON_STRING_BUF_SIZE];
+            sprintf(la_buf, "[%s], Failed to allocate cluster configuration.\n", method_name);
+            mon_log_write(MON_MONITOR_MAIN_14, SQ_LOG_CRIT, la_buf);
+
+            abort();
+        }
 
-        // CNodeContainer loads static configuration from database
-        Nodes = new CNodeContainer ();
+        // Set up zookeeper and determine the master
+        if ( IsAgentMode || IsRealCluster )
+        {
+            // Zookeeper client is enabled only in agent mode or a real cluster
+            env = getenv("SQ_MON_ZCLIENT_ENABLED");
+
+            if ( env && isdigit(*env) )
+            {
+                if ( strcmp(env,"0")==0 )
+                {
+                    ZClientEnabled = false;
+                }
+            }
+
+            if ( ZClientEnabled )
+            {
+                CreateZookeeperClient( );
+            }
+        }
+        else
+        {
+            ZClientEnabled = false;
+        }
+
+        if (IsAgentMode)
+        {
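+            // Determine the master monitor: prefer the master znode already
+            // published in Zookeeper; if none exists yet, fall back to the
+            // configured master from the cluster configuration.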
+            if ((ZClientEnabled) && (ZClient != NULL))
+            {
+                // Do not wait, just see if one exists
+                const char *masterMonitor = ZClient->WaitForAndReturnMaster(false);
+
+                if (masterMonitor)
+                {
+                    strcpy (MasterMonitorName, masterMonitor);
+                    // Unfortunately, we have to determine whether we are the
+                    // master here, before the rest of the monitor is set up.
+                    IsMaster = ( strcmp( Node_name, masterMonitor ) == 0 );
+                }
+                else
+                {
+                    strcpy( MasterMonitorName, ClusterConfig->GetConfigMasterByName() );
+                    IsMaster = ( strcmp( Node_name, MasterMonitorName ) == 0 );
+                }
+            }
+        }
+
+        if (IsAgentMode)
+        {
+            if (!IsMaster)
+            {
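+                // Not the master: integrate with (join) the existing cluster
+                // through the master monitor's comm port.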
+                MyPNID = -1;
+                SMSIntegrating = IAmIntegrating = true;
+                char *monitorPort = getenv ("MONITOR_COMM_PORT");
+                if (monitorPort)
+                {
+                    strcpy( IntegratingMonitorPort, MasterMonitorName);
+                    strcat( IntegratingMonitorPort, ":");
+                    strcat( IntegratingMonitorPort, monitorPort);
+                }
+                if (trace_settings & TRACE_INIT)
+                {
+                    trace_printf( "%s@%d (MasterMonitor) IsAgentMode = TRUE, I am NOT the master, "
+                                  "MyPNID=%d, master port=%s\n"
+                                , method_name, __LINE__
+                                , MyPNID, IntegratingMonitorPort );
+                }
+            }
+            else
+            {
+                if (trace_settings & TRACE_INIT)
+                {
+                    trace_printf( "%s@%d (MasterMonitor) IsAgentMode = TRUE, I am the master, MyPNID=%d\n"
+                                , method_name, __LINE__, MyPNID );
+                }
+                IAmIntegrating = false; 
+            }
+        }
+        Nodes = new CNodeContainer ();
         Config = new CConfigContainer ();
         Monitor = new CMonitor (procTermSig);
+
+        if ( IsAgentMode )
+        {
+            if (trace_settings & TRACE_INIT)
+            {
+                trace_printf( "%s@%d MyPNID=%d\n"
+                            , method_name, __LINE__, MyPNID );
+            }
+            MonLog->setPNid( MyPNID );
+
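+            // Resolve this monitor's node and the master monitor's node so
+            // the monitor leader can be established before startup proceeds.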
+            CNode *myNode = Nodes->GetNode(MyPNID);
+            const char *masterMonitor=NULL;
+            if (myNode == NULL)
+            {
+                char la_buf[MON_STRING_BUF_SIZE];
+                sprintf( la_buf
+                       , "[%s], Failed to get my Node, MyPNID=%d\n"
+                       , method_name, MyPNID );
+                mon_log_write(MON_MONITOR_MAIN_15, SQ_LOG_CRIT, la_buf);
+                
+                abort();
+            }
+            
+            if ((ZClientEnabled) && (ZClient != NULL))
+            {
+                CNode *masterNode = Nodes->GetNode(MasterMonitorName);
+                if (!masterNode)
+                {
+                    if (trace_settings & TRACE_INIT)
+                    {
+                        trace_printf( "%s@%d (MasterMonitor) IsMaster == %d, masterNode is NULL, "
+                                      "with MasterMonitorName %s\n"
+                                    , method_name, __LINE__, IsMaster, MasterMonitorName );
+                    }
+                    char la_buf[MON_STRING_BUF_SIZE];
+                    sprintf(la_buf, "[%s], Failed to get my Master Node.\n", method_name);
+                    mon_log_write(MON_MONITOR_MAIN_16, SQ_LOG_CRIT, la_buf);
+
+                    abort();
+                }
+                else
+                {
+                    if (trace_settings & TRACE_INIT)
+                    {
+                        trace_printf( "%s@%d (MasterMonitor) IsMaster == %d, masterNode=%s\n"
+                                    , method_name, __LINE__, IsMaster, masterNode->GetName() );
+                    }
+                }
+                Monitor->SetMonitorLeader( masterNode->GetPNid() );
+                if (MyPNID == masterNode->GetPNid())
+                {
+                    ZClient->CreateMasterZNode( myNode->GetName() );
+                    strcpy( MasterMonitorName, myNode->GetName() );
+                    if (trace_settings & TRACE_INIT)
+                    {
+                        trace_printf( "%s@%d (MasterMonitor) IsMaster == %d, set monitor lead to %d\n"
+                                    , method_name, __LINE__, IsMaster, MyPNID );
+                    }
+                }
+                else
+                {
+                    masterMonitor = ZClient->WaitForAndReturnMaster(true);
+                    masterNode = NULL;
+                    if (masterMonitor)
+                    {
+                        strcpy( MasterMonitorName, masterMonitor );
+                        masterNode = Nodes->GetNode(MasterMonitorName);
+                    }
+
+                    if (masterNode)
+                    {
+                        if (trace_settings & TRACE_INIT)
+                        {
+                            trace_printf( "%s@%d (MasterMonitor) IsMaster == %d, set monitor lead to %d\n"
+                                        , method_name, __LINE__, IsMaster, masterNode->GetPNid() );
+                        }
+                        Monitor->SetMonitorLeader( masterNode->GetPNid() );
+                    }
+                    else
+                    {
+                        if (trace_settings & TRACE_INIT)
+                        {
+                            trace_printf( "%s@%d (MasterMonitor) IsMaster == %d, masterNode is NULL, "
+                                          "with MasterMonitorName %s\n"
+                                        , method_name, __LINE__, IsMaster, MasterMonitorName );
+                        }
+                        char la_buf[MON_STRING_BUF_SIZE];
+                        sprintf(la_buf, "[%s], Failed to get my Master Node.\n", method_name);
+                        mon_log_write(MON_MONITOR_MAIN_17, SQ_LOG_CRIT, la_buf);
+
+                        abort();
+                    }
+                }
+            }
+        }
         if (!IAmIntegrating)
         {
             Config->Init ();
@@ -1405,7 +1780,6 @@
         {
             strcpy (Node_name, myNode->GetName()); 
         }
-        
         // create with no caching, user read/write, group read/write, other read
         fd = open( port_fname
                    , O_RDWR | O_TRUNC | O_CREAT | O_DIRECT 
@@ -1451,7 +1825,6 @@
                 MPI_Abort(MPI_COMM_SELF,99);
         }
         free( ioBuffer );
-
         int ret = SQ_theLocalIOToClient->initWorker();
         if (ret)
         {
@@ -1478,33 +1851,7 @@
                 printf("%s@%d" " RLIMIT_SIGPENDING cur=%d, max=%d\n", method_name, __LINE__, (int)Rl.rlim_cur, (int)Rl.rlim_max);
             }
         }
-
-        if ( IsRealCluster )
-        {
-            // Zookeeper client is enabled only in a real cluster
-            env = getenv("SQ_MON_ZCLIENT_ENABLED");
-            if ( env )
-            {
-                if ( env && isdigit(*env) )
-                {
-                    if ( strcmp(env,"0")==0 )
-                    {
-                        ZClientEnabled = false;
-                    }
-                }
-            }
-
-            if ( ZClientEnabled )
-            {
-                Monitor->CreateZookeeperClient();
-            }
-        }
-        else
-        {
-            ZClientEnabled = false;
-        }
-    
-        if ( IAmIntegrating )
+        if ( IAmIntegrating )
         {
             // This monitor is integrating to (joining) an existing cluster
             Monitor->ReIntegrate( 0 );
@@ -1514,7 +1861,7 @@
                 trace_printf("%s@%d" " After UpdateCluster" "\n", method_name, __LINE__);
         }
         else
         {
             Monitor->EnterSyncCycle();
             done = Monitor->exchangeNodeData();
             Monitor->ExitSyncCycle();
@@ -1530,7 +1877,18 @@
     {
         if ( ZClientEnabled )
         {
-            Monitor->StartZookeeperClient();
+            StartZookeeperClient();
+            // Set watch for the master monitor
+            if (IsAgentMode)
+            {
+                ZClient->WatchMasterNode( MasterMonitorName );
+            }
+            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+            {
+                trace_printf( "%s@%d (MasterMonitor) set watch for MasterMonitorName %s\n"
+                            , method_name, __LINE__, MasterMonitorName );
+            }
         }
     }
 
diff --git a/core/sqf/monitor/linux/monitor.h b/core/sqf/monitor/linux/monitor.h
index 1b44c57..49308b9 100644
--- a/core/sqf/monitor/linux/monitor.h
+++ b/core/sqf/monitor/linux/monitor.h
@@ -63,7 +63,6 @@
     ~CMonitor( void );
 
     bool  CompleteProcessStartup( struct message_def *msg );
-    void  CreateZookeeperClient( void );
     void  IncOpenCount(void);
     void  IncNoticeCount(void);
     void  IncProcessCount(void);
@@ -71,7 +70,6 @@
     void  DecrNoticeCount(void);
     void  DecrProcessCount(void);
     void  StartPrimitiveProcesses( void );  
-    void  StartZookeeperClient( void );
     void  openProcessMap ( void );
     void  writeProcessMapEntry ( const char * buf );
     void  writeProcessMapBegin( const char *name
diff --git a/core/sqf/monitor/linux/msgdef.h b/core/sqf/monitor/linux/msgdef.h
index 3f5c8c8..3532ac8 100644
--- a/core/sqf/monitor/linux/msgdef.h
+++ b/core/sqf/monitor/linux/msgdef.h
@@ -61,9 +61,11 @@
                                // NOTE: Increase with caution as this number
                                //  is also used to gather local CPU statistics
                                //  and a large number may degrade performance
-#define MAX_NODES        256   // This can be higher when needed and will
+#define MAX_NODES TC_NODES_MAX // This can be higher when needed and will
                                // have performance implications
-                               // Increment by 64 to match node state bitmask
+                               // NOTE: Must increment by 64 to match node state
+                               //       bitmask. See trafconfig.h TC_NODES_MAX in
+                               //       Trafodion Configuration API
 #define MAX_LNODES_PER_NODE 1  // The 1 is a per physical node limit 
                                // (it can be more, but it is not currently used)
 #define MAX_LNODES       (MAX_NODES*MAX_LNODES_PER_NODE)  
@@ -211,20 +213,9 @@
     RoleType_Aggregation = 0x0002,          // Maps to ZoneType_Aggregation, Backend or Any
     RoleType_Storage     = 0x0004           // Maps to ZoneType_Storage, Backend or Any
 } RoleType;
-#if 0
-typedef enum {
-    ZoneType_Undefined   = 0x0000,          // No zone type defined
-    ZoneType_Edge        = 0x0001,          // Zone of service only nodes
-    ZoneType_Aggregation = 0x0002,          // Zone of compute only nodes
-    ZoneType_Storage     = 0x0004,          // Zone of storage only nodes
-    ZoneType_Excluded    = 0x0010,          // Excluded cores
-    ZoneType_Any         = ( ZoneType_Edge | ZoneType_Aggregation | ZoneType_Storage ),
-    ZoneType_Frontend    = ( ZoneType_Edge | ZoneType_Aggregation ),
-    ZoneType_Backend     = ( ZoneType_Aggregation | ZoneType_Storage )
-} ZoneType;
-#else
+
 typedef TcZoneType_t ZoneType;
-#endif
+
 // Service Request types
 // note: other data structures depend on the ordering of the REQTYPE elements.
 //       if the ordering changes corresponding changes must be made to 
@@ -330,32 +321,9 @@
                                             // types, add any new message types 
                                             // before this one
 } MSGTYPE;
-#if 0
-typedef enum {
-    ProcessType_Undefined=0,                // No process type as been defined
-    ProcessType_TSE,                        // Identifies a Table Storage Engine (DP2)
-    ProcessType_DTM,                        // Identifies a Distributed Transaction Monitor process
-    ProcessType_ASE,                        // Identifies a Audit Storage Engine (ADP)
-    ProcessType_Generic,                    // Identifies a generic process
-    ProcessType_Watchdog,                   // Identifies the monitor's watchdog processes
-    ProcessType_AMP,                        // Identifies a AMP process
-    ProcessType_Backout,                    // Identifies a Backout process
-    ProcessType_VolumeRecovery,             // Identifies a Volume Recovery process
-    ProcessType_MXOSRVR,                    // Identifies a MXOSRVR process
-    ProcessType_SPX,                        // Identifies a SeaPilot ProXy process
-    ProcessType_SSMP,                       // Identifies a SQL Statistics Merge Process (SSMP)
-    ProcessType_PSD,                        // Identifies the monitor's process start daemon processes
-    ProcessType_SMS,                        // Identifies a SeaMonster Service process
-    ProcessType_TMID,                       // Identifies a Transaction Management ID process
-    ProcessType_PERSIST,                    // Identifies a generic persistent process
 
-    ProcessType_Invalid                     // marks the end of the process
-                                            // types, add any new process
-                                            // types before this one
-} PROCESSTYPE;
-#else
 typedef TcProcessType_t PROCESSTYPE;
-#endif
+
 typedef enum {
     ShutdownLevel_Undefined=-1,
     ShutdownLevel_Normal=0,                 // Wait for all transactions and processes to end
diff --git a/core/sqf/monitor/linux/pnode.cxx b/core/sqf/monitor/linux/pnode.cxx
index 8aa0671..783640f 100644
--- a/core/sqf/monitor/linux/pnode.cxx
+++ b/core/sqf/monitor/linux/pnode.cxx
@@ -49,6 +49,8 @@
 
 #include "replicate.h"
 #include "reqqueue.h"
+#include "healthcheck.h"
+
 extern CReqQueue ReqQueue;
 extern char MyPath[MAX_PROCESS_PATH];
 extern int MyPNID;
@@ -64,9 +66,13 @@
 extern CMonStats *MonStats;
 extern CRedirector Redirector;
 extern CReplicate Replicator;
+extern CHealthCheck HealthCheck;
 extern CMonTrace *MonTrace;
-
+extern bool IsAgentMode;
 extern bool IAmIntegrating;
+extern char MasterMonitorName[MAX_PROCESS_PATH];
+extern char Node_name[MPI_MAX_PROCESSOR_NAME];
+extern CClusterConfig *ClusterConfig;
 
 const char *StateString( STATE state);
 const char *SyncStateString( SyncState state);
@@ -464,13 +470,14 @@
     int         tmCount = 0;
     CLNode     *lnode;
     CProcess   *process;
-    bool        tmReady;
+    bool        tmReady = false;
 
     const char method_name[] = "CNode::CheckActivationPhase";
     TRACE_ENTRY;
 
     // check for a TM process in each lnode
     lnode = GetFirstLNode();
+
     tmReady = lnode ? true : false;
     for ( ; lnode ; lnode = lnode->GetNextP() )
     {
@@ -1701,8 +1708,10 @@
         }
         else
         {
-           if (pnid >= maxNode) // only for workstation acting as single node
-              rank = -1;
+            if (pnid >= maxNode) // only for workstation acting as single node
+            {
+                rank = -1; // -1 creates node in down state
+            }
             node = new CNode( (char *)pnodeConfig->GetName(), pnid, rank );
             assert( node != NULL );
         }
@@ -1977,6 +1986,13 @@
 
     int pnid, pnidConfig;
 
+    // Lock the sync thread since we are making a change to the monitor's
+    // operational view of the cluster
+    if ( !Emulate_Down )
+    {
+        Monitor->EnterSyncCycle();
+    }
+
     for (int count = 0; count < nodeMapCount; count++)
     {
         pnidConfig = *buffer++;
@@ -1991,6 +2007,12 @@
 
     UpdateCluster();
 
+    // unlock sync thread
+    if ( !Emulate_Down )
+    {
+        Monitor->ExitSyncCycle();
+    }
+
     TRACE_EXIT;
     return;
 }
@@ -3121,7 +3143,7 @@
             if ( node->GetState() == State_Up && node->IsSpareNode() )
             {
                 spareNodesConfigList_.push_back( node );
-                if ( IAmIntegrating )
+                if (IAmIntegrating)
                 {
                     // do nothing. spareNodesList will get populated in the join phase.
                 }
@@ -3153,40 +3175,11 @@
     const char method_name[] = "CNodeContainer::LoadConfig";
     TRACE_ENTRY;
 
+    // The configuration is now global.  To minimize impact for the time being, just set the local
+    // pointer to the global configuration
     if ( !clusterConfig_ )
     {
-        clusterConfig_ = new CClusterConfig();
-    }
-    if ( clusterConfig_ )
-    {
-        bool traceEnabled = (trace_settings & TRACE_TRAFCONFIG) ? true : false;
-        if ( clusterConfig_->Initialize( traceEnabled, MonTrace->getTraceFileName() ) )
-        {
-            if ( ! clusterConfig_->LoadConfig() )
-            {
-                char la_buf[MON_STRING_BUF_SIZE];
-                sprintf(la_buf, "[%s], Failed to load cluster configuration.\n", method_name);
-                mon_log_write(MON_NODECONT_LOAD_CONFIG_1, SQ_LOG_CRIT, la_buf);
-                
-                abort();
-            }
-        }
-        else
-        {
-            char la_buf[MON_STRING_BUF_SIZE];
-            sprintf(la_buf, "[%s], Failed to open cluster configuration.\n", method_name);
-            mon_log_write(MON_NODECONT_LOAD_CONFIG_2, SQ_LOG_CRIT, la_buf);
-            
-            abort();
-        }
-    }
-    else
-    {
-        char la_buf[MON_STRING_BUF_SIZE];
-        sprintf(la_buf, "[%s], Failed to allocate cluster configuration.\n", method_name);
-        mon_log_write(MON_NODECONT_LOAD_CONFIG_3, SQ_LOG_CRIT, la_buf);
-        
-        abort();
+        clusterConfig_ = ClusterConfig;
     }
 
     TRACE_EXIT;
diff --git a/core/sqf/monitor/linux/process.cxx b/core/sqf/monitor/linux/process.cxx
index 6a8e08b..bce018b 100644
--- a/core/sqf/monitor/linux/process.cxx
+++ b/core/sqf/monitor/linux/process.cxx
@@ -72,6 +72,9 @@
 
 #include "replicate.h"
 
+extern bool IsAgentMode;
+extern bool IsMaster;
+
 extern bool PidMap;
 extern int Measure;
 extern int trace_level;
@@ -1651,13 +1654,39 @@
     }
 
     string LDpath;
-    if ( ldpathStrId_.nid != -1 )
-        Config->strIdToString(ldpathStrId_, LDpath);
-    if ( !LDpath.empty() )
+    static bool sv_getenv_ld_library_path_done = false;
+    static string sv_ld_library_path;
+    if (IsAgentMode)
     {
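+        // In agent mode, propagate the monitor's own LD_LIBRARY_PATH to the
+        // child process (cached on first use) instead of the path string id
+        // from the configuration database.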
-        setEnvStrVal ( childEnv, nextEnv, "LD_LIBRARY_PATH", LDpath.c_str() );
+        if (! sv_getenv_ld_library_path_done)
+        {
+            sv_getenv_ld_library_path_done = true;
+            // getenv() may return NULL; guard before assigning to the string
+            const char *lv_ld_library_path = getenv( "LD_LIBRARY_PATH" );
+            if (lv_ld_library_path)
+            {
+                sv_ld_library_path = lv_ld_library_path;
+            }
+            if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_REQUEST_DETAIL | TRACE_PROCESS_DETAIL))
+            {
+                trace_printf( "%s@%d" " - LD_LIBRARY_PATH = " "%s" "\n", method_name, __LINE__, sv_ld_library_path.c_str() );
+            }
+        }
+        LDpath = sv_ld_library_path;
         if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_REQUEST_DETAIL | TRACE_PROCESS_DETAIL))
-            trace_printf("%s@%d - LD_LIBRARY_PATH = %s\n", method_name, __LINE__, LDpath.c_str());
+        {
+            trace_printf( "%s@%d" " - LD_LIBRARY_PATH = " "%s" "\n", method_name, __LINE__, LDpath.c_str() );
+        }
+    }
+    else
+    {
+        if (ldpathStrId_.nid != -1)
+        {
+            Config->strIdToString( ldpathStrId_, LDpath );
+        }
+    }
+    if (!LDpath.empty())
+    {
+        setEnvStrVal( childEnv, nextEnv, "LD_LIBRARY_PATH", LDpath.c_str( ) );
+        if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_REQUEST_DETAIL | TRACE_PROCESS_DETAIL))
+        {
+            trace_printf( "%s@%d - LD_LIBRARY_PATH = %s\n", method_name, __LINE__, LDpath.c_str() );
+        }
     }
 
     setEnvStr ( childEnv, nextEnv, "LD_BIND_NOW=true" );
@@ -1695,15 +1724,39 @@
             trace_printf("%s@%d - PWD=%s\n", method_name, __LINE__,
                          pwd.c_str());
     }
-    
-
 
     string path;
-    if ( pathStrId_.nid != -1 )
-        Config->strIdToString( pathStrId_, path);
-    setEnvStrVal ( childEnv, nextEnv, "PATH", path.c_str() );
+    static bool sv_getenv_path_done = false;
+    static string sv_path;
+    if (IsAgentMode)
+    {
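+        // Likewise in agent mode, propagate the monitor's own PATH to the
+        // child process, cached on first use.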
+        if (! sv_getenv_path_done)
+        {
+            sv_getenv_path_done = true;
+            // getenv() may return NULL; guard before assigning to the string
+            const char *lv_path = getenv( "PATH" );
+            if (lv_path)
+            {
+                sv_path = lv_path;
+            }
+            if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_REQUEST_DETAIL | TRACE_PROCESS_DETAIL))
+            {
+                trace_printf( "%s@%d" " - PATH = " "%s" "\n", method_name, __LINE__, sv_path.c_str() );
+            }
+        }
+        path = sv_path;
+        if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_REQUEST_DETAIL | TRACE_PROCESS_DETAIL))
+        {
+            trace_printf( "%s@%d" " - PATH = " "%s" "\n", method_name, __LINE__, path.c_str() );
+        }
+    }
+    else
+    {
+        if (pathStrId_.nid != -1)
+        {
+            Config->strIdToString( pathStrId_, path );
+        }
+    }
+    setEnvStrVal( childEnv, nextEnv, "PATH", path.c_str( ) );
     if (trace_settings & (TRACE_SYNC_DETAIL | TRACE_REQUEST_DETAIL | TRACE_PROCESS_DETAIL))
-        trace_printf("%s@%d" " - PATH = " "%s" "\n", method_name, __LINE__, path.c_str());
+    {
+        trace_printf( "%s@%d" " - PATH = " "%s" "\n", method_name, __LINE__, path.c_str() );
+    }
 
     // Set values from registry as environment variables
     setEnvFromRegistry ( childEnv, nextEnv );
diff --git a/core/sqf/monitor/linux/reqprocinfo.cxx b/core/sqf/monitor/linux/reqprocinfo.cxx
index f7eeefe..4148f36 100644
--- a/core/sqf/monitor/linux/reqprocinfo.cxx
+++ b/core/sqf/monitor/linux/reqprocinfo.cxx
@@ -287,11 +287,14 @@
     verifier_ = msg_->u.request.u.process_info.verifier;
     processName_ = msg_->u.request.u.process_info.process_name;
 
+    int       pnid = -1;
     int       target_nid = -1;
     int       target_pid = -1;
     string    target_process_name;
     Verifier_t target_verifier = -1;
-    CProcess *requester = NULL;
+    CClusterConfig *clusterConfig = NULL;
+    CLNodeConfig   *lnodeConfig = NULL; 
+    CProcess       *requester = NULL;
 
     target_nid = msg_->u.request.u.process_info.target_nid;
     target_pid = msg_->u.request.u.process_info.target_pid;
@@ -390,42 +393,54 @@
                             , msg_->u.request.u.process_info.type);
             }
 
-            if (target_pid == -1)
+            clusterConfig = Nodes->GetClusterConfig();
+            if (clusterConfig)
             {
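+                // Proceed only when the cluster configuration has been loaded
+                // and the target nid maps to a configured logical node.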
-                // get info for all processes in node
-                if (target_nid >= 0 && target_nid < Nodes->GetLNodesConfigMax())
+                if (clusterConfig->IsConfigReady())
                 {
-                    count = ProcessInfo_BuildReply(Nodes->GetNode(target_nid)->GetFirstProcess(), 
-                                                   msg_,
-                                                   msg_->u.request.u.process_info.type,
-                                                   false,
-                                                   msg_->u.request.u.process_info.target_process_pattern);
-                }
-            }
-            else
-            {
-                // get info for single process in node
-                if ((requester->GetType() == ProcessType_TSE ||
-                     requester->GetType() == ProcessType_ASE ||
-                     requester->GetType() == ProcessType_AMP)  &&
-                    (requester->GetNid() == target_nid &&
-                     requester->GetPid() == target_pid))
-                {
-                    ProcessInfo_CopyData(requester,
-                                         msg_->u.reply.u.process_info.process[0]);
-                    count = 1;
-                }
-                else if (target_nid >= 0 && target_nid < Nodes->GetLNodesConfigMax())
-                { // find by nid/pid (check node state, don't check process state, backup is Ok)
-                    CProcess *process = Nodes->GetProcess( target_nid
-                                                         , target_pid
-                                                         , target_verifier
-                                                         , true, false, true );
-                    if (process)
+                    lnodeConfig = clusterConfig->GetLNodeConfig( target_nid );
+                    if (lnodeConfig)
                     {
-                        ProcessInfo_CopyData(process,
-                                             msg_->u.reply.u.process_info.process[0]);
-                        count = 1;
+
+                        if (target_pid == -1)
+                        {
+                            // get info for all processes in node
+                            if (target_nid >= 0 && target_nid < Nodes->GetLNodesConfigMax())
+                            {
+                                count = ProcessInfo_BuildReply(Nodes->GetNode(target_nid)->GetFirstProcess(), 
+                                                               msg_,
+                                                               msg_->u.request.u.process_info.type,
+                                                               false,
+                                                               msg_->u.request.u.process_info.target_process_pattern);
+                            }
+                        }
+                        else
+                        {
+                            // get info for single process in node
+                            if ((requester->GetType() == ProcessType_TSE ||
+                                 requester->GetType() == ProcessType_ASE ||
+                                 requester->GetType() == ProcessType_AMP)  &&
+                                (requester->GetNid() == target_nid &&
+                                 requester->GetPid() == target_pid))
+                            {
+                                ProcessInfo_CopyData(requester,
+                                                     msg_->u.reply.u.process_info.process[0]);
+                                count = 1;
+                            }
+                            else if (target_nid >= 0 && target_nid < Nodes->GetLNodesConfigMax())
+                            { // find by nid/pid (check node state, don't check process state, backup is Ok)
+                                CProcess *process = Nodes->GetProcess( target_nid
+                                                                     , target_pid
+                                                                     , target_verifier
+                                                                     , true, false, true );
+                                if (process)
+                                {
+                                    ProcessInfo_CopyData(process,
+                                                         msg_->u.reply.u.process_info.process[0]);
+                                    count = 1;
+                                }
+                            }
+                        }
                     }
                 }
             }
diff --git a/core/sqf/monitor/linux/reqqueue.cxx b/core/sqf/monitor/linux/reqqueue.cxx
index becb0cd..b4d2529 100644
--- a/core/sqf/monitor/linux/reqqueue.cxx
+++ b/core/sqf/monitor/linux/reqqueue.cxx
@@ -56,6 +56,7 @@
 
 extern bool IAmIntegrating;
 extern bool IAmIntegrated;
+
 extern CommType_t CommType;
 
 CReqResource::CReqResource()
@@ -2355,7 +2356,6 @@
     if (trace_settings & (TRACE_REQUEST | TRACE_INIT | TRACE_RECOVERY))
         trace_printf("%s@%d - Spare Nodes List unpacked\n", method_name, __LINE__);
 
-    //Nodes->UnpackNodeMappings( (intBuffPtr_t&)buffer, header.nodeMapCount_ );
     Nodes->UnpackNodeMappings( (intBuffPtr_t&)buffer, header.nodeMapCount_ );
 
     if (trace_settings & (TRACE_REQUEST | TRACE_INIT | TRACE_RECOVERY))
@@ -2469,8 +2469,8 @@
     }
 
     // estimate size of snapshot buffer
-    // about 100 bytes per process, 2 times total
-    int procSize = Nodes->ProcessCount() * 2 * 100;
+    // about 500 bytes per process, 2 times total
+    int procSize = Nodes->ProcessCount() * 2 * 500;
     int idsSize = Nodes->GetSNodesCount() * sizeof(int); // spare pnids
     idsSize += (Nodes->GetPNodesCount() + Nodes->GetLNodesCount()) * sizeof(int); // pnid/nid map
     idsSize += Nodes->GetLNodesCount() * sizeof(int);    // nids
@@ -2481,7 +2481,7 @@
 
     mem_log_write(MON_REQQUEUE_SNAPSHOT_4, procSize, idsSize);
 
-    snapshotBuf = (char *) malloc (procSize + idsSize); 
+    snapshotBuf = (char *) malloc (procSize + idsSize);
 
     if (!snapshotBuf) 
     {
@@ -2497,6 +2497,7 @@
 
     clock_gettime(CLOCK_REALTIME, &startTime);
 
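+    // Zero the buffer so any unused bytes in the packed snapshot are
+    // deterministic before compression.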
+    memset( snapshotBuf, 0, (procSize + idsSize) );
     char *buf = snapshotBuf;
 
     CCluster::snapShotHeader_t header;
@@ -2550,6 +2551,7 @@
         return;
     }
         
+    memset( compBuf, 0, compSize );
     z_result = compress((Bytef *)compBuf, (unsigned long *)&compSize, 
                         (Bytef *)snapshotBuf, header.fullSize_);
  
diff --git a/core/sqf/monitor/linux/shell.cxx b/core/sqf/monitor/linux/shell.cxx
index 5ad73ab..7d7db50 100644
--- a/core/sqf/monitor/linux/shell.cxx
+++ b/core/sqf/monitor/linux/shell.cxx
@@ -1102,15 +1102,16 @@
     switch (recv_msg->type )
     {
     case MsgType_Change:
-        printf ("[%s] Configuration Change Notice for Group: %s Key: %s\n", 
-                MyName, 
+        printf ("[%s] %s - Configuration Change Notice for Group: %s Key: %s Value: %s\n", 
+                MyName, time_string(),
                 recv_msg->u.request.u.change.group,
-                recv_msg->u.request.u.change.key);
+                recv_msg->u.request.u.change.key,
+                recv_msg->u.request.u.change.value);
         break;
 
     case MsgType_Event:
-        printf("[%s] Event %d received\n",
-               MyName, recv_msg->u.request.u.event_notice.event_id);
+        printf("[%s] %s - Event %d received\n",
+               MyName, time_string(), recv_msg->u.request.u.event_notice.event_id);
         break;
 
     case MsgType_NodeAdded:
@@ -1177,8 +1178,8 @@
         break;
 
     case MsgType_NodeDown:
-        printf ("[%s] Node %d (%s) is DOWN\n", 
-                MyName, recv_msg->u.request.u.down.nid,
+        printf ("[%s] %s - Node %d (%s) is DOWN\n", 
+                MyName, time_string(), recv_msg->u.request.u.down.nid,
                 recv_msg->u.request.u.down.node_name );
         NodeState[recv_msg->u.request.u.down.nid] = false;
 
@@ -1211,15 +1212,15 @@
 
 
     case MsgType_NodePrepare:
-        printf("[%s] Node %s (%d) node-up preparation, takeover=%s\n",
-               MyName, recv_msg->u.request.u.prepare.node_name,
+        printf("[%s] %s - Node %s (%d) node-up preparation, takeover=%s\n",
+               MyName, time_string(), recv_msg->u.request.u.prepare.node_name,
                recv_msg->u.request.u.prepare.nid,
                ((recv_msg->u.request.u.prepare.takeover)? "true": "false"));
         break;
 
     case MsgType_NodeQuiesce:
-        printf ("[%s] Node %d (%s) is QUIESCEd\n", 
-                MyName, msg->u.request.u.quiesce.nid,
+        printf ("[%s] %s - Node %d (%s) is QUIESCEd\n", 
+                MyName, time_string(), msg->u.request.u.quiesce.nid,
                 msg->u.request.u.quiesce.node_name );
         NodeState[msg->u.request.u.quiesce.nid] = false;
         if ( waitDeathPending )
@@ -1248,15 +1249,15 @@
     case MsgType_ProcessCreated:
         if ( recv_msg->u.request.u.process_created.return_code == MPI_SUCCESS )
         {
-            printf ("[%s] Process %s successfully created. Nid=%d, Pid=%d\n",
-                    MyName, recv_msg->u.request.u.process_created.process_name,
+            printf ("[%s] %s - Process %s successfully created. Nid=%d, Pid=%d\n",
+                    MyName, time_string(), recv_msg->u.request.u.process_created.process_name,
                     recv_msg->u.request.u.process_created.nid,
                     recv_msg->u.request.u.process_created.pid);
         }
         else
         {
-            printf ("[%s] Process %s NOT created. Nid=%d, Pid=%d\n",
-                    MyName, recv_msg->u.request.u.process_created.process_name,
+            printf ("[%s] %s - Process %s NOT created. Nid=%d, Pid=%d\n",
+                    MyName, time_string(), recv_msg->u.request.u.process_created.process_name,
                     recv_msg->u.request.u.process_created.nid,
                     recv_msg->u.request.u.process_created.pid);
         }
@@ -1265,15 +1266,15 @@
     case MsgType_ProcessDeath:
         if ( recv_msg->u.request.u.death.aborted )
         {
-            printf ("[%s] Process %s abnormally terminated. Nid=%d, Pid=%d\n",
-                    MyName, recv_msg->u.request.u.death.process_name, 
+            printf ("[%s] %s - Process %s abnormally terminated. Nid=%d, Pid=%d\n",
+                    MyName, time_string(), recv_msg->u.request.u.death.process_name, 
                     recv_msg->u.request.u.death.nid,
                     recv_msg->u.request.u.death.pid);
         }
         else
         {
-            printf ("[%s] Process %s terminated normally. Nid=%d, Pid=%d\n", 
-                    MyName, recv_msg->u.request.u.death.process_name, 
+            printf ("[%s] %s - Process %s terminated normally. Nid=%d, Pid=%d\n", 
+                    MyName, time_string(), recv_msg->u.request.u.death.process_name, 
                     recv_msg->u.request.u.death.nid,
                     recv_msg->u.request.u.death.pid);
         }
@@ -1298,18 +1299,18 @@
         break;
 
     case MsgType_Shutdown:
-        printf("[%s] Shutdown notice, level=%d received\n",
-               MyName, recv_msg->u.request.u.shutdown.level);
+        printf("[%s] %s - Shutdown notice, level=%d received\n",
+               MyName, time_string(), recv_msg->u.request.u.shutdown.level);
         nodePendingComplete();
         break;
 
     case MsgType_TmSyncAbort:
-        printf("[%s] TmSync abort notice received\n",
-               MyName);
+        printf("[%s] %s - TmSync abort notice received\n",
+               MyName, time_string());
         break;
     case MsgType_TmSyncCommit:
-        printf("[%s] TmSync commit notice received\n",
-               MyName);
+        printf("[%s] %s - TmSync commit notice received\n",
+               MyName, time_string());
         break;
 
     case MsgType_ReintegrationError:
@@ -1321,8 +1322,8 @@
         break;
 
     default:
-        printf("[%s] Unexpected notice type(%d) received\n",
-               MyName, recv_msg->type);
+        printf("[%s] %s - Unexpected notice type(%d) received\n",
+               MyName, time_string(), recv_msg->type);
 
     }
 
@@ -3868,7 +3869,7 @@
     // If this is a real cluster
     if ( nid == -1 )
     {
-        // Get current physical state of target nodes
+        // Get current physical state of all nodes
         if ( !update_node_state( node_name, false ) )
         {
             return( rc ) ;
@@ -4133,7 +4134,8 @@
     }
     if (!foundConfig)
     {
-        printf ("[%s] Persistent process configuration does not exist\n", MyName);
+        printf("[%s] %s - Persistent process configuration does not exist\n"
+              , MyName, time_string() );
     }
 }
 
@@ -4213,7 +4215,8 @@
     }
     if (!foundConfig)
     {
-        printf ("[%s] Persistent process configuration does not exist\n", MyName);
+        printf("[%s] %s - Persistent process configuration does not exist\n"
+              , MyName, time_string() );
     }
 }
 
@@ -4268,7 +4271,9 @@
                                           , persistZones );
             if ( !find_process( processName ) )
             {
-                printf( "Persistent process %s does not exist\n", processName);
+                printf( "[%s] %s - Persistent process %s does not exist\n"
+                      , MyName, time_string()
+                      , processName );
                 continue;
             }
             kill_process( -1, -1, processName, true );
@@ -4288,7 +4293,9 @@
                                       , persistZones );
         if ( !find_process( processName ) )
         {
-            printf( "Persistent process %s does not exist\n", processName);
+            printf( "[%s] %s - Persistent process %s does not exist\n"
+                  , MyName, time_string()
+                  , processName );
             break;
         }
         kill_process( -1, -1, processName, true );
@@ -4306,7 +4313,9 @@
                                       , persistZones );
         if ( !find_process( processName ) )
         {
-            printf( "Persistent process %s does not exist\n", processName);
+            printf( "[%s] %s - Persistent process %s does not exist\n"
+                  , MyName, time_string()
+                  , processName );
             break;
         }
         kill_process( -1, -1, processName, true );
@@ -4377,7 +4386,9 @@
                                           , persistZones );
             if ( find_process( processName ) )
             {
-                printf( "Persistent process %s already exists\n", processName);
+                printf( "[%s] %s - Persistent process %s already exists\n"
+                      , MyName, time_string()
+                      , processName );
                 continue;
             }
             if (programArgc)
@@ -4403,7 +4414,9 @@
                                //, (char *)persistConfig->GetProgramName() );
             if (pid > 0)
             {
-                printf( "Persistent process %s created\n", processName);
+                printf( "[%s] %s - Persistent process %s created\n"
+                      , MyName, time_string()
+                      , processName );
                 if (process_type == ProcessType_DTM)
                 {
                     DTMexists = true;
@@ -4431,7 +4444,9 @@
                                       , persistZones );
         if ( find_process( processName ) )
         {
-            printf( "Persistent process %s already exists\n", processName);
+            printf( "[%s] %s - Persistent process %s already exists\n"
+                  , MyName, time_string()
+                  , processName );
             break;
         }
         if (programArgc)
@@ -4456,7 +4471,9 @@
                            , programNameAndArgs );
         if (pid > 0)
         {
-            printf( "Persistent process %s created\n", processName);
+            printf( "[%s] %s - Persistent process %s created\n"
+                  , MyName, time_string()
+                  , processName );
             if (process_type == ProcessType_DTM)
             {
                 DTMexists = true;
@@ -4482,7 +4499,9 @@
                                       , persistZones );
         if ( find_process( processName ) )
         {
-            printf( "Persistent process %s already exists\n", processName);
+            printf( "[%s] %s - Persistent process %s already exists\n"
+                  , MyName, time_string()
+                  , processName );
             break;
         }
         if (programArgc)
@@ -4507,7 +4526,9 @@
                            , programNameAndArgs );
         if (pid > 0)
         {
-            printf( "Persistent process %s created\n", processName);
+            printf( "[%s] %s - Persistent process %s created\n"
+                  , MyName, time_string()
+                  , processName );
             if (process_type == ProcessType_DTM)
             {
                 DTMexists = true;
@@ -4516,8 +4537,8 @@
         else
         {
             if ( trace_settings & TRACE_SHELL_CMD )
-                trace_printf("%s@%d [%s] persistexec failed!\n",
-                             method_name, __LINE__, MyName);
+                trace_printf("%s@%d [%s] persist exec failed!\n"
+                             , method_name, __LINE__, MyName);
         }
         break;
     default:
@@ -6177,7 +6198,7 @@
     printf ("[%s] -- persist exec <persist-process-prefix>\n", MyName);
     printf ("[%s] -- persist info [<persist-process-prefix>]\n", MyName);
     printf ("[%s] -- persist kill <persist-process-prefix>\n", MyName);
-    printf ("[%s] -- ps [{CS|DTM|GEN|PSD|SMS|SSMP|WDG}] [<process_name>|<nid,pid>]\n", MyName);
+    printf ("[%s] -- ps [{CS|DTM|GEN|PSD|SMS|SSMP|WDG}] [<nid>|<process_name>|<nid,pid>]\n", MyName);
     printf ("[%s] -- pwd\n", MyName);
     printf ("[%s] -- quit\n", MyName);
     printf ("[%s] -- scanbufs\n", MyName);
@@ -6372,6 +6393,7 @@
 void node_cmd (char *cmd_tail)
 {
     int nid;
+    int pnid;
     char token[MAX_TOKEN];
     char delimiter;
     char *cmd = cmd_tail;
@@ -6423,7 +6445,7 @@
                 {
                     sprintf( msgString, "[%s] Node add is not available with Virtual Nodes!",MyName);
                     write_startup_log( msgString );
-                    printf ("[%s] Node add is not available with Virtual Nodes!\n", MyName);    
+                    printf ("%s\n", msgString);
                 }
                 else
                 {
@@ -6474,7 +6496,7 @@
                 {
                     sprintf( msgString, "[%s] Node delete is not available with Virtual Nodes!",MyName);
                     write_startup_log( msgString );
-                    printf ("[%s] Node delete is not available with Virtual Nodes!\n", MyName);    
+                    printf ("%s\n", msgString);
                 }
                 else
                 {
@@ -6487,7 +6509,7 @@
                     {
                         sprintf( msgString, "[%s] Node delete is not enabled, to enable export SQ_ELASTICY_ENABLED=1",MyName);
                         write_startup_log( msgString );
-                        printf ("[%s] Node delete is not enabled, to enable export SQ_ELASTICY_ENABLED=1\n", MyName);    
+                        printf ("%s\n", msgString);
                     }
                 }
             }
@@ -6516,15 +6538,15 @@
                 if ( *cmd )
                 {
                     nid = atoi (cmd);
-                    if ((!isNumeric(cmd)) || (nid >= LNodesConfigMax) || (nid < 0))
+                    pnid = get_pnid_by_nid( nid );
+                    if ( pnid == -1 )
                     {
-                        printf ("[%s] Invalid nid\n", MyName);
+                        printf( "[%s] Node id %d does not exist in configuration!\n"
+                              , MyName, nid );
+                        return;
                     }
-                    else
-                    {
-                        node_info(nid);
-                        CurNodes = NumLNodes-NumDown;
-                    }
+                    node_info(nid);
+                    CurNodes = NumLNodes-NumDown;
                 }
                 else
                 {
@@ -6546,7 +6568,7 @@
                 {
                     sprintf( msgString, "[%s] Node name is not available with Virtual Nodes!",MyName);
                     write_startup_log( msgString );
-                    printf ("[%s] Node name is not available with Virtual Nodes!\n", MyName);    
+                    printf ("%s\n", msgString);
                 }
                 else
                 {
@@ -6559,7 +6581,7 @@
                     {
                         sprintf( msgString, "[%s] Node name is not enabled, to enable export SQ_ELASTICY_ENABLED=1",MyName);
                         write_startup_log( msgString );
-                        printf ("[%s] Node name is not enabled, to enable export SQ_ELASTICY_ENABLED=1\n", MyName);    
+                        printf ("%s\n", msgString);
                     }
                 }
             }
@@ -6714,9 +6736,9 @@
 
     char *cmd_tail = cmd;
     char delim;
-    char msgString[MAX_BUFFER] = { 0 };
     char token[MAX_TOKEN] = { 0 };
     int nid = -1;
+    int pnid = -1;
 
     if ( trace_settings & TRACE_SHELL_CMD )
         trace_printf ("%s@%d [%s] processing node config command.\n",
@@ -6730,33 +6752,22 @@
         if ( isNumeric( token ) )
         {
             nid = atoi (token);
-            if (nid < 0 || nid > LNodesConfigMax - 1)
+            pnid = get_pnid_by_nid( nid );
+            if ( pnid == -1 )
             {
-                sprintf( msgString, "[%s] Node id is not configured!",MyName);
-                write_startup_log( msgString );
-                printf ("%s\n", msgString);
-               return;
+                printf( "[%s] Node id %d does not exist in configuration!\n"
+                      , MyName, nid );
+                return;
             }
-            snprintf( msgString, sizeof(msgString)
-                    , "[%s] Executing node config. (nid=%s)"
-                    , MyName, token );
-            write_startup_log( msgString );
         }
         else
         {
             if ( get_node_name( token ) != 0 ) 
             {
-                sprintf( msgString, "[%s] Node %s is not configured!"
-                       , MyName, token);
-                write_startup_log( msgString );
-                printf( "[%s] Node %s is not configured!\n"
-                      , MyName, token);
+                printf( "[%s] Node %s does not exist in configuration!\n"
+                      , MyName, token );
                 return;
             }
-            snprintf( msgString, sizeof(msgString)
-                    , "[%s] Executing node config. (node_name=%s)"
-                    , MyName, token );
-            write_startup_log( msgString );
         }
     }
 
@@ -6793,11 +6804,10 @@
         {
             if ( get_node_name( token ) != 0 ) 
             {
-                sprintf( msgString, "[%s] Node %s is not configured!"
+                sprintf( msgString, "[%s] Node %s does not exist in configuration!"
                        , MyName, token);
                 write_startup_log( msgString );
-                printf( "[%s] Node %s is not configured!\n"
-                      , MyName, token);
+                printf ("%s\n", msgString);
                 return;
             }
             STRCPY(node_name, token);
@@ -6805,13 +6815,14 @@
                     , "[%s] Executing node delete. (node_name=%s)"
                     , MyName, node_name );
             write_startup_log( msgString );
+            printf ("%s\n", msgString);
         }
     }
     else
     {
         sprintf( msgString, "[%s] Invalid node delete options syntax!",MyName);
         write_startup_log( msgString );
-        printf ("[%s] Invalid node delete options syntax!\n", MyName);
+        printf ("%s\n", msgString);
         return;
     }
 
@@ -6824,6 +6835,7 @@
 
     int numLNodes = -1;
     int nid;
+    int pnid;
     char *cmd_tail = cmd;
     char delim;
     char msgString[MAX_BUFFER] = { 0 };
@@ -6852,20 +6864,38 @@
         }
         write_startup_log( msgString );
         printf ("%s\n", msgString);
+
         nid = atoi (token);
-        if (nid < 0 || nid > LNodesConfigMax - 1)
+        pnid = get_pnid_by_nid( nid );
+        if ( pnid == -1 )
         {
-            sprintf( msgString, "[%s] Invalid node id!",MyName);
+            sprintf( msgString, "[%s] Node id %d does not exist in configuration!"
+                   , MyName, nid);
             write_startup_log( msgString );
             printf ("%s\n", msgString);
-           return;
+            return;
         }
     }
     else
     {
+        if (cmd_tail[0] != 0)
+        {
+            snprintf( msgString, sizeof(msgString)
+                    , "[%s] Executing node down. (node_name=%s) \"%s\""
+                    , MyName, token, cmd_tail );
+        }
+        else
+        {
+            snprintf( msgString, sizeof(msgString)
+                    , "[%s] Executing node down. (node_name=%s)"
+                    , MyName, token );
+        }
+        write_startup_log( msgString );
+        printf ("%s\n", msgString);
+
         if ( get_node_name( token ) != 0 ) 
         {
-            sprintf( msgString, "[%s] Node %s is not configured!"
+            sprintf( msgString, "[%s] Node %s does not exist in configuration!"
                    , MyName, token);
             write_startup_log( msgString );
             printf ("%s\n", msgString);
@@ -6873,20 +6903,6 @@
         }
         STRCPY(node_name, token);
         nid = get_first_nid( node_name );
-        if (cmd_tail[0] != 0)
-        {
-            snprintf( msgString, sizeof(msgString)
-                    , "[%s] Executing node down. (node_name=%s) \"%s\""
-                    , MyName, node_name, cmd_tail );
-        }
-        else
-        {
-            snprintf( msgString, sizeof(msgString)
-                    , "[%s] Executing node down. (node_name=%s)"
-                    , MyName, node_name );
-        }
-        write_startup_log( msgString );
-        printf ("%s\n", msgString);
     }
 
     numLNodes = get_lnodes_count( nid );
@@ -6895,7 +6911,6 @@
         return;
     }
 
-    int pnid;
     int zid = -1;
     STATE state;
 
@@ -6907,7 +6922,7 @@
     {
         sprintf( msgString, "[%s] Node is already down! (nid=%d, state=%s)\n", MyName, nid, StateString(state) );
         write_startup_log( msgString );
-        printf ("[%s] Node is already down! (nid=%d, state=%s)\n", MyName, nid, StateString(state) );
+        printf ("%s\n", msgString);
         return;
     }
     else
@@ -6918,7 +6933,7 @@
             {
                 sprintf( msgString, "[%s] Multiple logical nodes in physical node. Use <nid> '!' to down all logical nodes in physical node\n", MyName);
                 write_startup_log( msgString );
-                printf ("[%s] Multiple logical nodes in physical node. Use <nid> '!' to down all logical nodes in physical node\n", MyName);
+                printf ("%s\n", msgString);
                 return;
             }
         }
@@ -7058,7 +7073,7 @@
             {
                 sprintf( msgString, "[%s] Invalid up options syntax!",MyName);
                 write_startup_log( msgString );
-                printf ("[%s] Invalid up options syntax!\n", MyName);
+                printf ("%s\n", msgString);
                 delimiter = ' ';
                 break;
             }
@@ -7068,7 +7083,7 @@
         {
             sprintf( msgString, "[%s] Invalid up syntax!",MyName);
             write_startup_log( msgString );
-            printf ("[%s] Invalid up syntax!\n", MyName);
+            printf ("%s\n", msgString);
         }
         else if (delimiter == '}')
         {
@@ -7080,6 +7095,11 @@
     {
         if ( VirtualNodes )
         {
+            sprintf( msgString, "[%s] Executing node up. (nid=%s)"
+                   , MyName, cmd_tail);
+            write_startup_log( msgString );
+            printf ("%s\n", msgString);
+
             get_token( cmd_tail, token, &delim );
             if ( isNumeric( token ) )
             {
@@ -7088,7 +7108,7 @@
                 {
                     sprintf( msgString, "[%s] Invalid node id!",MyName);
                     write_startup_log( msgString );
-                    printf ("[%s] Invalid node id!\n", MyName);
+                    printf ("%s\n", msgString);
                 }
                 else
                 {
@@ -7100,27 +7120,47 @@
             {
                 sprintf( msgString, "[%s] Invalid node id!",MyName);
                 write_startup_log( msgString );
-                printf ("[%s] Invalid node id!\n", MyName);
+                printf ("%s\n", msgString);
             }
         }
         else
         {
-            if ( get_node_name( cmd_tail ) == 0 ) 
+            sprintf( msgString, "[%s] Executing node up. (node=%s)"
+                   , MyName, cmd_tail);
+            write_startup_log( msgString );
+            printf ("%s\n", msgString);
+
+            get_token( cmd_tail, token, &delim );
+            if ( isNumeric( token ) )
             {
-                if ( ClusterConfig.GetStorageType() == TCDBSQLITE)
-                {
-                    if ( copy_config_db( cmd_tail ) == 0 ) 
-                    {
-                        node_up( -1, cmd_tail, nowait );
-                    }
-                }
+                sprintf( msgString, "[%s] Invalid node name (%s)!"
+                       , MyName, token);
+                write_startup_log( msgString );
+                printf ("%s\n", msgString);
+                return;
             }
             else
             {
-                sprintf( msgString, "[%s] Invalid node name!",MyName);
-                write_startup_log( msgString );
-                printf ("[%s] Invalid node name!\n", MyName);
+                if ( get_node_name( token ) == 0 ) 
+                {
+                    if ( ClusterConfig.GetStorageType() == TCDBSQLITE)
+                    {
+                        if ( copy_config_db( cmd_tail ) != 0 )
+                        {
+                            return;
+                        }
+                    }
+                }
+                else
+                {
+                    sprintf( msgString, "[%s] Node %s does not exist in configuration!"
+                           , MyName, token);
+                    write_startup_log( msgString );
+                    printf ("%s\n", msgString);
+                    return;
+                }
             }
+            node_up( -1, cmd_tail, nowait );
         }
     }
 }
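The hunks above keep converting a second copy of each format string into printf("%s\n", msgString), so the console always echoes exactly what went to the startup log. A minimal sketch of that format-once idiom; the helper name emit_msg, the buffer size, and the stub logger are illustrative, with write_startup_log standing in for the shell's real startup-log writer:

    #include <cstdarg>
    #include <cstdio>

    // Illustrative stub; the real shell appends to its startup log file.
    static void write_startup_log( const char *msg )
    {
        fprintf( stderr, "LOG: %s\n", msg );
    }

    // Format once, emit twice: the logged line and the printed line can
    // never drift apart, and vsnprintf keeps the write bounded.
    static void emit_msg( const char *fmt, ... )
    {
        char msgString[1024];                    // size is an assumption
        va_list ap;
        va_start( ap, fmt );
        vsnprintf( msgString, sizeof(msgString), fmt, ap );
        va_end( ap );
        write_startup_log( msgString );
        printf( "%s\n", msgString );
    }

Called as emit_msg( "[%s] Node %s does not exist in configuration!", MyName, token ), it collapses the recurring four-line pattern to one call.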
@@ -7485,6 +7525,7 @@
 {
     int nid;
     int pid;
+    int pnid;
     char process_name[MAX_PROCESS_NAME];
     char token[MAX_TOKEN];
     PROCESSTYPE process_type = ProcessType_Undefined;
@@ -7546,7 +7587,7 @@
         }
     }
 
-    // check if we have a process <name> or <nid,pid>
+    // check if we have a process <name> or <nid> or <nid,pid>
     if (isdigit (*cmd_tail))
     {
         cmd_tail = get_token (cmd_tail, token, &delimiter);
@@ -7558,7 +7599,15 @@
         }
         else
         {
-            printf ("[%s] Invalid process Nid,Pid!\n", MyName);
+            nid = atoi (token);
+            pid = -1;
+            //printf ("[%s] Invalid process Nid,Pid!\n", MyName);
+            //return;
+        }
+        pnid = get_pnid_by_nid( nid );
+        if ( pnid == -1 )
+        {
+            printf( "[%s] Invalid node, nid=%d\n", MyName, nid );
             return;
         }
     }
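The parsing change above now treats a bare numeric token as <nid> with pid defaulted to -1, then validates the nid through get_pnid_by_nid before anything uses it. A sketch of that parse-then-validate shape; get_pnid_by_nid is the monitor's own lookup (returning -1 for an unknown nid), everything else here is illustrative:

    #include <cstdlib>

    // Monitor-owned lookup, declared elsewhere: returns -1 for an unknown nid.
    extern int get_pnid_by_nid( int nid );

    // Accepts "<nid>" or "<nid>,<pid>"; pidTok is NULL when no comma followed.
    // Returns false when the nid maps to no physical node.
    static bool parse_target( const char *nidTok, const char *pidTok,
                              int &nid, int &pid )
    {
        nid = atoi( nidTok );
        pid = (pidTok != NULL) ? atoi( pidTok ) : -1;  // bare <nid>: pid = -1
        return get_pnid_by_nid( nid ) != -1;           // validate before use
    }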
@@ -8251,8 +8300,6 @@
     }
     else if (strcmp (token, "up") == 0)
     {
-        sprintf( msgString, "[%s] Executing node up. (node=%s)",MyName,cmd_tail);
-        write_startup_log( msgString );
         if (Started)
         {
             node_up_cmd( cmd_tail, delimiter );
diff --git a/core/sqf/monitor/linux/tmsync.cxx b/core/sqf/monitor/linux/tmsync.cxx
index 60d9f40..b87f0f4 100644
--- a/core/sqf/monitor/linux/tmsync.cxx
+++ b/core/sqf/monitor/linux/tmsync.cxx
@@ -321,7 +321,7 @@
                 exchangeTmSyncData( sync, false );
                 syncCycle_.unlock();
                 ExchangeTmSyncState( false );
-                if (( Monitor->TmSyncPNid == MyPNID                           ) &&
+                if (( Monitor->tmSyncPNid_ == MyPNID                           ) &&
                     ( Nodes->GetTmState( SyncState_Start ) == SyncState_Start )   )
                 {
                     // send unsolicited messages to other TMs in
@@ -353,7 +353,7 @@
                 else
                 {
                     if (trace_settings & (TRACE_SYNC | TRACE_TMSYNC))
-                       trace_printf("%s@%d" " - Tm Sync failed to start, TmSyncPNid=%d, MyPNID=%d, " "TmSyncState=%d, expecting=%d\n", method_name, __LINE__, TmSyncPNid, MyPNID, Nodes->GetTmState( SyncState_Start ), SyncState_Start);
+                       trace_printf("%s@%d" " - Tm Sync failed to start, tmSyncPNid_=%d, MyPNID=%d, " "TmSyncState=%d, expecting=%d\n", method_name, __LINE__, tmSyncPNid_, MyPNID, Nodes->GetTmState( SyncState_Start ), SyncState_Start);
                     if (MyNode->GetTmSyncState() == SyncState_Start)
                     {
                         MyNode->SetTmSyncState( SyncState_Null );
@@ -449,7 +449,7 @@
             {
                 trace_printf("%s@%d - Request (%p) nid=%d, handle=%d, tag=%d, unsol=%d, comp=%d\n", method_name, __LINE__, req, req->Nid, req->Handle, req->Tag, req->Unsolicited, req->Completed);
             }
-            if ( TmSyncPNid == MyPNID )
+            if ( tmSyncPNid_ == MyPNID )
             {
                 if ( MyNode->GetLNodesCount() > 1 )
                 {
@@ -666,7 +666,7 @@
             TmSyncReplyCode |= msg->u.reply.u.unsolicited_tm_sync.return_code;
             tmsync_req->Completed = true;
             UnsolicitedComplete( msg );
-            if ( TmSyncPNid == MyPNID )
+            if ( tmSyncPNid_ == MyPNID )
             {
                 if (trace_settings & (TRACE_REQUEST | TRACE_TMSYNC))
                     trace_printf("%s@%d - Local Unsolicited TmSync reply, handle="
@@ -1102,7 +1102,7 @@
        trace_printf("%s@%d" " - PendingTmSync=%d, total=%d, replies=%d, pending=%d\n", method_name, __LINE__, PendingSlaveTmSync, GetTotalSlaveTmSyncCount(), GetTmSyncReplies(), GetPendingSlaveTmSyncCount() );
 
     if (( MyNode->GetTmSyncState() == SyncState_Abort ) &&
-        ( TmSyncPNid != MyPNID ) &&
+        ( tmSyncPNid_ != MyPNID ) &&
         ( GetTmSyncReplies() == GetTotalSlaveTmSyncCount() )   )
     {
         CommitTmDataBlock( MPI_ERR_UNKNOWN );
diff --git a/core/sqf/monitor/linux/zclient.cxx b/core/sqf/monitor/linux/zclient.cxx
index 36a0600..0ca03b1 100644
--- a/core/sqf/monitor/linux/zclient.cxx
+++ b/core/sqf/monitor/linux/zclient.cxx
@@ -488,6 +488,106 @@
     return rc;
 }
 
+const char* CZClient::WaitForAndReturnMaster( bool doWait )
+{
+    const char method_name[] = "CZClient::WaitForAndReturnMaster";
+    TRACE_ENTRY;
+    
+    bool found = false;
+    int rc = -1;
+    int retries = 0;
+    Stat stat;
+
+    struct String_vector nodes = {0, NULL};
+    stringstream ss;
+    ss.str( "" );
+    ss << zkRootNode_.c_str() 
+       << zkRootNodeInstance_.c_str() 
+       << ZCLIENT_MASTER_ZNODE;
+    string masterMonitor( ss.str( ) );
+
+    // wait up to 3 minutes before giving up.
+    while ( (!found) && (retries < 180)) 
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d trafCluster=%s\n"
+                        , method_name, __LINE__, masterMonitor.c_str() );
+        }
+        // Verify the existence of the parent ZCLIENT_MASTER_ZNODE
+        rc = ZooExistRetry( ZHandle, masterMonitor.c_str( ), 0, &stat );
+        
+        if ( rc == ZNONODE )
+        {
+            if (doWait == false)
+            {
+                break;
+            } 
+            usleep(1000000); // sleep for a second so as not to overwhelm the system
+            retries++;
+            continue;
+        }
+        else if ( rc == ZOK )
+        {
+            // Now get the list of available znodes in the cluster.
+            //
+            // This will return child znodes for each monitor process that has
+            // registered, including this process.
+            rc = zoo_get_children( ZHandle, masterMonitor.c_str( ), 0, &nodes );
+            if ( nodes.count > 0 )
+            {
+                if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+                {
+                    trace_printf( "%s@%d nodes.count=%d\n"
+                                , method_name, __LINE__
+                                , nodes.count );
+                }
+                found = true;
+            }
+            else
+            {
+                if (doWait == false)
+                {
+                    break;
+                }
+                usleep(1000000); // sleep for a second so as not to overwhelm the system
+                retries++;
+                continue;
+            }
+        }
+
+        else // error
+        { 
+            if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+            {
+                trace_printf( "%s@%d Error (MasterMonitor) WaitForAndReturnMaster returned rc (%d), retries %d\n"
+                        , method_name, __LINE__, rc, retries );
+            }
+            char buf[MON_STRING_BUF_SIZE];
+            snprintf( buf, sizeof(buf)
+                    , "[%s], ZooExistRetry() for %s failed with error %s\n"
+                    ,  method_name, masterMonitor.c_str( ), zerror(rc));
+            mon_log_write(MON_ZCLIENT_WAITFORANDRETURNMASTER, SQ_LOG_ERR, buf);
+            break;
+        }
+    }
+         
+    // should we assert nodes.count == 1?
+    if (found)
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d (MasterMonitor) Master Monitor found (%s)\n"
+                        , method_name, __LINE__, masterMonitor.c_str() );
+        }
+        TRACE_EXIT;
+        return nodes.data[0];
+    }
+
+    TRACE_EXIT;
+    return NULL;
+}
+
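Distilled, WaitForAndReturnMaster is a bounded poll: probe the parent master znode, list its children, sleep a second between attempts, and give up after 180 tries. A standalone sketch against the stock ZooKeeper C client; one deliberate difference is that it copies the child name before returning, since the function above returns a pointer into a String_vector it never frees:

    #include <zookeeper/zookeeper.h>
    #include <unistd.h>
    #include <cstdio>
    #include <string>

    // Returns the first child of `parent` (e.g. ".../instance/master"),
    // or an empty string when no master appears within the retry budget.
    std::string wait_for_master( zhandle_t *zh, const std::string &parent,
                                 int maxRetries /* e.g. 180 */ )
    {
        struct Stat stat;
        for ( int i = 0; i < maxRetries; i++ )
        {
            int rc = zoo_exists( zh, parent.c_str(), 0, &stat );
            if ( rc == ZOK )
            {
                struct String_vector kids = { 0, NULL };
                rc = zoo_get_children( zh, parent.c_str(), 0, &kids );
                if ( rc == ZOK && kids.count > 0 )
                {
                    std::string master( kids.data[0] ); // copy before freeing
                    deallocate_String_vector( &kids );
                    return master;
                }
                if ( rc == ZOK )
                    deallocate_String_vector( &kids );
                else if ( rc != ZNONODE )
                {
                    fprintf( stderr, "zoo_get_children: %s\n", zerror( rc ) );
                    break;                              // hard error: give up
                }
            }
            else if ( rc != ZNONODE )
            {
                fprintf( stderr, "zoo_exists: %s\n", zerror( rc ) );
                break;                                  // hard error: give up
            }
            sleep( 1 );  // back off so we do not hammer the quorum
        }
        return std::string();
    }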
 int CZClient::GetClusterZNodes( String_vector *nodes )
 {
     const char method_name[] = "CZClient::GetClusterZNodes";
@@ -700,7 +800,7 @@
 
 int CZClient::InitializeZClient( void )
 {
-    const char method_name[] = "CZClient::MakeClusterZNodes";
+    const char method_name[] = "CZClient::InitializeZClient";
     TRACE_ENTRY;
 
     int rc;
@@ -799,6 +899,69 @@
     return( expired );
 }
 
+int CZClient::CreateMasterZNode(  const char *nodeName )
+{
+    const char method_name[] = "CZClient::CreateMasterZNode";
+    TRACE_ENTRY;
+
+    int rc;
+    int retries = 0;
+    
+    stringstream masterpath;
+    masterpath.str( "" );
+    masterpath << zkRootNode_.c_str() 
+            << zkRootNodeInstance_.c_str() 
+            << ZCLIENT_MASTER_ZNODE<< "/"
+            << nodeName;
+            
+    string monZnode = masterpath.str( );
+
+    stringstream ss;
+    ss.str( "" );
+    ss << nodeName << ":" << MyPNID;
+    string monData = ss.str( ); 
+
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+    {
+        trace_printf( "%s@%d RegisterZNode(%s:%s)\n"
+                    , method_name, __LINE__
+                    , monZnode.c_str()
+                    , monData.c_str() );
+    }
+
+    rc = RegisterZNode( monZnode.c_str(), monData.c_str(), ZOO_EPHEMERAL );
+    while ( ((rc == ZCONNECTIONLOSS) || (rc == ZOPERATIONTIMEOUT)) && retries < ZOOKEEPER_RETRY_COUNT)
+    {
+        sleep(ZOOKEEPER_RETRY_WAIT);
+        retries++;
+        rc = RegisterZNode( monZnode.c_str(), monData.c_str(), ZOO_EPHEMERAL );
+    }
+    
+    if (rc != ZOK)
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d Error (MasterMonitor) Create master node for %s with rc = %d)\n"
+                    , method_name, __LINE__, monZnode.c_str( ), rc);
+        }
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], RegisterZNode(%s) failed with error %s\n"
+                , method_name, monData.c_str(), zerror(rc) );
+        mon_log_write(MON_ZCLIENT_CREATEMASTERZNODE, SQ_LOG_ERR, buf);
+
+        TRACE_EXIT;
+        return(rc); // Return the error
+    }
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+    {
+        trace_printf( "%s@%d (MasterMonitor) Created master node for %s with rc = %d)\n"
+                , method_name, __LINE__, monZnode.c_str( ), rc);
+    }
+    TRACE_EXIT;
+    return(rc);
+}
+
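CreateMasterZNode funnels through RegisterZNode with ZOO_EPHEMERAL; assuming RegisterZNode wraps zoo_create (the wrapper itself is not in this diff), the equivalent direct call with the same transient-error retry looks like this sketch, with maxRetries and retryWaitSecs standing in for ZOOKEEPER_RETRY_COUNT and ZOOKEEPER_RETRY_WAIT:

    #include <zookeeper/zookeeper.h>
    #include <unistd.h>
    #include <string>

    // Create an ephemeral child under the master parent znode, retrying
    // only the transient errors the monitor also retries.
    int create_master_znode( zhandle_t *zh, const std::string &parent,
                             const std::string &nodeName, int myPnid,
                             int maxRetries, int retryWaitSecs )
    {
        std::string path = parent + "/" + nodeName;
        std::string data = nodeName + ":" + std::to_string( myPnid );

        int rc;
        int retries = 0;
        do
        {
            rc = zoo_create( zh, path.c_str(), data.c_str(), (int) data.size(),
                             &ZOO_OPEN_ACL_UNSAFE, ZOO_EPHEMERAL,
                             NULL, 0 );  // created path not needed back
            if ( rc != ZCONNECTIONLOSS && rc != ZOPERATIONTIMEOUT )
                break;
            sleep( retryWaitSecs );
        } while ( ++retries < maxRetries );

        return rc;  // ZOK, or ZNODEEXISTS if another monitor won the race
    }

ZOO_EPHEMERAL ties the znode to the creator's session, which is what turns a master crash into an observable delete event for the watchers below.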
 int CZClient::MakeClusterZNodes( void )
 {
     const char method_name[] = "CZClient::MakeClusterZNodes";
@@ -908,6 +1071,40 @@
         break;
     }
 
+    ss.str( "" );
+    ss << zkRootNode_.c_str() 
+       << zkRootNodeInstance_.c_str() 
+       << ZCLIENT_MASTER_ZNODE;
+    string masterDir( ss.str( ) );
+
+    rc = ZooExistRetry( ZHandle, masterDir.c_str( ), 0, &stat );
+    switch (rc)
+    {
+    case ZOK:
+        break;
+    case ZNONODE:
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d RegisterZNode(%s)\n"
+                        , method_name, __LINE__
+                        , masterDir.c_str() );
+        }
+        rc = RegisterZNode( masterDir.c_str(), NULL, 0 );
+        if ( rc && rc != ZNODEEXISTS )
+        {
+            return(rc); // Return the error
+        }
+        rc = ZOK;
+        break;
+    default:
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], zoo_exists(%s) failed with error %s\n"
+                , method_name, masterDir.c_str(), zerror(rc) );
+        mon_log_write(MON_ZCLIENT_CHECKCLUSTERZNODES_3, SQ_LOG_ERR, buf);
+        break;
+    }
+    
     TRACE_EXIT;
     return(rc);
 }
@@ -1484,6 +1681,53 @@
     TRACE_EXIT;
 }
 
+int CZClient::WatchMasterNode( const char *nodeName )
+{
+    const char method_name[] = "CZClient::WatchMasterNode";
+    TRACE_ENTRY;
+
+    int rc;
+    stringstream newpath;
+    newpath.str( "" );
+    newpath << zkRootNode_.c_str() 
+            << zkRootNodeInstance_.c_str() 
+            << ZCLIENT_MASTER_ZNODE << "/"
+            << nodeName;
+    string monZnode = newpath.str( );
+
+    lock();
+    rc = SetZNodeWatch( monZnode );
+    unlock();
+    if ( rc != ZOK )
+    {
+       if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d Error (MasterMonitor) WatchMasterNode failed with rc = %d for %s\n"
+                        , method_name, __LINE__
+                        , rc
+                        , nodeName);
+        }
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], SetZNodeWatch(%s) failed!\n"
+                , method_name
+                , monZnode.c_str() );
+        mon_log_write(MON_ZCLIENT_WATCHNODE_1, SQ_LOG_ERR, buf); 
+    }
+    else
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d (MasterMonitor) WatchMasterNode set on monZnode=%s\n"
+                        , method_name, __LINE__
+                        , monZnode.c_str() );
+        }
+    }
+
+    TRACE_EXIT;
+    return(rc);
+}
+
 int CZClient::WatchNode( const char *nodeName )
 {
     const char method_name[] = "CZClient::WatchNode";
@@ -1524,6 +1768,108 @@
     return(rc);
 }
 
+int CZClient::WatchNodeMasterDelete( const char *nodeName )
+{
+    const char method_name[] = "CZClient::WatchMasterDelete";
+    TRACE_ENTRY;
+    
+    int rc = -1;
+    stringstream newpath;
+    newpath.str( "" );
+    newpath << zkRootNode_.c_str() 
+            << zkRootNodeInstance_.c_str() 
+            << ZCLIENT_MASTER_ZNODE << "/"
+            << nodeName;
+           
+    string monZnode = newpath.str( );
+    
+    if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+    {
+        trace_printf( "%s@%d zoo_delete(%s)\n"
+                    , method_name, __LINE__
+                    , monZnode.c_str() );
+    }
+   
+    rc = zoo_delete( ZHandle
+                   , monZnode.c_str( )
+                   , -1 );
+    if ( rc == ZOK )
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d (MasterMonitor) WatchNodeMasterDelete deleted %s, with rc == ZOK\n"
+                        , method_name, __LINE__
+                        , nodeName );
+        }
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], znode (%s) deleted!\n"
+                , method_name, nodeName );
+        mon_log_write(MON_ZCLIENT_WATCHMASTERNODEDELETE_1, SQ_LOG_INFO, buf);
+    }
+    else if ( rc == ZNONODE )
+    {
+        // This is fine since we call it indiscriminately
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d (MasterMonitor) WatchNodeMasterDelete already deleted %s, with rc == ZNONODE (fine)\n"
+                        , method_name, __LINE__
+                        , nodeName );
+        }
+    }
+    else if ( rc == ZCONNECTIONLOSS || 
+              rc == ZOPERATIONTIMEOUT )
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d (MasterMonitor) znode (%s) already deleted or cannot be accessed, rc=%d (%s)\n"
+                        , method_name, __LINE__
+                        , nodeName, rc, zerror(rc)  );
+        }
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], znode (%s) already deleted or cannot be accessed, rc=%d (%s)\n"
+                , method_name, nodeName, rc, zerror(rc)  );
+        mon_log_write(MON_ZCLIENT_WATCHMASTERNODEDELETE_2, SQ_LOG_INFO, buf);
+        rc = ZOK;
+    }
+    else
+    {
+        if (trace_settings & (TRACE_INIT | TRACE_RECOVERY))
+        {
+            trace_printf( "%s@%d (MasterMonitor) WatchNodeMasterDelete deleted %s, with rc == ZOK\n"
+                        , method_name, __LINE__
+                        , nodeName );
+        }
+        char buf[MON_STRING_BUF_SIZE];
+        snprintf( buf, sizeof(buf)
+                , "[%s], zoo_delete(%s) failed with error %s\n"
+                , method_name, nodeName, zerror(rc) );
+        mon_log_write(MON_ZCLIENT_WATCHMASTERNODEDELETE_3, SQ_LOG_CRIT, buf);
+        switch ( rc )
+        {
+        case ZSYSTEMERROR:
+        case ZRUNTIMEINCONSISTENCY:
+        case ZDATAINCONSISTENCY:
+        case ZMARSHALLINGERROR:
+        case ZUNIMPLEMENTED:
+        case ZBADARGUMENTS:
+        case ZINVALIDSTATE:
+        case ZSESSIONEXPIRED:
+        case ZCLOSING:
+            // Treat these error like a session expiration, since
+            // we can't communicate with quorum servers
+            HandleMyNodeExpiration();
+            break;
+        default:
+            break;
+        }
+    }
+    
+    TRACE_EXIT;
+    return( rc );
+}
+
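Stripped of logging, WatchNodeMasterDelete has a small contract: a best-effort delete in which ZNONODE counts as success because callers invoke it indiscriminately, and connection-level failures are mapped to ZOK after being logged. The core, as a sketch:

    #include <zookeeper/zookeeper.h>
    #include <string>

    // Best-effort removal of the master znode; "already gone" is success.
    int delete_master_znode( zhandle_t *zh, const std::string &path )
    {
        int rc = zoo_delete( zh, path.c_str(), -1 );  // -1: any version
        if ( rc == ZNONODE )
            rc = ZOK;                                 // already deleted: fine
        return rc;  // other codes are left for the caller's error ladder
    }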
 int CZClient::WatchNodeDelete( const char *nodeName )
 {
     const char method_name[] = "CZClient::WatchNodeDelete";
diff --git a/core/sqf/monitor/linux/zclient.h b/core/sqf/monitor/linux/zclient.h
index ea9bca3..6108021 100644
--- a/core/sqf/monitor/linux/zclient.h
+++ b/core/sqf/monitor/linux/zclient.h
@@ -104,6 +104,7 @@
 
 #define ZCLIENT_TRAFODION_ZNODE     "/trafodion"
 #define ZCLIENT_INSTANCE_ZNODE      "/instance"
+#define ZCLIENT_MASTER_ZNODE        "/master"
 
 typedef list<string>    ZNodeList_t;
 
@@ -137,6 +138,7 @@
             , const char *instanceZNode );
     ~CZClient( void );
 
+    int     CreateMasterZNode( const char *nodeName );
     int     GetSessionTimeout( void) { return( zkSessionTimeout_ ); }
     bool    IsZNodeExpired( const char *nodeName, int &zerr );
     void    MonitorZCluster( void );
@@ -148,8 +150,11 @@
     int     StartWork( void );
     void    StopMonitoring( void );
     void    TriggerCheck( int type, const char *znodePath );
+    const char* WaitForAndReturnMaster( bool doWait );
     int     WatchNode( const char *nodeName );
+    int     WatchMasterNode( const char *nodeName );
     int     WatchNodeDelete( const char *nodeName );
+    int     WatchNodeMasterDelete( const char *nodeName );
 
 private:
     int     ZooExistRetry(zhandle_t *zh, const char *path, int watch, struct Stat *stat);
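Taken together, the four declarations added here suggest a startup sequence along these lines; this is a hypothetical composition based only on the signatures above, not the monitor's actual call sites:

    #include <cstring>

    // Hypothetical glue; error handling elided.
    void become_or_follow_master( CZClient *zc, const char *myName,
                                  bool amConfiguredMaster )
    {
        if ( amConfiguredMaster )
        {
            zc->WatchNodeMasterDelete( myName ); // clear any stale znode first
            zc->CreateMasterZNode( myName );     // publish ourselves as master
        }
        const char *master = zc->WaitForAndReturnMaster( true /*doWait*/ );
        if ( master != NULL && strcmp( master, myName ) != 0 )
        {
            zc->WatchMasterNode( master );       // follow and watch for failover
        }
    }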
diff --git a/core/sqf/monitor/test/runtest b/core/sqf/monitor/test/runtest
index 9c08949..8c1193b 100755
--- a/core/sqf/monitor/test/runtest
+++ b/core/sqf/monitor/test/runtest
@@ -102,7 +102,8 @@
 #
 # Setup test execution
 #
-export PATH=$PATH:$PWD/Linux-x86_64/64/dbg
+ARCH=`arch`
+export PATH=$PATH:$PWD/Linux-${ARCH}/64/dbg
 cd $TRAF_HOME/monitor/test
 echo $PWD
 
diff --git a/core/sqf/sqenvcom.sh b/core/sqf/sqenvcom.sh
index eec2a3b..c5a9164 100644
--- a/core/sqf/sqenvcom.sh
+++ b/core/sqf/sqenvcom.sh
@@ -673,6 +673,18 @@
 # Control SQ default startup behavior (c=cold, w=warm, if removed sqstart will autocheck)
 export SQ_STARTUP=r
 
+# Monitor process creator:
+#   MPIRUN - monitor process is created by mpirun
+# Uncomment SQ_MON_CREATOR when running monitor in AGENT mode
+#export SQ_MON_CREATOR=MPIRUN
+
+# Monitor process run mode:
+#   AGENT - monitor process runs in agent mode versus MPI collective
+# Uncomment the three environment variables below
+#export SQ_MON_RUN_MODE=AGENT
+#export MONITOR_COMM_PORT=23399
+#export MONITOR_SYNC_PORT=23398
+
 # Alternative logging capability in monitor
 export SQ_MON_ALTLOG=0
 
@@ -707,6 +719,13 @@
 # Trafodion Configuration Zookeeper store
 #export TC_ZCONFIG_SESSION_TIMEOUT=120
 
+# Increase the SQ_MON, ZCLIENT, and WDT timeouts in the Jenkins environment only.
+if [[ "$TRAF_HOME" == *"/home/jenkins"* ]]; then
+  export SQ_MON_EPOLL_WAIT_TIMEOUT=20
+  export SQ_MON_ZCLIENT_SESSION_TIMEOUT=360
+  export SQ_WDT_KEEPALIVETIMERVALUE=360
+fi
+
 # set to 0 to disable phandle verifier
 export SQ_PHANDLE_VERIFIER=1
 
diff --git a/core/sqf/sql/scripts/dcscheck b/core/sqf/sql/scripts/dcscheck
index 4a30716..914697c 100755
--- a/core/sqf/sql/scripts/dcscheck
+++ b/core/sqf/sql/scripts/dcscheck
@@ -23,7 +23,6 @@
 cfg_mxo_cnt=0
 act_mxo_cnt=0
 down_mxo_cnt=0
-backup_dcsmaster_cnt=0
 cfg_dcsmaster_cnt=0
 actual_dcsmaster_cnt=0
 down_dcsmaster_cnt=0
@@ -71,16 +70,10 @@
 ### Get the list of DcsServer and DcsMaster
   $L_PDSH $jpscmd |/bin/egrep 'DcsMaster|DcsServer' > $tmpjps
 
-### Get the configured primary node for DcsMaster
-  if [ -s ${DCS_INSTALL_DIR}/conf/master ]; then
-     primary_dcsmaster=`cat ${DCS_INSTALL_DIR}/conf/master | /bin/egrep -v '^#|^$'`
-  fi
-
 ### Get the configured number of DcsMaster's
-  if [ -s ${DCS_INSTALL_DIR}/conf/backup-masters ]; then
-     let backup_dcsmaster_cnt=`/bin/egrep -cv '#|^$' ${DCS_INSTALL_DIR}/conf/backup-masters`
-     let cfg_dcsmaster_cnt=$backup_dcsmaster_cnt+1
-     list_of_backups=`cat ${DCS_INSTALL_DIR}/conf/backup-masters | /bin/egrep -v '^#|^$'|paste -sd ' ' -`
+  if [ -s ${DCS_INSTALL_DIR}/conf/masters ]; then
+     let cfg_dcsmaster_cnt=`/bin/egrep -cv '#|^$' ${DCS_INSTALL_DIR}/conf/masters`
+     list_of_masters=`cat ${DCS_INSTALL_DIR}/conf/masters | /bin/egrep -v '^#|^$'|paste -sd ' ' -`
    else
      let cfg_dcsmaster_cnt=1
    fi
@@ -159,16 +152,13 @@
    masterport=`cat $dcstmp | /usr/bin/tail -n 1 | cut -d ":" -f2`
    if [ ! -z "$masterport" ]; then
      echo -e "DcsMaster listen port: $masterport\n"
-     if [[ ! -z "$primary_dcsmaster" ]]; then
-        echo "Configured Primary DcsMaster: \"$primary_dcsmaster\""
-     fi
-     if [[ ! -z "$list_of_backups" ]]; then
-        echo "Configured Backup DcsMasters: \"$list_of_backups\""
+     if [[ ! -z "$list_of_masters" ]]; then
+        echo "Configured DcsMasters: \"$list_of_masters\""
      fi
      if ( [ ! -z "$activeMaster" ] && [ ! -z "$activeDcsPid" ] ); then
-        echo "Active DcsMaster            : \"$activeMaster\", pid $activeDcsPid"
+        echo "Active DcsMaster     : \"$activeMaster\", pid $activeDcsPid"
      else
-        echo "Active DcsMaster            : \"$activeMaster\""
+        echo "Active DcsMaster     : \"$activeMaster\""
      fi
      echo
    fi
diff --git a/core/sqf/sql/scripts/dcsstart b/core/sqf/sql/scripts/dcsstart
index 86d7d3a..a14da20 100755
--- a/core/sqf/sql/scripts/dcsstart
+++ b/core/sqf/sql/scripts/dcsstart
@@ -23,7 +23,7 @@
 #
 
 #Check if Trafodion is up and operational
-sqcheck -f
+sqcheck -f > /dev/null
 sq_stat=$?
 if ( [ $sq_stat == 0 ] || [ $sq_stat == 1 ] ); then
    DCS_START_CMD=${DCS_INSTALL_DIR}/bin/start-dcs.sh
@@ -33,13 +33,7 @@
       exit 1;
    fi
 
-   if [ ! -e ${DCS_START_CMD} ]; then
-     echo "${DCS_START_CMD} not found."
-     exit 1;
-   fi
-
    echo "Starting the DCS environment now"
-   cd ${DCS_INSTALL_DIR}
    ${DCS_START_CMD}
    cds
 else
diff --git a/core/sqf/sql/scripts/dcsstop b/core/sqf/sql/scripts/dcsstop
index 5b37e91..9eca963 100755
--- a/core/sqf/sql/scripts/dcsstop
+++ b/core/sqf/sql/scripts/dcsstop
@@ -29,16 +29,11 @@
    exit 1;
 fi
 
-if [ ! -e ${DCS_STOP_CMD} ]; then
-   echo "${DCS_STOP_CMD} not found."
-   exit 1;
-fi
+echo "Shutting down the DCS environment now"
+${DCS_STOP_CMD}
 
 if [[ $ENABLE_HA == "true" ]]; then
    ${DCS_INSTALL_DIR}/bin/scripts/dcsunbind.sh
 fi
 
-echo "Shutting down the DCS environment now"
-cd ${DCS_INSTALL_DIR}
-${DCS_STOP_CMD}
 cds
diff --git a/core/sqf/sql/scripts/install_local_hadoop b/core/sqf/sql/scripts/install_local_hadoop
index 7c2b062..497d917 100755
--- a/core/sqf/sql/scripts/install_local_hadoop
+++ b/core/sqf/sql/scripts/install_local_hadoop
@@ -1641,6 +1641,11 @@
 fi
 # end of HBase setup
 
+if [[ ! -z $FAST_LOCAL_HADOOP ]]; then
+    echo "FAST_LOCAL_HADOOP is set. Exiting..."
+    exit 0
+fi
+
 cd $MY_SW_ROOT
 
 if [ $INSTALL_TPCDS -eq 1 ]; then
diff --git a/core/sqf/sql/scripts/install_traf_components b/core/sqf/sql/scripts/install_traf_components
index 35cd7c8..794ce53 100755
--- a/core/sqf/sql/scripts/install_traf_components
+++ b/core/sqf/sql/scripts/install_traf_components
@@ -174,6 +174,9 @@
 
   echo "Configured $DCS_HOME/conf/dcs-site.xml" | tee -a ${MY_LOG_FILE}
 
+  echo "localhost" > masters
+  echo "Configured $DCS_HOME/conf/masters" | tee -a ${MY_LOG_FILE}
+
   echo "localhost 4" > servers
   echo "Configured $DCS_HOME/conf/servers" | tee -a ${MY_LOG_FILE}
 
@@ -290,7 +293,7 @@
   echo "Adding swtrafci script..." | tee -a ${MY_LOG_FILE}
   cat <<EOF >$TRAF_HOME/sql/scripts/swtrafci
 #!/bin/sh
-$TRAF_HOME/trafci/bin/trafci
+$TRAF_HOME/trafci/bin/trafci.sh -h localhost:$MY_DCS_MASTER_PORT -u db__root -p zz
 EOF
   chmod +x $TRAF_HOME/sql/scripts/swtrafci
 else
diff --git a/core/sqf/sql/scripts/sqcheck b/core/sqf/sql/scripts/sqcheck
index c5825a8..6449ec7 100755
--- a/core/sqf/sql/scripts/sqcheck
+++ b/core/sqf/sql/scripts/sqcheck
@@ -112,14 +112,12 @@
 }
 
 function getDcsInfo {
- dcsznode=/$USER/dcs/master
 
  if [ -d $DCS_INSTALL_DIR ];then
 
 ### Get the configured number of DcsMaster's
-  if [ -s ${DCS_INSTALL_DIR}/conf/backup-masters ]; then
-     let backup_dcsmaster_cnt=`/bin/egrep -cv '#|^$' ${DCS_INSTALL_DIR}/conf/backup-masters`
-     let cfg_dcsmaster_cnt=$backup_dcsmaster_cnt+1
+  if [ -s ${DCS_INSTALL_DIR}/conf/masters ]; then
+     let cfg_dcsmaster_cnt=`/bin/egrep -cv '#|^$' ${DCS_INSTALL_DIR}/conf/masters`
    else
      let cfg_dcsmaster_cnt=1
    fi
diff --git a/core/sqf/src/tm/tm_internal.h b/core/sqf/src/tm/tm_internal.h
index bca0f77..cf735b2 100644
--- a/core/sqf/src/tm/tm_internal.h
+++ b/core/sqf/src/tm/tm_internal.h
@@ -46,7 +46,6 @@
 
 #define MAX_FILE_NAME 64
 #define MAX_NUM_TRANS   1024
-#define MAX_NODES 256
 #define MAX_SYNC_TXS 50
 #define MAX_RECEIVE_BUFFER 200000
 // low number for testing
diff --git a/core/sqf/src/tm/tmlibmsg.h b/core/sqf/src/tm/tmlibmsg.h
index c8eea04..1049b7d 100644
--- a/core/sqf/src/tm/tmlibmsg.h
+++ b/core/sqf/src/tm/tmlibmsg.h
@@ -43,8 +43,8 @@
 
 //#include "dumapp.h"
 
+#include "trafconf/trafconfig.h"
 #include "dtm/tm.h"
-
 #include "dtm/xa.h"
 #include "rm.h"
 #include "../../inc/fs/feerrors.h" //legacy error codes for SQL
@@ -86,7 +86,7 @@
 #define MAX_NUM_TRANS           5000
 #define STEADYSTATE_LOW_TRANS   5
 #define STEADYSTATE_HIGH_TRANS  1000
-#define MAX_NODES 256
+#define MAX_NODES               TC_NODES_MAX
 #define MAX_SYNC_TXS 50
 #define MAX_TXN_TAGS UINT_MAX
 
diff --git a/core/sqf/src/tm/tools/dtmci.cpp b/core/sqf/src/tm/tools/dtmci.cpp
index fff798f..5d19734 100644
--- a/core/sqf/src/tm/tools/dtmci.cpp
+++ b/core/sqf/src/tm/tools/dtmci.cpp
@@ -50,8 +50,6 @@
     
 const char ga_timestamp[] = "v 3.1.0, Nov 26, 2014";
 
-#define MAX_NODES 256
-
 using namespace std;
 extern const char *ms_getenv_str(const char *pp_key);
 
diff --git a/core/sqf/src/tm/tools/pwd.cpp b/core/sqf/src/tm/tools/pwd.cpp
index f95c126..97606a2 100644
--- a/core/sqf/src/tm/tools/pwd.cpp
+++ b/core/sqf/src/tm/tools/pwd.cpp
@@ -47,8 +47,6 @@
     
 const char ga_timestamp[] = "v 3.1.0, Nov 26, 2014";
 
-#define MAX_NODES 256
-
 using namespace std;
 extern const char *ms_getenv_str(const char *pp_key);
 
diff --git a/core/sqf/src/tm/tools/tmshutdown.cpp b/core/sqf/src/tm/tools/tmshutdown.cpp
index 5961c5b..b0ca8b0 100644
--- a/core/sqf/src/tm/tools/tmshutdown.cpp
+++ b/core/sqf/src/tm/tools/tmshutdown.cpp
@@ -37,7 +37,6 @@
 // Version
 DEFINE_EXTERN_COMP_DOVERS(tmshutdown)
 
-#define MAX_NODES 256
 #define MAX_ARGLEN 32
 using namespace std;
 
diff --git a/core/sqf/src/trafconf/clusterconf.cpp b/core/sqf/src/trafconf/clusterconf.cpp
index 0ebffeb..907919d 100644
--- a/core/sqf/src/trafconf/clusterconf.cpp
+++ b/core/sqf/src/trafconf/clusterconf.cpp
@@ -49,6 +49,7 @@
 CClusterConfig::CClusterConfig( void )
               : CPNodeConfigContainer(TC_NODES_MAX)
               , CLNodeConfigContainer(TC_NODES_MAX)
+              , configMaster_(-1)
               , nodeReady_(false)
               , persistReady_(false)
               , newPNodeConfig_(true)
@@ -61,6 +62,8 @@
     const char method_name[] = "CClusterConfig::CClusterConfig";
     TRACE_ENTRY;
 
+    memset( &configMasterName_, 0, TC_PROCESSOR_NAME_MAX );
+
     TRACE_EXIT;
 }
 
@@ -373,6 +376,13 @@
     for (int i =0; i < nodeCount; i++ )
     {
         ProcessLNode( nodeConfigData[i], pnodeConfigInfo, lnodeConfigInfo );
+        // We want to pick the first configured node so all monitors pick the same one
+        // This only comes into play for a Trafodion start from scratch
+        if (i == 0)
+        {
+            configMaster_ = pnodeConfigInfo.pnid;
+            strncpy( configMasterName_ , pnodeConfigInfo.nodename, sizeof(configMasterName_) );
+        }
         AddNodeConfiguration( pnodeConfigInfo, lnodeConfigInfo );
     }
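One caveat on the strncpy in this hunk: with sizeof(configMasterName_) as the bound, a nodename of exactly TC_PROCESSOR_NAME_MAX bytes leaves the buffer unterminated (the constructor's memset covers every shorter case). The defensive idiom, as a sketch:

    #include <cstring>

    // strncpy does not NUL-terminate when the source fills the buffer,
    // so cap the copy one short and terminate explicitly.
    static void copy_name( char *dst, size_t dstLen, const char *src )
    {
        strncpy( dst, src, dstLen - 1 );
        dst[dstLen - 1] = '\0';
    }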
 
diff --git a/core/sqf/src/trafconf/clusterconf.h b/core/sqf/src/trafconf/clusterconf.h
index 1a8942f..ff4b17e 100644
--- a/core/sqf/src/trafconf/clusterconf.h
+++ b/core/sqf/src/trafconf/clusterconf.h
@@ -43,6 +43,8 @@
 
     void            Clear( void );
     bool            DeleteNodeConfig( int  pnid );
+    int             GetConfigMaster ( ) { return configMaster_;} 
+    char *          GetConfigMasterByName() {return configMasterName_;} 
     bool            Initialize( void );
     bool            Initialize( bool traceEnabled, const char *traceFile );
     void            InitCoreMask( cpu_set_t &coreMask );
@@ -73,6 +75,8 @@
 protected:
 private:
 
+    int             configMaster_;
+    char            configMasterName_[TC_PROCESSOR_NAME_MAX];
     bool            nodeReady_;    // true when node configuration loaded
     bool            persistReady_; // true when persist configuration loaded
     bool            newPNodeConfig_;
diff --git a/core/sql/arkcmp/CmpConnection.cpp b/core/sql/arkcmp/CmpConnection.cpp
index aa72355..67cd642 100644
--- a/core/sql/arkcmp/CmpConnection.cpp
+++ b/core/sql/arkcmp/CmpConnection.cpp
@@ -554,7 +554,9 @@
   
   if (cmpStatement)
   {
-    *this << *cmpStatement->diags();
+    ComDiagsArea *diags = cmpStatement->diags();
+    if (diags->getNumber() > 0)
+       *this << *diags;
     if (cmpStatement->reply()) 
       *this << *cmpStatement->reply();
   }
diff --git a/core/sql/arkcmp/CmpContext.cpp b/core/sql/arkcmp/CmpContext.cpp
index f824e1e..a915081 100644
--- a/core/sql/arkcmp/CmpContext.cpp
+++ b/core/sql/arkcmp/CmpContext.cpp
@@ -714,8 +714,6 @@
       ActiveSchemaDB()->getDefaults().getSqlParser_NADefaults_Ptr();
   gpClusterInfo = clusterInfo_;
   SqlParser_Diags = diags();
-  if (CmpCommon::diags())
-     CmpCommon::diags()->clear();
   cmpMemMonitor = cmpMemMonitor_;
 }
 
@@ -763,7 +761,7 @@
                           char *&gen_code, UInt32 &gen_code_len,
                           UInt32 parserFlags, const char *parentQid,
                           Int32 parentQidLen,
-                          ComDiagsArea *diagsArea)
+                          ComDiagsArea *&diagsArea)
 {
 
   CmpStatement::ReturnStatus rs = CmpStatement::CmpStatement_SUCCESS;
@@ -1081,10 +1079,15 @@
       }
   }
 
-  // get any errors or warnings from compilation out before distroy it
-  if (diagsArea)
-    diagsArea->mergeAfter(*CmpCommon::diags());
-
+  ComDiagsArea *compileDiagsArea = CmpCommon::diags();
+  if (compileDiagsArea->getNumber() > 0)
+  {
+     // get any errors or warnings from compilation out before destroying it
+     if (diagsArea == NULL)
+       diagsArea = ComDiagsArea::allocate(outHeap);
+     diagsArea->mergeAfter(*compileDiagsArea);
+     compileDiagsArea->clear();
+  }
   // cleanup and return
   if (cmpStatement && cmpStatement->readyToDie())
     delete cmpStatement;
diff --git a/core/sql/arkcmp/CmpContext.h b/core/sql/arkcmp/CmpContext.h
index 5df2eca..eceb9c1 100644
--- a/core/sql/arkcmp/CmpContext.h
+++ b/core/sql/arkcmp/CmpContext.h
@@ -381,7 +381,7 @@
                 CmpMessageObj::MessageTypeEnum op, char *&gen_code,
                 UInt32 &gen_code_len, UInt32 parserFlags,
                 const char *parentQid, Int32 parentQidLen,
-                ComDiagsArea *diagsArea = NULL);
+                ComDiagsArea *&diagsArea);
 
   // set/reset an env in compiler envs
   void setArkcmpEnvDirect(const char *name, const char *value,
diff --git a/core/sql/bin/SqlciErrors.txt b/core/sql/bin/SqlciErrors.txt
index c79010b..e66e170 100644
--- a/core/sql/bin/SqlciErrors.txt
+++ b/core/sql/bin/SqlciErrors.txt
@@ -140,7 +140,7 @@
 1138 ZZZZZ 99999 ADVANCED CRTCL DIALOUT --- unused ---
 1139 ZZZZZ 99999 BEGINNER MAJOR DBADMIN System-generated column $0~ColumnName of base table $1~TableName cannot appear in the search condition of a check constraint definition.
 1140 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Row-length $0~int0 exceeds the maximum allowed row-length of $1~int1 for table $2~TableName.
-1141 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Key length $0-int0 exceeds the maximum allowed key length of $1~int1
+1141 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Key length $0~int0 exceeds the maximum allowed key length of $1~int1.
 1142 0A000 99999 BEGINNER MAJOR DBADMIN --- unused ---
 1143 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Validation for constraint $0~ConstraintName failed; incompatible data exists in referencing base table $1~TableName and referenced base table $2~String0.  To display the data that violates the constraint, please use the following DML statement: $3~String1
 1144 ZZZZZ 99999 BEGINNER MAJOR DBADMIN A quoted string was expected in first key clause for column $0~ColumnName on table $1~TableName, but the value detected is ($2~String0).
@@ -1331,6 +1331,7 @@
 4320 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Stream access is not allowed on multi-partitioned table or index, when flag ATTEMPT_ASYNCHRONOUS_ACCESS is set to OFF. Object in scope: $0~TableName.
 4321 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An embedded update/delete is not allowed on a partitioned table, when flag ATTEMPT_ASYNCHRONOUS_ACCESS is set to OFF. Object in scope: $0~TableName.
 4322 0A000 99999 BEGINNER MAJOR DBADMIN A column with BLOB datatype cannot be used in this clause or function.
+4323 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Use of predefined UDF $0~String0 is deprecated and this function will be removed in a future release. Please use the function with the same name in schema TRAFODION."_LIBMGR_" instead. You may need to issue this command first: INITIALIZE TRAFODION, UPGRADE LIBRARY MANAGEMENT.
 4330 ZZZZZ 99999 BEGINNER MAJOR DBADMIN An invalid audit compression option $0~Int0 was used in the call to INTERPRET_AS_ROW.
 4331 ZZZZZ 99999 BEGINNER MAJOR DBADMIN KEY_RANGE_COMPARISON of partitioning keys on hash partitioned table $1~TableName is not allowed.
 4332 ZZZZZ 99999 BEGINNER MAJOR DBADMIN Table $1~TableName not exposed. Tables in scope: $2~string0.
diff --git a/core/sql/cli/Cli.cpp b/core/sql/cli/Cli.cpp
index 41e9bf7..d157eda 100644
--- a/core/sql/cli/Cli.cpp
+++ b/core/sql/cli/Cli.cpp
@@ -8314,7 +8314,7 @@
   ContextCli   & currContext = *(cliGlobals->currContext());
   ComDiagsArea & diags       = currContext.diags();
 
-  ComDiagsArea * myDiags = ComDiagsArea::allocate(currContext.exHeap());
+  ComDiagsArea * myDiags = NULL;
 
   ExeCliInterface *cliInterface = NULL;
   if (inCliInterface && (*inCliInterface))
@@ -8413,11 +8413,7 @@
 	cliRC = cliInterface->executeImmediate(query);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	cliRC = 0;
       }
@@ -8439,12 +8435,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
            	// create lob descriptor chunks table salted
        	str_sprintf(query, "create ghost table %s (descPartnKey largeint not null, descSysKey largeint not null, chunkNum int not null, chunkLen largeint not null, dataOffset largeint, stringParam varchar(400), primary key(descPartnKey, descSysKey, chunkNum)) salt using 8 partitions",
@@ -8459,13 +8450,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
-	
 	cliRC = 0;
       }
       break;
@@ -8483,11 +8468,7 @@
 	currContext.resetSqlParserFlags(0x1);
 	
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	str_sprintf(query, "drop ghost table %s",
 		    lobDescChunksName);
@@ -8501,14 +8482,7 @@
 
 	
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
-
-	  
-	
 	cliRC = 0;
       }
       break;
@@ -8526,10 +8500,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
 	    goto error_return;
-	  }
 
 	str_sprintf(query, "cleanup table %s",
 		    lobDescChunksName);
@@ -8542,10 +8513,7 @@
 	currContext.resetSqlParserFlags(0x1);
 	
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
 	    goto error_return;
-	  }	   
 	cliRC = 0;
       }
       break;
@@ -8567,10 +8535,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);	    
 	    goto error_return;
-	  }
 
         
         // insert into lob descriptor chunks table
@@ -8601,11 +8566,7 @@
         currContext.resetSqlParserFlags(0x1);
 
         if (cliRC < 0)
-          {
-            cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
-            goto error_return;
-          }
+	    goto error_return;
         
 	if (inoutDescPartnKey)
 	  *inoutDescPartnKey = descPartnKey;
@@ -8650,11 +8611,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	str_sprintf(query, "select numChunks from table(ghost table %s) h where h.descPartnKey = %ld and h.syskey = %ld for read committed access",
 		    lobDescHandleName,  
@@ -8670,11 +8627,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 	
 	// insert into lob descriptor chunks table
 	if (blackBox && (blackBoxLen && (*blackBoxLen > 0)))
@@ -8704,11 +8657,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
       	if (inoutDescPartnKey)
 	  *inoutDescPartnKey = descPartnKey;
 
@@ -8741,11 +8690,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 
 
@@ -8761,11 +8706,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
         if ((lobType != Lob_External_HDFS_File) && blackBox)
           {
@@ -8801,11 +8742,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
-	    goto error_return;
-	  }
+	   goto error_return;
        
 	// update lob handle with the returned values
 	if (outLobHandle)
@@ -8839,11 +8776,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
-	    goto error_return;
-	  }
+	   goto error_return;
 
 	// delete from lob descriptor chunks table
 	str_sprintf(query, "delete from table(ghost table %s) where descPartnKey = %ld and descSysKey = %ld",
@@ -8857,11 +8790,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
         
       }
       break;
@@ -8882,16 +8811,11 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
+
         cliRC = cliInterface->fetch();
 	if (cliRC < 0)
 	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-
 	    cliInterface->fetchRowsEpilogue(0);
 	    goto error_return;
 	  }
@@ -8922,10 +8846,7 @@
 	  {	    
 	    cliRC = cliInterface->fetchRowsEpilogue(0);
 	    if (cliRC < 0)
-	      {
-		cliInterface->retrieveSQLDiagnostics(myDiags);	    
 		goto error_return;	 	  
-	      }
 	  }
 	
 	// This lob has only one chunk. Read and return the single descriptor.
@@ -8943,16 +8864,11 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	cliRC = cliInterface->fetch();
 	if (cliRC < 0)
 	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
 
 	    cliInterface->fetchRowsEpilogue(0);
 
@@ -9007,11 +8923,7 @@
 
 	cliRC = cliInterface->fetchRowsEpilogue(0);
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	cliRC = saveCliErr;
       }
@@ -9030,11 +8942,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	if (inCliInterface)
 	  *inCliInterface = cliInterface;
@@ -9046,8 +8954,6 @@
 	cliRC = cliInterface->fetch();
 	if (cliRC < 0)
 	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-
 	    cliInterface->fetchRowsEpilogue(0);
 
 	    if (inCliInterface)
@@ -9093,7 +8999,6 @@
 	cliRC = cliInterface->fetchRowsEpilogue(0);
 	if (cliRC < 0)
 	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
 	    
 	    if (inCliInterface)
 	      *inCliInterface = NULL;
@@ -9121,11 +9026,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	// delete data from the chunks desc table
 	str_sprintf(query, "delete from table(ghost table %s)",
@@ -9139,11 +9040,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
-	    goto error_return;
-	  }
+           goto error_return;
 
       }
       break;
@@ -9208,11 +9105,7 @@
 
 	
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	cliRC = saveCliErr;
       }
@@ -9241,20 +9134,19 @@
 
   NADELETEBASIC(query, currContext.exHeap());
 
+  if (cliRC < 0)
+    {
+      if (myDiags == NULL)
+         myDiags = ComDiagsArea::allocate(currContext.exHeap());
+      cliInterface->retrieveSQLDiagnostics(myDiags);
+      diags.mergeAfter(*myDiags);
+      myDiags->decrRefCount();
+    }
   if (NOT (inCliInterface && (*inCliInterface)))
     {
       delete cliInterface;
       cliInterface = NULL;
     }
-
-  if (cliRC < 0)
-    {
-      if (myDiags->getNumber() > 0)
-	{
-	  diags.mergeAfter(*myDiags);
-	}
-    }
-  myDiags->deAllocate();
   if (cliRC < 0)
      return cliRC;
   else if (cliRC == 100)
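Every Cli.cpp hunk in this change is an instance of one refactor: the per-failure-site retrieveSQLDiagnostics calls move to a single error path, and the scratch ComDiagsArea is allocated lazily instead of up front on every call. The shape, distilled (types and calls are the ones used above; Trafodion headers omitted, and run_step is an illustrative name):

    // Lazy-allocation error path: pay for a diagnostics area only when a
    // CLI call actually fails, then merge once and drop our reference.
    Lng32 run_step( ExeCliInterface *cliInterface, ComDiagsArea &diags,
                    NAHeap *exHeap, char *query )
    {
        ComDiagsArea *myDiags = NULL;
        Lng32 cliRC = cliInterface->executeImmediate( query );
        if ( cliRC < 0 )
        {
            if ( myDiags == NULL )
                myDiags = ComDiagsArea::allocate( exHeap );
            cliInterface->retrieveSQLDiagnostics( myDiags );
            diags.mergeAfter( *myDiags );
            myDiags->decrRefCount();
        }
        return cliRC;
    }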
@@ -9297,7 +9189,7 @@
   ContextCli   & currContext = *(cliGlobals->currContext());
   ComDiagsArea & diags       = currContext.diags();
 
-  ComDiagsArea * myDiags = ComDiagsArea::allocate(currContext.exHeap());
+  ComDiagsArea * myDiags = NULL;
 
   ExeCliInterface *cliInterface = NULL;
   cliInterface = new (currContext.exHeap()) 
@@ -9360,11 +9252,7 @@
   currContext.resetSqlParserFlags(0x1);
 
   if (cliRC < 0)
-    {
-      cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
       goto error_return;
-    }  
   {     
   //Allocate an inmemory array of numEntries.
   ExLobInMemoryDescChunksEntry *dcInMemoryArray = new ExLobInMemoryDescChunksEntry[numEntries];
@@ -9381,18 +9269,11 @@
   currContext.resetSqlParserFlags(0x1);
 
   if (cliRC < 0)
-    {
-      cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
       goto error_return;
-    }
   cliRC = cliInterface->fetch();
   if (cliRC < 0)
     {
-      cliInterface->retrieveSQLDiagnostics(myDiags);
-
       cliInterface->fetchRowsEpilogue(0);
-
       goto error_return;
     }
   
@@ -9436,21 +9317,14 @@
       i++;
       if (cliRC < 0)
         {
-          cliInterface->retrieveSQLDiagnostics(myDiags);
-
           cliInterface->fetchRowsEpilogue(0);
-
           goto error_return;
         }
     }
 	
   cliRC = cliInterface->fetchRowsEpilogue(0);
   if (cliRC < 0)
-    {
-      cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
       goto error_return;
-    }
  
   // adjust in memory array to calculate holes and new offsets.
   ExpLOBoper::calculateNewOffsets(dcInMemoryArray,numEntries);
@@ -9486,8 +9360,6 @@
 
           if (cliRC < 0)
             {
-              cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
               //tbd Give warning and rollback just these updates  and return with warning. For now return error and abort the iud operation itself since there is no support for nested transactions or SUSPEND and RESUME. 
               goto error_return;
             }
@@ -9539,12 +9411,12 @@
 
   if (cliRC < 0)
     {
-      if (myDiags->getNumber() > 0)
-	{
-	  diags.mergeAfter(*myDiags);
-	}
+      if (myDiags == NULL)
+         myDiags = ComDiagsArea::allocate(currContext.exHeap());
+      cliInterface->retrieveSQLDiagnostics(myDiags);
+      diags.mergeAfter(*myDiags);
+      myDiags->decrRefCount();
     }
-  myDiags->deAllocate();
   if (cliRC < 0)
      return cliRC;
   else if (cliRC == 100)
@@ -9577,7 +9449,8 @@
   ContextCli   & currContext = *(cliGlobals->currContext());
   ComDiagsArea & diags       = currContext.diags();
 
-  ComDiagsArea * myDiags = ComDiagsArea::allocate(currContext.exHeap());
+  ComDiagsArea * myDiags = NULL;
+
   char logBuf[4096];
   lobDebugInfo("In LOBddlInterface",0,__LINE__,lobTrace);
   ExeCliInterface *cliInterface = NULL;
@@ -9612,11 +9485,7 @@
 	currContext.resetSqlParserFlags(0x1);
 	
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 	
 	// populate the lob metadata table
 	for (Lng32 i = 0; i < numLOBs; i++)
@@ -9634,11 +9503,7 @@
 	    currContext.resetSqlParserFlags(0x1);
 	    
 	    if (cliRC < 0)
-	      {
-		cliInterface->retrieveSQLDiagnostics(myDiags);
-		
-		goto error_return;
-	      }
+	       goto error_return;
 	    
 	  } // for
 
@@ -9654,7 +9519,7 @@
 			    &rc, NULL, (char*)"ExpLOBInterfaceCreate",
 
 			    getLobErrStr(rc));
-            goto error_return;
+            goto non_cli_error_return;
           }
           
 	for (Lng32 i = 0; i < numLOBs; i++)
@@ -9674,7 +9539,7 @@
 			    &rc, NULL, (char*)"ExpLOBInterfaceCreate",
 
 			    getLobErrStr(rc));
-		goto error_return;
+		goto non_cli_error_return;
 	      }
 	    
 	    // create LOB descriptor and LOB header tables
@@ -9701,11 +9566,7 @@
 					     NULL,
 					     0,lobTrace);
 	    if (cliRC < 0)
-	      {
-		cliInterface->retrieveSQLDiagnostics(myDiags);
-		
-		goto error_return;
-	      }
+	       goto error_return;
 	    
 	  } // for
 	
@@ -9726,11 +9587,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(&diags);
-	    
-	    goto error_return;
-	  }
+           goto error_return;
 	
 	// drop descriptor table
 	for (Lng32 i = 0; i < numLOBs; i++)
@@ -9760,11 +9617,7 @@
 					     NULL,
 					     0,lobTrace);
 	    if (cliRC < 0)
-	      {
-		cliInterface->retrieveSQLDiagnostics(myDiags);
-		
 		goto error_return;
-	      }
 	    
 	  } // for
         //If all the descriptor tables got dropped correctly, drop the hdfs 
@@ -9782,7 +9635,7 @@
 			    (ExeErrorCode)(8442), NULL, &cliRC    , 
 			    &rc, NULL, (char*)"ExpLOBInterfaceCreate",
 			    getLobErrStr(rc));
-            goto error_return;
+            goto non_cli_error_return;
 	      
           }
         for (Lng32 i = 0; i < numLOBs; i++)
@@ -9800,8 +9653,8 @@
 			    (ExeErrorCode)(8442), NULL, &cliRC    , 
 			    &rc, NULL, (char*)"ExpLOBInterfaceDrop  ",
 			    getLobErrStr(rc));
-		goto error_return;
-                }
+		goto non_cli_error_return;
+              }
           }//for
       }
       break;
@@ -9819,11 +9672,8 @@
 	currContext.resetSqlParserFlags(0x1);
 	
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(&diags);
-	    
-	    goto error_return;
-	  }
+           goto error_return;
+
 	//Initialize LOB interface 
         
         Int32 rc= ExpLOBoper::initLOBglobal(exLobGlob,currContext.exHeap(),&currContext,hdfsServer,hdfsPort);
@@ -9835,7 +9685,7 @@
 			    (ExeErrorCode)(8442), NULL, &cliRC    , 
                             &rc, NULL, (char*)"ExpLOBInterfaceCreate",
                             getLobErrStr(rc));
-            goto error_return;	      
+            goto non_cli_error_return;	      
           }
 	// drop descriptor table
 	for (Lng32 i = 0; i < numLOBs; i++)
@@ -9853,7 +9703,7 @@
 			    (ExeErrorCode)(8442), NULL, &cliRC    , 
 			    &rc, NULL, (char*)"ExpLOBInterfaceDrop  ",
 			    getLobErrStr(rc));
-		goto error_return;
+		goto non_cli_error_return;
 	      }
 	    
 	    // drop LOB descriptor and LOB header tables
@@ -9880,11 +9730,7 @@
 					     NULL,
 					     0,lobTrace);
 	    if (cliRC < 0)
-	      {
-		cliInterface->retrieveSQLDiagnostics(myDiags);
-		
 		goto error_return;
-	      }
 	    
 	  } // for
 	
@@ -9911,11 +9757,7 @@
 	currContext.resetSqlParserFlags(0x1);
 
 	if (cliRC < 0)
-	  {
-	    cliInterface->retrieveSQLDiagnostics(myDiags);
-	    
 	    goto error_return;
-	  }
 
 	cliRC = 0;
 	Lng32 j = 0;
@@ -9925,7 +9767,6 @@
 	    cliRC = cliInterface->fetch();
 	    if (cliRC < 0)
 	      {
-		cliInterface->retrieveSQLDiagnostics(myDiags);
 		
 		cliInterface->fetchRowsEpilogue(0);
 		
@@ -9971,7 +9812,6 @@
 		if (ccliRC < 0)
 		  {
 		    cliRC = ccliRC;
-		    cliInterface->retrieveSQLDiagnostics(myDiags);
 		    goto error_return;
 		  }
 	      }
@@ -9990,19 +9830,19 @@
     } // switch
 
  error_return:
+  if (cliRC < 0)
+    {
+      if (myDiags == NULL)
+         myDiags = ComDiagsArea::allocate(currContext.exHeap());
+      cliInterface->retrieveSQLDiagnostics(myDiags);
+      diags.mergeAfter(*myDiags);
+      myDiags->decrRefCount();
+    }
+ non_cli_error_return:
   ExpLOBinterfaceCleanup(exLobGlob);
   NADELETEBASIC(query, currContext.exHeap());
   NADELETEBASIC(hdfsServer,currContext.exHeap());
   delete cliInterface;
- 
-  if (cliRC < 0)
-    {
-      if (myDiags->getNumber() > 0)
-	{
-	  diags.mergeAfter(*myDiags);
-	}
-    }
-  myDiags->deAllocate();
   if (cliRC < 0)
      return cliRC;
   else if (cliRC == 100)
@@ -10358,7 +10198,6 @@
 				       const char * qryName,
 				       ExeCliInterface ** cliInterfaceArr,
 				       SequenceGeneratorAttributes* sga,
-				       ComDiagsArea * myDiags,
 				       ContextCli &currContext,
 				       ComDiagsArea & diags,
 				       NAHeap *exHeap)
@@ -10370,6 +10209,8 @@
 
   Int64 rowsAffected = 0;
 
+  ComDiagsArea *myDiags = NULL;
+
   ExeCliInterface * cliInterface = NULL;
   ExeCliInterface * cqdCliInterface = NULL;
 
@@ -10406,9 +10247,11 @@
       cliRC = cqdCliInterface->holdAndSetCQD("limit_max_numeric_precision", "ON");
       if (cliRC < 0)
 	{
+         if (myDiags == NULL)
+             myDiags = ComDiagsArea::allocate(exHeap);
 	  cqdCliInterface->retrieveSQLDiagnostics(myDiags);
 	  diags.mergeAfter(*myDiags);
-	  
+          myDiags->decrRefCount();
 	  return cliRC;
 	}
 
@@ -10418,11 +10261,12 @@
 						    stmtName);
       if (cliRC < 0)
 	{
+          if (myDiags == NULL)
+             myDiags = ComDiagsArea::allocate(exHeap);
 	  cliInterface->retrieveSQLDiagnostics(myDiags);
 	  diags.mergeAfter(*myDiags);
-	  
+          myDiags->decrRefCount();
 	  cqdCliInterface->restoreCQD("limit_max_numeric_precision");
-
 	  return cliRC;
 	}
 
@@ -10438,7 +10282,6 @@
 					      SequenceGeneratorAttributes* sga,
 					      NABoolean recycleQry,
                                               NABoolean startLocalXn,
-					      ComDiagsArea * myDiags,
 					      ContextCli &currContext,
 					      ComDiagsArea & diags,
 					      NAHeap *exHeap,
@@ -10456,6 +10299,8 @@
   Int64 rowsAffected = 0;
 
   ExeCliInterface * cliInterface = NULL;
+
+  ComDiagsArea *myDiags = NULL;
   
   char queryBuf[2000];
 
@@ -10469,7 +10314,7 @@
                                             "update %s.\"%s\".%s set upd_ts = cast(? as largeint not null) where seq_uid = %ld",
                                             SEQ_UPD_TS_QRY_IDX,
                                             "SEQ_UPD_TS_QRY_IDX",
-                                            cliInterfaceArr, sga, myDiags, currContext, diags, exHeap);
+                                            cliInterfaceArr, sga, currContext, diags, exHeap);
           if (cliRC < 0)
             return cliRC;
         }
@@ -10487,9 +10332,11 @@
         ((char*)inputValues, inputValuesLen, NULL, NULL, &rowsAffected);
       if (cliRC < 0)
         {
+         if (myDiags == NULL)
+             myDiags = ComDiagsArea::allocate(exHeap);
           cliInterface->retrieveSQLDiagnostics(myDiags);
           diags.mergeAfter(*myDiags);
-          
+          myDiags->decrRefCount();
           return cliRC;
         }
       
@@ -10510,7 +10357,7 @@
                                         "select  case when cast(? as largeint not null) = 1 then t.startVal else t.nextValue end, t.redefTS from (update %s.\"%s\".%s set next_value = (case when cast(? as largeint not null) = 1 then start_value + cast(? as largeint not null) else (case when next_value + cast(? as largeint not null) > max_value then max_value+1 else next_value + cast(? as largeint not null) end) end), num_calls = num_calls + 1 where seq_uid = %ld return old.start_value, old.next_value, old.redef_ts) t(startVal, nextValue, redefTS);",
                                         SEQ_PROCESS_QRY_IDX,
                                         "SEQ_PROCESS_QRY_IDX",
-                                        cliInterfaceArr, sga, myDiags, currContext, diags, exHeap);
+                                        cliInterfaceArr, sga, currContext, diags, exHeap);
       if (cliRC < 0)
         return cliRC;
     }
@@ -10532,9 +10379,11 @@
      &rowsAffected);
   if (cliRC < 0)
     {
+      if (myDiags == NULL)
+         myDiags = ComDiagsArea::allocate(exHeap);
       cliInterface->retrieveSQLDiagnostics(myDiags);
       diags.mergeAfter(*myDiags);
-
+      myDiags->decrRefCount();
       if (diags.mainSQLCODE() == -EXE_NUMERIC_OVERFLOW)
         {
           cliRC = -EXE_SG_MAXVALUE_EXCEEDED;
@@ -10575,7 +10424,7 @@
                                             "select upd_ts from %s.\"%s\".%s where seq_uid = %ld",
                                             SEQ_SEL_TS_QRY_IDX,
                                             "SEQ_SEL_TS_QRY_IDX",
-                                            cliInterfaceArr, sga, myDiags, currContext, diags, exHeap);
+                                            cliInterfaceArr, sga, currContext, diags, exHeap);
           if (cliRC < 0)
             return cliRC;
         }
@@ -10585,9 +10434,11 @@
         (NULL, 0, (char*)outputValues, &outputValuesLen, &rowsAffected);
       if (cliRC < 0)
         {
+          if (myDiags == NULL)
+             myDiags = ComDiagsArea::allocate(exHeap);
           cliInterface->retrieveSQLDiagnostics(myDiags);
           diags.mergeAfter(*myDiags);
-          
+          myDiags->decrRefCount();
           return cliRC;
         }
       
@@ -10620,7 +10471,6 @@
 						   ExeCliInterface ** cliInterfaceArr,
 						   SequenceGeneratorAttributes* sga,
 						   NABoolean recycleQry,
-						   ComDiagsArea * myDiags,
 						   ContextCli &currContext,
 						   ComDiagsArea & diags,
 						   NAHeap *exHeap,
@@ -10629,6 +10479,7 @@
 {
   Lng32 cliRC = 0;
   Lng32 retCliRC = 0;
+  ComDiagsArea *myDiags = NULL;
 
   if (! cliInterfaceArr[SEQ_CQD_IDX])
     cliInterfaceArr[SEQ_CQD_IDX] = new (currContext.exHeap()) 
@@ -10661,9 +10512,11 @@
           cliRC = cqdCliInterface->beginWork();
           if (cliRC < 0)
             {
+              if (myDiags == NULL)
+                 myDiags = ComDiagsArea::allocate(exHeap);
               cqdCliInterface->retrieveSQLDiagnostics(myDiags);
               diags.mergeAfter(*myDiags);
-
+              myDiags->decrRefCount();
               retCliRC = cliRC;
               goto label_return;
             }
@@ -10674,7 +10527,6 @@
 					       sga,
 					       recycleQry,
                                                startLocalXn,
-					       myDiags,
 					       currContext,
 					       diags,
 					       exHeap,
@@ -10753,8 +10605,6 @@
   ContextCli   & currContext = *(cliGlobals->currContext());
   ComDiagsArea & diags       = currContext.diags();
 
-  ComDiagsArea * myDiags = ComDiagsArea::allocate(currContext.exHeap());
-
   ExeCliInterface ** cliInterfaceArr = NULL;
   if (inCliInterfaceArr && (*inCliInterfaceArr))
     {
@@ -10782,14 +10632,12 @@
 						cliInterfaceArr,
 						sga,
 						FALSE,
-						myDiags,
 						currContext,
 						diags,
 						currContext.exHeap(),
 						nextValue,
 						endValue);
   if (cliRC < 0) {
-     myDiags->deAllocate();     
      return cliRC;
   }
   
@@ -10800,19 +10648,15 @@
 						    cliInterfaceArr,
 						    sga,
 						    TRUE,
-						    myDiags,
 						    currContext,
 						    diags,
 						    currContext.exHeap(),
 						    nextValue,
 						    endValue);
       if (cliRC < 0) {
-         myDiags->deAllocate();     
 	 return cliRC;
       }
     }
-
-  myDiags->deAllocate();     
   sga->setSGNextValue(nextValue);
   sga->setSGEndValue(endValue);
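
The sequence-generator hunks above all adopt the same discipline: the ComDiagsArea is no longer allocated up front and freed on every exit path, but allocated lazily on the first CLI error, merged into the caller's diags, and released with decrRefCount() before the early return (the loopDiags change in ExSqlComp.cpp below follows the same pattern). The payoff is that the common success path performs no diagnostics heap traffic at all. A minimal self-contained sketch of the pattern, using a hypothetical Diags stand-in for the reference-counted ComDiagsArea:

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for ComDiagsArea: created via allocate(),
    // released through decrRefCount() rather than direct delete.
    struct Diags {
      int refs = 1;
      int errors = 0;
      static Diags *allocate() { return new Diags(); }
      void mergeAfter(const Diags &other) { errors += other.errors; }
      void decrRefCount() { if (--refs == 0) delete this; }
    };

    // Mirrors the edited call sites: nothing is allocated on the success
    // path; on failure, allocate, merge into the caller's diags, release.
    int doStep(bool fail, Diags &callerDiags) {
      Diags *myDiags = nullptr;
      int cliRC = fail ? -1 : 0;
      if (cliRC < 0) {
        if (myDiags == nullptr)
          myDiags = Diags::allocate();
        myDiags->errors = 1;             // stands in for retrieveSQLDiagnostics()
        callerDiags.mergeAfter(*myDiags);
        myDiags->decrRefCount();         // release before the early return
        return cliRC;
      }
      return 0;
    }

    int main() {
      Diags callerDiags;
      assert(doStep(false, callerDiags) == 0 && callerDiags.errors == 0);
      assert(doStep(true, callerDiags) < 0 && callerDiags.errors == 1);
      std::printf("error path merged %d diagnostic\n", callerDiags.errors);
    }
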
 
diff --git a/core/sql/cli/Context.cpp b/core/sql/cli/Context.cpp
index 38622ae..ab84711 100644
--- a/core/sql/cli/Context.cpp
+++ b/core/sql/cli/Context.cpp
@@ -60,7 +60,6 @@
 #include "ComUser.h"
 #include "CmpSeabaseDDLauth.h"
 
-#include "hdfs.h"
 #include "StmtCompilationMode.h"
 
 #include "ExCextdecs.h"
@@ -160,11 +159,13 @@
     dropInProgress_(FALSE),
     isEmbeddedArkcmpInitialized_(FALSE),
     embeddedArkcmpContext_(NULL),
+    prevCmpContext_(NULL),
     ddlStmtsExecuted_(FALSE),
     numCliCalls_(0),
     jniErrorStr_(&exHeap_),
     hbaseClientJNI_(NULL),
     hiveClientJNI_(NULL),
+    hdfsClientJNI_(NULL),
     arkcmpArray_(&exHeap_),
     cmpContextInfo_(&exHeap_),
     cmpContextInUse_(&exHeap_),
@@ -2525,11 +2526,12 @@
     {
       char *dummyReply = NULL;
       ULng32 dummyLen;
+      ComDiagsArea *diagsArea = NULL;
       cmpStatus = CmpCommon::context()->compileDirect(pMessage,
                                (ULng32) sizeof(userMessage), &exHeap_,
                                SQLCHARSETCODE_UTF8, EXSQLCOMP::DATABASE_USER,
                                dummyReply, dummyLen, getSqlParserFlags(),
-                               NULL, 0);
+                               NULL, 0, diagsArea);
       if (cmpStatus != 0)
         {
           char emsText[120];
@@ -2544,6 +2546,11 @@
           exHeap_.deallocateMemory((void*)dummyReply);
           dummyReply = NULL;
         }
+      if (diagsArea != NULL)
+      {
+         diagsArea->decrRefCount();
+         diagsArea = NULL;
+      }
     }
 
   // if there is an error using embedded compiler or we are already in the 
@@ -2725,11 +2732,12 @@
     {
       char *dummyReply = NULL;
       ULng32 dummyLen;
+      ComDiagsArea *diagsArea = NULL;
       cmpStatus = CmpCommon::context()->compileDirect((char *) &flags,
                                (ULng32) sizeof(Lng32), &exHeap_,
                                SQLCHARSETCODE_UTF8, EXSQLCOMP::END_SESSION,
                                dummyReply, dummyLen, getSqlParserFlags(),
-                               NULL, 0);
+                               NULL, 0, diagsArea);
       if (cmpStatus != 0)
         {
           char emsText[120];
@@ -2744,6 +2752,11 @@
           exHeap_.deallocateMemory((void*)dummyReply);
           dummyReply = NULL;
         }
+      if (diagsArea != NULL)
+      {
+         diagsArea->decrRefCount();
+         diagsArea = NULL;
+      }
     }
 
   // if there is an error using embedded compiler or we are already in the 
@@ -3033,7 +3046,7 @@
            CmpMessageObj::MessageTypeEnum(xnMsgType),
            dummyReply, dummyLength,
            currCtxt->getSqlParserFlags(),
-           NULL, 0);
+           NULL, 0, diagsArea);
       if (cmpRet != 0)
         {
           char emsText[120];
@@ -3116,7 +3129,7 @@
   ComDiagsArea *tempDiagsArea = &diagsArea_;
   tempDiagsArea->clear();
  
-  IpcServer *ssmpServer = ssmpManager_->getSsmpServer(
+  IpcServer *ssmpServer = ssmpManager_->getSsmpServer(exHeap(),
                                  cliGlobals->myNodeName(), 
                                  cliGlobals->myCpu(), tempDiagsArea);
   if (ssmpServer == NULL)
@@ -3284,7 +3297,7 @@
   }
   ComDiagsArea *tempDiagsArea = &diagsArea_;
   ExSsmpManager *ssmpManager = cliGlobals->getSsmpManager();
-  IpcServer *ssmpServer = ssmpManager->getSsmpServer(nodeName, 
+  IpcServer *ssmpServer = ssmpManager->getSsmpServer(exHeap(), nodeName, 
            (cpu == -1 ?  cliGlobals->myCpu() : cpu), tempDiagsArea);
   if (ssmpServer == NULL)
     return NULL; // diags are in diagsArea_
@@ -4641,22 +4654,31 @@
   return (cmpCntxtInfo->getUseCount() == 1? 0: 1); // success
 }
 
-Int32 ContextCli::switchBackCmpContext(void)
+void ContextCli::copyDiagsAreaToPrevCmpContext()
 {
   ex_assert(cmpContextInUse_.entries(), "Invalid use of switch back call");
 
+  CmpContext *curr = embeddedArkcmpContext_;
+
+  if (cmpContextInUse_.getLast(prevCmpContext_) == FALSE)
+    return; 
+  if (curr->diags()->getNumber() > 0)
+     prevCmpContext_->diags()->mergeAfter(*curr->diags());
+}
+
+Int32 ContextCli::switchBackCmpContext(void)
+{
+  if (prevCmpContext_ == NULL) 
+  {
+     ex_assert(cmpContextInUse_.entries(), "Invalid use of switch back call");
+     if (cmpContextInUse_.getLast(prevCmpContext_) == FALSE)
+        return -1; 
+  }
   // switch back
   CmpContext *curr = embeddedArkcmpContext_;
-  CmpContext *prevCmpCntxt;
 
-  if (cmpContextInUse_.getLast(prevCmpCntxt) == FALSE)
-    return -1;  // failed to get previous CmpContext, should not have happened.
-
-  embeddedArkcmpContext_ = prevCmpCntxt;
-  cmpCurrentContext = prevCmpCntxt;  // restore the thread global
-
-  // merge diags to previous CmpContext
-  prevCmpCntxt->diags()->mergeAfter(*curr->diags());
+  embeddedArkcmpContext_ = prevCmpContext_;
+  cmpCurrentContext = prevCmpContext_;  // restore the thread global
 
   // book keeping
   CmpContextInfo *cmpCntxtInfo;
@@ -4674,6 +4696,7 @@
   cmpCurrentContext->switchBackContext();
 
   deinitializeArkcmp();
+  prevCmpContext_ = NULL;
 
   return 0;  // success
 }
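
Context.cpp now splits the old switchBackCmpContext() into two phases: copyDiagsAreaToPrevCmpContext() only peeks the previous compiler context and merges the current diagnostics into it, while switchBackCmpContext() performs the actual pop and restore, working whether or not the copy phase ran, thanks to the cached prevCmpContext_. A rough sketch of that two-phase shape over hypothetical minimal types:

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical minimal stand-ins for CmpContext and the in-use stack.
    struct Ctx { std::string diags; };

    struct Session {
      std::vector<Ctx*> inUse;   // stack of suspended contexts
      Ctx *current = nullptr;
      Ctx *prev = nullptr;       // caches the peeked previous context

      // Phase 1: peek the previous context and merge diags into it.
      void copyDiagsToPrev() {
        if (inUse.empty()) return;
        prev = inUse.back();
        if (!current->diags.empty())
          prev->diags += current->diags;
      }

      // Phase 2: pop and restore; works whether or not phase 1 ran.
      int switchBack() {
        if (prev == nullptr) {
          if (inUse.empty()) return -1;
          prev = inUse.back();
        }
        inUse.pop_back();
        current = prev;
        prev = nullptr;          // reset for the next switch, as in the diff
        return 0;
      }
    };

    int main() {
      Ctx outer;
      Ctx inner{ "warning;" };
      Session s;
      s.inUse.push_back(&outer);
      s.current = &inner;
      s.copyDiagsToPrev();       // diags flow even before the switch happens
      assert(s.switchBack() == 0);
      assert(s.current == &outer && outer.diags == "warning;");
    }
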
diff --git a/core/sql/cli/Context.h b/core/sql/cli/Context.h
index 973a647..66ddc5b 100644
--- a/core/sql/cli/Context.h
+++ b/core/sql/cli/Context.h
@@ -62,6 +62,7 @@
 #include "ExpSeqGen.h"
 #include "ssmpipc.h"
 #include "hdfs.h"
+#include "HdfsClient_JNI.h"
 
 class CliGlobals;
 class HashQueue;
@@ -200,6 +201,11 @@
   HiveClient_JNI *getHiveClient() { return hiveClientJNI_; }
   void setHiveClient(HiveClient_JNI *hiveClientJNI)
   { hiveClientJNI_ = hiveClientJNI; }
+
+  HdfsClient *getHDFSClient() { return hdfsClientJNI_; }
+  void setHDFSClient(HdfsClient *hdfsClientJNI)
+  { hdfsClientJNI_ = hdfsClientJNI; }
+
   //expose cmpContextInfo_ to get HQC info of different contexts
   const NAArray<CmpContextInfo *> & getCmpContextInfo() const { return cmpContextInfo_; }
 
@@ -299,6 +305,8 @@
   NABoolean isEmbeddedArkcmpInitialized_;
   CmpContext * embeddedArkcmpContext_;
 
+  CmpContext * prevCmpContext_;
+
   // pointer to the array of server  versions used to communicate with ARKCMP.
   ARRAY(ExSqlComp *) arkcmpArray_;
 
@@ -512,6 +520,7 @@
   NAString jniErrorStr_; 
   HBaseClient_JNI *hbaseClientJNI_;
   HiveClient_JNI *hiveClientJNI_;
+  HdfsClient *hdfsClientJNI_;
 
   // this points to data used by trafSE (traf storage engine) that is context specific.
   // It points to a list of 'black box' of data allocated by user and is returned
@@ -803,6 +812,7 @@
   Int32 switchToCmpContext(Int32 cmpCntxtType);
   Int32 switchToCmpContext(void *cmpCntxt);
   Int32 switchBackCmpContext(void);
+  void copyDiagsAreaToPrevCmpContext();
   NABoolean isDropInProgress() { return dropInProgress_; }
   void setDropInProgress() { dropInProgress_ = TRUE; };
 
diff --git a/core/sql/cli/ExSqlComp.cpp b/core/sql/cli/ExSqlComp.cpp
index 3a514b0..01f8396 100644
--- a/core/sql/cli/ExSqlComp.cpp
+++ b/core/sql/cli/ExSqlComp.cpp
@@ -634,7 +634,7 @@
                       (ULng32) sizeof(userMessage));
   }
 
-  ComDiagsArea loopDiags(h_);
+  ComDiagsArea *loopDiags = NULL;
   ExControlArea *ca = ctxt->getControlArea();
   Queue *q = ca->getControlList();
 
@@ -797,10 +797,14 @@
           }
           else
           {
-            loopDiags.mergeAfter(*diagArea_);
-            diagArea_->clear();
+            if (diagArea_->getNumber() > 0)
+            {
+               if (loopDiags == NULL)
+                   loopDiags = ComDiagsArea::allocate(h_);
+               loopDiags->mergeAfter(*diagArea_);
+               diagArea_->clear();
+            }
           }
-          
           ret = SUCCESS;
         }
         else
@@ -834,10 +838,13 @@
     } // control list is NOT empty
   } // if (ret != ERROR)
   //
-  if (ret != SUCCESS || diagArea_->getNumber() || loopDiags.getNumber())
+  if (ret != SUCCESS || diagArea_->getNumber() || loopDiags != NULL )
   {
-    diagArea_->mergeAfter(loopDiags);
-    loopDiags.clear();
+    if (loopDiags != NULL)
+    {
+       diagArea_->mergeAfter(*loopDiags);
+       loopDiags->decrRefCount();
+    }
     if (ret != ERROR)
       ret = diagArea_->getNumber(DgSqlCode::ERROR_) ? ERROR : WARNING;
     if (ret == ERROR)
diff --git a/core/sql/cli/Globals.cpp b/core/sql/cli/Globals.cpp
index ed30502..fb09008 100644
--- a/core/sql/cli/Globals.cpp
+++ b/core/sql/cli/Globals.cpp
@@ -60,6 +60,7 @@
 #include <semaphore.h>
 #include <pthread.h>
 #include "HBaseClient_JNI.h"
+#include "HdfsClient_JNI.h"
 #include "LmLangManagerC.h"
 #include "LmLangManagerJava.h"
 #include "CliSemaphore.h"
diff --git a/core/sql/cli/Globals.h b/core/sql/cli/Globals.h
index 284d992..b8e04ea 100644
--- a/core/sql/cli/Globals.h
+++ b/core/sql/cli/Globals.h
@@ -88,6 +88,7 @@
 class CLISemaphore;
 class HBaseClient_JNI;
 class HiveClient_JNI;
+class HdfsClient;
 class TransMode;
 class ContextTidMap;
 class LmLanguageManager;
diff --git a/core/sql/cli/Statement.cpp b/core/sql/cli/Statement.cpp
index 09f4118..1aeb708 100644
--- a/core/sql/cli/Statement.cpp
+++ b/core/sql/cli/Statement.cpp
@@ -1687,7 +1687,7 @@
 		  //!aqRetry  && cliGlobals_->isEmbeddedArkcmpInitialized())
                 {
                   Int32 compStatus;
-                  ComDiagsArea *da = ComDiagsArea::allocate(&heap_);
+                  ComDiagsArea *da = NULL;
 
                   // clean up diags area of regular arkcmp, it could contain
                   // old errors from last use
@@ -1698,14 +1698,17 @@
                   compStatus = CmpCommon::context()->compileDirect(
                                    (char *)data, dataLen,
                                    // use arkcmp heap to store the plan
+                                   // TODO: check why indexIntoCompilerArray is used here
                                    cliGlobals_->getArkcmp(indexIntoCompilerArray)->getHeap(),
                                    charset, op,
                                    fetched_gen_code, fetched_gen_code_len,
                                    context_->getSqlParserFlags(), 
                                    NULL, 0, da);
-
-                  diagsArea.mergeAfter(*da);
-                  da->decrRefCount();
+                  if (da != NULL) 
+                  {
+                     diagsArea.mergeAfter(*da);
+                     da->decrRefCount();
+                  }
 
                   if (compStatus == ExSqlComp::SUCCESS)
                     {
diff --git a/core/sql/comexe/ComTdbBlockingHdfsScan.h b/core/sql/comexe/ComTdbBlockingHdfsScan.h
index 88a3b40..fe0b980 100755
--- a/core/sql/comexe/ComTdbBlockingHdfsScan.h
+++ b/core/sql/comexe/ComTdbBlockingHdfsScan.h
@@ -24,7 +24,6 @@
 #define COM_HDFS_SCAN_H
 
 #include "ComTdb.h"
-#include "hdfs.h"  // tPort 
 
 //
 // Task Definition Block
diff --git a/core/sql/comexe/ComTdbHdfsScan.h b/core/sql/comexe/ComTdbHdfsScan.h
index 86534be..ea995fb 100755
--- a/core/sql/comexe/ComTdbHdfsScan.h
+++ b/core/sql/comexe/ComTdbHdfsScan.h
@@ -54,7 +54,9 @@
     CONTINUE_ON_ERROR           = 0x0020,
     LOG_ERROR_ROWS              = 0x0040,
     ASSIGN_RANGES_AT_RUNTIME    = 0x0080,
-    USE_LIBHDFS_SCAN            = 0x0100
+    TREAT_EMPTY_AS_NULL         = 0x0100,
+    USE_LIBHDFS_SCAN            = 0x0200,
+    COMPRESSED_FILE             = 0x0400
   };
 
   // Expression to filter rows.
@@ -288,7 +290,12 @@
   {(v ? flags_ |= USE_LIBHDFS_SCAN : flags_ &= ~USE_LIBHDFS_SCAN); }
   NABoolean getUseLibhdfsScan() const
                                 { return (flags_ & USE_LIBHDFS_SCAN) != 0; }
-  
+
+  void setCompressedFile(NABoolean v)
+  {(v ? flags_ |= COMPRESSED_FILE : flags_ &= ~COMPRESSED_FILE); }
+  NABoolean isCompressedFile() const
+                                { return (flags_ & COMPRESSED_FILE) != 0; }
+
   UInt32 getMaxErrorRows() const { return maxErrorRows_;}
   void setMaxErrorRows(UInt32 v ) { maxErrorRows_= v; }
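
The flag changes above slot TREAT_EMPTY_AS_NULL in at 0x0100, renumber USE_LIBHDFS_SCAN to 0x0200, and add COMPRESSED_FILE at 0x0400; every value must remain a distinct power of two for the set/clear/test idiom to work. A minimal sketch of that idiom (the enclosing class is hypothetical, the flag names mirror the diff):

    #include <cassert>
    #include <cstdint>

    // Minimal sketch of the TDB flag idiom; the class is hypothetical.
    class ScanFlags {
    public:
      enum : uint32_t {
        TREAT_EMPTY_AS_NULL = 0x0100,   // new bit, slotted in
        USE_LIBHDFS_SCAN    = 0x0200,   // renumbered from 0x0100
        COMPRESSED_FILE     = 0x0400,   // new bit
      };

      void setCompressedFile(bool v)
      { v ? flags_ |= COMPRESSED_FILE : flags_ &= ~COMPRESSED_FILE; }
      bool isCompressedFile() const { return (flags_ & COMPRESSED_FILE) != 0; }

    private:
      uint32_t flags_ = 0;
    };

    int main() {
      ScanFlags f;
      f.setCompressedFile(true);
      assert(f.isCompressedFile());
      f.setCompressedFile(false);
      assert(!f.isCompressedFile());
    }

Worth noting: renumbering an existing bit changes the interpretation of any previously serialized flags_ word, so plans carrying the old encoding presumably need to be recompiled.
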
   
diff --git a/core/sql/common/ComRtUtils.cpp b/core/sql/common/ComRtUtils.cpp
index f2619f9..35f9ca7 100644
--- a/core/sql/common/ComRtUtils.cpp
+++ b/core/sql/common/ComRtUtils.cpp
@@ -81,6 +81,7 @@
 #include "seabed/ms.h"
 #include "seabed/fs.h"
 
+#include "HdfsClient_JNI.h"
 struct ModName {
 public:
   const char * name;
diff --git a/core/sql/common/ComSmallDefs.h b/core/sql/common/ComSmallDefs.h
index b6baf11..02df604 100644
--- a/core/sql/common/ComSmallDefs.h
+++ b/core/sql/common/ComSmallDefs.h
@@ -193,6 +193,7 @@
 // Procedures are defined in CmpSeabaseDDLroutine.h
 #define SEABASE_LIBMGR_SCHEMA "_LIBMGR_"
 #define SEABASE_LIBMGR_LIBRARY "DB__LIBMGRNAME"
+#define SEABASE_LIBMGR_LIBRARY_CPP "DB__LIBMGR_LIB_CPP"
 
 // reserved column names for traf internal system usage
 #define TRAF_SALT_COLNAME "_SALT_"
diff --git a/core/sql/common/ComUser.cpp b/core/sql/common/ComUser.cpp
index 796d94b..2b6831f 100644
--- a/core/sql/common/ComUser.cpp
+++ b/core/sql/common/ComUser.cpp
@@ -72,7 +72,7 @@
                                NULL, 0, NULL);
 
   assert(rc >= 0);
-  assert(dbUserID >= SUPER_USER)
+  assert(dbUserID >= SUPER_USER);
 
   return dbUserID;
   
diff --git a/core/sql/common/NAString.cpp b/core/sql/common/NAString.cpp
index 21128fb..b0bfb0b 100644
--- a/core/sql/common/NAString.cpp
+++ b/core/sql/common/NAString.cpp
@@ -1815,7 +1815,8 @@
 
 	  } // unquoted space
 
-	else if (*s == '.')
+	else if ((*s == '.') &&
+                 (!isDigit8859_1((unsigned char)s[1]))) // ignore dots in numeric constants
 	  {
 	    dot[2] = dot[1];
 	    dot[1] = dot[0];
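
This makes the dot-counting loop skip a '.' that is immediately followed by a digit, so a numeric constant such as 123.45 embedded in the scanned text no longer inflates the count of name-qualifying dots, while CAT.SCH.TBL still yields two. A tiny sketch of the distinguishing test, with isdigit standing in for isDigit8859_1:

    #include <cassert>
    #include <cctype>

    // Counts '.' name separators, skipping a dot that begins a fraction
    // (dot followed by a digit); isdigit stands in for isDigit8859_1.
    static int countNameDots(const char *s) {
      int dots = 0;
      for (; *s; s++)
        if (*s == '.' && !isdigit(static_cast<unsigned char>(s[1])))
          dots++;
      return dots;
    }

    int main() {
      assert(countNameDots("CAT.SCH.TBL") == 2);  // qualified name
      assert(countNameDots("123.45") == 0);       // numeric constant
    }
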
diff --git a/core/sql/common/Platform.h b/core/sql/common/Platform.h
index fe7598a..153f9b9 100644
--- a/core/sql/common/Platform.h
+++ b/core/sql/common/Platform.h
@@ -35,6 +35,8 @@
  *
  *****************************************************************************
  */
+#define __STDC_LIMIT_MACROS
+#define __STDC_FORMAT_MACROS
 
 /*
 // On Linux, either NA_BIG_ENDIAN or NA_LITTLE_ENDIAN may have already
diff --git a/core/sql/executor/ExCancel.cpp b/core/sql/executor/ExCancel.cpp
index 32b5c53..b425ca4 100755
--- a/core/sql/executor/ExCancel.cpp
+++ b/core/sql/executor/ExCancel.cpp
@@ -249,14 +249,13 @@
               break;
             }
           }
-          ComDiagsArea *tempDiagsArea =
-                ComDiagsArea::allocate(getGlobals()->getDefaultHeap());
-          tempDiagsArea->clear();
+          
+          ComDiagsArea *tempDiagsArea = NULL;
  
           ContextCli *context = getGlobals()->castToExExeStmtGlobals()->
                 castToExMasterStmtGlobals()->getStatement()->getContext();
           ExSsmpManager *ssmpManager = context->getSsmpManager(); 
-          cbServer_ = ssmpManager->getSsmpServer(
+          cbServer_ = ssmpManager->getSsmpServer((NAHeap *)getGlobals()->getDefaultHeap(),
                                  nodeName_,
                                  cpu_, tempDiagsArea);
           if (cbServer_ == NULL) {
@@ -266,8 +265,6 @@
              step_ = DONE;
              break;
           }
-          else
-             tempDiagsArea->decrRefCount();
 
           //Create the stream on the IpcHeap, since we don't dispose 
           // of it immediately.  We just add it to the list of completed 
diff --git a/core/sql/executor/ExExeUtil.h b/core/sql/executor/ExExeUtil.h
index e119ed1..85173c8 100644
--- a/core/sql/executor/ExExeUtil.h
+++ b/core/sql/executor/ExExeUtil.h
@@ -3893,7 +3893,6 @@
   NABoolean emptyTarget_;
   NABoolean oneFile_;
   ExpHbaseInterface * ehi_;
-  HdfsClient *hdfsClient_;
 };
 
 class ExExeUtilHbaseUnLoadPrivateState : public ex_tcb_private_state
diff --git a/core/sql/executor/ExExeUtilCli.cpp b/core/sql/executor/ExExeUtilCli.cpp
index 3effdb6..be80cf7 100644
--- a/core/sql/executor/ExExeUtilCli.cpp
+++ b/core/sql/executor/ExExeUtilCli.cpp
@@ -2088,17 +2088,17 @@
   return SQL_EXEC_DeleteContext(*(SQLCTX_HANDLE*)contextHandle);
 }
 
-Lng32 ExeCliInterface::retrieveSQLDiagnostics(ComDiagsArea * toDiags)
+Lng32 ExeCliInterface::retrieveSQLDiagnostics(ComDiagsArea *toDiags)
 {
   Lng32 retcode;
 
-  if (diagsArea_)
+  if (diagsArea_ != NULL)
     {
       diagsArea_->clear();
       diagsArea_->deAllocate();
     }
 
-  if (toDiags)
+  if (toDiags != NULL)
     {
       retcode = SQL_EXEC_MergeDiagnostics_Internal(*toDiags);
       SQL_EXEC_ClearDiagnostics(NULL);
diff --git a/core/sql/executor/ExExeUtilCli.h b/core/sql/executor/ExExeUtilCli.h
index 9068602..8c1aa9b 100644
--- a/core/sql/executor/ExExeUtilCli.h
+++ b/core/sql/executor/ExExeUtilCli.h
@@ -245,7 +245,7 @@
   Lng32 currentContext(char* contextHandle); // out buf will return context handle
   Lng32 deleteContext(char* contextHandle); // in buf contains context handle
 
-  Lng32 retrieveSQLDiagnostics(ComDiagsArea * toDiags);
+  Lng32 retrieveSQLDiagnostics(ComDiagsArea *toDiags);
 
   CollHeap * getHeap() { return heap_; }
 
diff --git a/core/sql/executor/ExExeUtilGet.cpp b/core/sql/executor/ExExeUtilGet.cpp
index 539a8cf..ff40c40 100644
--- a/core/sql/executor/ExExeUtilGet.cpp
+++ b/core/sql/executor/ExExeUtilGet.cpp
@@ -69,7 +69,6 @@
 
 #include "ExpHbaseInterface.h"
 #include "sql_buffer_size.h"
-#include "hdfs.h"
 
 #include "NAType.h"
 
@@ -6103,7 +6102,6 @@
   ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(),
 					(char*)"", //exe_util_tdb.server(), 
 					(char*)""); //exe_util_tdb.zkPort(),
-
   regionInfoList_ = NULL;
   
   tableName_ = new(glob->getDefaultHeap()) char[2000];
@@ -6874,9 +6872,8 @@
   stats_ = (ComTdbClusterStatsVirtTableColumnStruct*)statsBuf_;
 
   ehi_ = ExpHbaseInterface::newInstance(glob->getDefaultHeap(),
-					(char*)"", //exe_util_tdb.server(), 
-					(char*)""); //exe_util_tdb.zkPort());
-
+					(char*)"", 
+					(char*)""); 
   regionInfoList_ = NULL;
   
   // get hbase rootdir location. Max linux pathlength is 1024.
diff --git a/core/sql/executor/ExExeUtilGetStats.cpp b/core/sql/executor/ExExeUtilGetStats.cpp
index 72bdd2b..9e99b5c 100644
--- a/core/sql/executor/ExExeUtilGetStats.cpp
+++ b/core/sql/executor/ExExeUtilGetStats.cpp
@@ -44,7 +44,6 @@
 #include  "ComTdb.h"
 #include  "ex_tcb.h"
 #include  "ComSqlId.h"
-
 #include  "ExExeUtil.h"
 #include  "ex_exe_stmt_globals.h"
 #include  "exp_expr.h"
diff --git a/core/sql/executor/ExExeUtilLoad.cpp b/core/sql/executor/ExExeUtilLoad.cpp
index 115f2a4..97c185c 100644
--- a/core/sql/executor/ExExeUtilLoad.cpp
+++ b/core/sql/executor/ExExeUtilLoad.cpp
@@ -1960,7 +1960,7 @@
 void ExExeUtilHBaseBulkUnLoadTcb::createHdfsFileError(Int32 hdfsClientRetCode)
 {
   ComDiagsArea * diagsArea = NULL;
-  char* errorMsg = hdfsClient_->getErrorText((HDFS_Client_RetCode)hdfsClientRetCode);
+  char* errorMsg = HdfsClient::getErrorText((HDFS_Client_RetCode)hdfsClientRetCode);
   ExRaiseSqlError(getHeap(), &diagsArea, (ExeErrorCode)(8447), NULL,
                   NULL, NULL, NULL, errorMsg, (char *)GetCliGlobals()->currContext()->getJniErrorStr().data());
   ex_queue_entry *pentry_up = qparent_.up->getTailEntry();
@@ -1980,7 +1980,6 @@
        emptyTarget_(FALSE),
        oneFile_(FALSE)
 {
-  hdfsClient_ = NULL;
   ehi_ = ExpHbaseInterface::newInstance(getGlobals()->getDefaultHeap(),
                                    (char*)"", //Later may need to change to hblTdb.server_,
                                    (char*)""); //Later may need to change to hblTdb.zkPort_);
@@ -2005,12 +2004,6 @@
     NADELETEBASIC (snapshotsList_, getMyHeap());
     snapshotsList_ = NULL;
   }
-
-  if (hdfsClient_)
-  {
-    NADELETE(hdfsClient_, HdfsClient, getMyHeap());
-    hdfsClient_ = NULL;
-  }
   NADELETE(ehi_, ExpHbaseInterface, getGlobals()->getDefaultHeap());
   ehi_ = NULL;
 }
@@ -2192,16 +2185,6 @@
       }
       setEmptyTarget(hblTdb().getEmptyTarget());
       setOneFile(hblTdb().getOneFile());
-      if (!hdfsClient_)
-      {
-        hdfsClient_ = HdfsClient::newInstance((NAHeap *)getMyHeap(), hdfsClientRetCode);
-        if (hdfsClientRetCode != HDFS_CLIENT_OK)
-        {
-          createHdfsFileError(hdfsClientRetCode);
-          step_ = UNLOAD_END_ERROR_;
-          break;
-        }
-      }
       if ((retcode = ehi_->init(NULL)) != HBASE_ACCESS_SUCCESS)
       {
          ExHbaseAccessTcb::setupError((NAHeap *)getMyHeap(),qparent_, retcode, 
@@ -2213,7 +2196,7 @@
       if (!hblTdb().getOverwriteMergeFile() &&  hblTdb().getMergePath() != NULL)
       {
         NABoolean exists = FALSE;
-        hdfsClientRetCode = hdfsClient_->hdfsExists( hblTdb().getMergePath(), exists);
+        hdfsClientRetCode = HdfsClient::hdfsExists( hblTdb().getMergePath(), exists);
         if (hdfsClientRetCode != HDFS_CLIENT_OK)
         {
           createHdfsFileError(hdfsClientRetCode);
@@ -2298,7 +2281,7 @@
 
       NAString uldPath ( hblTdb().getExtractLocation());
 
-      hdfsClientRetCode = hdfsClient_->hdfsCleanUnloadPath( uldPath);
+      hdfsClientRetCode = HdfsClient::hdfsCleanUnloadPath( uldPath);
       if (hdfsClientRetCode != HDFS_CLIENT_OK)
       {
         createHdfsFileError(hdfsClientRetCode);
@@ -2443,7 +2426,7 @@
 
       NAString srcPath ( hblTdb().getExtractLocation());
       NAString dstPath ( hblTdb().getMergePath());
-      hdfsClientRetCode = hdfsClient_->hdfsMergeFiles( srcPath, dstPath);
+      hdfsClientRetCode = HdfsClient::hdfsMergeFiles( srcPath, dstPath);
       if (hdfsClientRetCode != HDFS_CLIENT_OK)
       {
         createHdfsFileError(hdfsClientRetCode);
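
The unload tcb changes above remove the owned hdfsClient_ member entirely: getErrorText(), hdfsExists(), hdfsCleanUnloadPath(), and hdfsMergeFiles() are invoked as HdfsClient statics, so the tcb no longer constructs and tears down a client it used only for stateless calls. A compact sketch of the refactor direction, with a hypothetical Client class:

    #include <cassert>
    #include <string>

    // Hypothetical stand-in: stateless HDFS helpers become class statics,
    // so callers no longer need to own an instance just to reach them.
    class Client {
    public:
      static int hdfsExists(const std::string &path, bool &exists) {
        exists = !path.empty();      // stands in for the real HDFS lookup
        return 0;                    // 0 == OK
      }
    };

    int main() {
      bool exists = false;
      // No newInstance()/NADELETE lifecycle around a purely static call.
      assert(Client::hdfsExists("/user/trafodion/merge", exists) == 0);
      assert(exists);
    }
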
diff --git a/core/sql/executor/ExExplain.cpp b/core/sql/executor/ExExplain.cpp
index b0e2a90..46731e3 100644
--- a/core/sql/executor/ExExplain.cpp
+++ b/core/sql/executor/ExExplain.cpp
@@ -1527,7 +1527,7 @@
     return NULL;
   }
 
-  IpcServer *ssmpServer = ssmpManager->getSsmpServer(nodeName, cpu, diagsArea_);
+  IpcServer *ssmpServer = ssmpManager->getSsmpServer((NAHeap *)getHeap(), nodeName, cpu, diagsArea_);
   if (ssmpServer == NULL)
     return NULL; // diags are in diagsArea_
 
@@ -1724,9 +1724,9 @@
   if (vi->get(0, ptr, len))
     goto label_error2;
   
-  explainFragLen_ = str_decoded_len(len); // remove trailing null terminator
+  explainFragLen_ = str_decoded_len(len - 1); // remove trailing null terminator
   explainFrag_ = new(getHeap()) char[explainFragLen_];
-  if (str_decode(explainFrag_, explainFragLen_, ptr, len) < 0)
+  if (str_decode(explainFrag_, explainFragLen_, ptr, len - 1) < 0)
     {
       diagsArea = pEntryDown->getAtp()->getDiagsArea();
       ExRaiseSqlError(getGlobals()->getDefaultHeap(), 
diff --git a/core/sql/executor/ExFastTransport.cpp b/core/sql/executor/ExFastTransport.cpp
index 9d43545..d2cbd47 100644
--- a/core/sql/executor/ExFastTransport.cpp
+++ b/core/sql/executor/ExFastTransport.cpp
@@ -790,7 +790,7 @@
           }
           else if (!isSequenceFile() && hdfsClient_ == NULL)
           {
-             hdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), hdfsClientRetCode);
+             hdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetCode);
              if (hdfsClientRetCode != HDFS_CLIENT_OK)
              {
                 createHdfsClientFileError(hdfsClientRetCode);
@@ -813,25 +813,29 @@
           }
           else
           {
-            hdfsClientRetCode = hdfsClient_->hdfsCreate(targetLocation_, isHdfsCompressed());
-            if (hdfsClientRetCode != HDFS_CLIENT_OK)
-            {
-              createHdfsClientFileError(hdfsClientRetCode);
-              pstate.step_ = EXTRACT_ERROR;
-              break;
-            }
-          }  
-          if (feStats)
-          {
-            feStats->setPartitionNumber(fileNum);
-          }
-        }
-      else
-        {
-          updateWorkATPDiagsArea(__FILE__,__LINE__,"sockets are not supported");
-          pstate.step_ = EXTRACT_ERROR;
-          break;
-        }
+             hdfsClientRetCode = hdfsClient_->hdfsOpen(targetLocation_, isHdfsCompressed());
+             if (hdfsClientRetCode != HDFS_CLIENT_OK)
+             {
+                createHdfsClientFileError(hdfsClientRetCode);
+                NADELETE(hdfsClient_,
+                       HdfsClient,
+                       heap_);
+                hdfsClient_ = NULL;
+                pstate.step_ = EXTRACT_ERROR;
+                break;
+             }
+           }
+           if (feStats)
+           {
+             feStats->setPartitionNumber(fileNum);
+           }
+       }
+       else
+       {
+           updateWorkATPDiagsArea(__FILE__,__LINE__,"sockets are not supported");
+           pstate.step_ = EXTRACT_ERROR;
+           break;
+       }
 
       for (UInt32 i = 0; i < myTdb().getChildTuple()->numAttrs(); i++)
       {
@@ -1042,7 +1046,7 @@
         }
       else
         {
-          hdfsClientRetCode = hdfsClient_->hdfsWrite(currBuffer_->data_, bytesToWrite);
+          hdfsClient_->hdfsWrite(currBuffer_->data_, bytesToWrite, hdfsClientRetCode);
           if (hdfsClientRetCode != HDFS_CLIENT_OK)
           {
             createSequenceFileError(hdfsClientRetCode);
@@ -1290,7 +1294,7 @@
                   (ExeErrorCode)(8447),
                   NULL, NULL, NULL, NULL,
                   errorMsg,
-                (char *)currContext->getJniErrorStr().data());
+                  (char *)currContext->getJniErrorStr().data());
   updateWorkATPDiagsArea(diagsArea);
 }
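
The write path above follows HdfsClient's revised hdfsWrite() shape: with the JNI signature moving from ([BJ)Z to ([B)I later in this patch, the Java method returns the byte count and the C++ wrapper reports status through an out parameter rather than its return value. A small sketch of that calling convention (names are hypothetical):

    #include <cassert>

    enum WriteRetCode { WRITE_OK, WRITE_ERROR };

    // Hypothetical sketch of the revised shape: byte count as the return
    // value, status through an out parameter.
    static int writeBytes(const char *data, int len, WriteRetCode &rc) {
      if (data == nullptr || len < 0) { rc = WRITE_ERROR; return -1; }
      rc = WRITE_OK;
      return len;                    // stands in for bytes actually written
    }

    int main() {
      WriteRetCode rc;
      int n = writeBytes("hello", 5, rc);
      assert(rc == WRITE_OK && n == 5);
    }
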
 
diff --git a/core/sql/executor/ExHbaseAccess.cpp b/core/sql/executor/ExHbaseAccess.cpp
index 2182f9a..8af9308 100644
--- a/core/sql/executor/ExHbaseAccess.cpp
+++ b/core/sql/executor/ExHbaseAccess.cpp
@@ -2991,7 +2991,8 @@
     {
       // Overwrite trailing delimiter with newline.
       hiveBuff[hiveBuffInx-1] = '\n';
-      sampleFileHdfsClient()->hdfsWrite(hiveBuff, hiveBuffInx);
+      HDFS_Client_RetCode hdfsClientRetcode;
+      sampleFileHdfsClient()->hdfsWrite(hiveBuff, hiveBuffInx, hdfsClientRetcode);
     }
   return 0;
 }
@@ -3263,16 +3264,16 @@
      return;
 
   if (!loggingFileCreated_) {
-     logFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), hdfsClientRetcode);
+     logFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetcode);
      if (hdfsClientRetcode == HDFS_CLIENT_OK)
-        hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, FALSE);
+        hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, TRUE, FALSE);
      if (hdfsClientRetcode == HDFS_CLIENT_OK)
         loggingFileCreated_ = TRUE;
      else 
         goto logErrorReturn;
   }
   
-  hdfsClientRetcode = logFileHdfsClient_->hdfsWrite(logErrorRow, logErrorRowLen);
+  logFileHdfsClient_->hdfsWrite(logErrorRow, logErrorRowLen, hdfsClientRetcode);
   if (hdfsClientRetcode != HDFS_CLIENT_OK) 
      goto logErrorReturn;
   if (errorCond != NULL) {
@@ -3288,7 +3289,7 @@
      errorMsg = (char *)"[UNKNOWN EXCEPTION]\n";
      errorMsgLen = strlen(errorMsg);
   }
-  hdfsClientRetcode = logFileHdfsClient_->hdfsWrite(errorMsg, errorMsgLen);
+  logFileHdfsClient_->hdfsWrite(errorMsg, errorMsgLen, hdfsClientRetcode);
 logErrorReturn:
   if (hdfsClientRetcode != HDFS_CLIENT_OK) {
      loggingErrorDiags_ = ComDiagsArea::allocate(heap);
diff --git a/core/sql/executor/ExHbaseIUD.cpp b/core/sql/executor/ExHbaseIUD.cpp
index e8aac8c..6a52457 100644
--- a/core/sql/executor/ExHbaseIUD.cpp
+++ b/core/sql/executor/ExHbaseIUD.cpp
@@ -1421,7 +1421,7 @@
             HDFS_Client_RetCode hdfsClientRetcode;
             samplePath.append(filePart);
             if (sampleFileHdfsClient_ == NULL)
-                sampleFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), hdfsClientRetcode);
+                sampleFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetcode);
             if (hdfsClientRetcode == HDFS_CLIENT_OK) {
                 hdfsClientRetcode = sampleFileHdfsClient_->hdfsOpen(samplePath.data(), FALSE);
                 if (hdfsClientRetcode != HDFS_CLIENT_OK) {
diff --git a/core/sql/executor/ExHdfsScan.cpp b/core/sql/executor/ExHdfsScan.cpp
index 2b73feb..a56cb06 100644
--- a/core/sql/executor/ExHdfsScan.cpp
+++ b/core/sql/executor/ExHdfsScan.cpp
@@ -124,11 +124,12 @@
   , hdfsScan_(NULL)
   , hdfsStats_(NULL)
   , hdfsFileInfoListAsArray_(glob->getDefaultHeap(), hdfsScanTdb.getHdfsFileInfoList()->numEntries())
+  
 {
   Space * space = (glob ? glob->getSpace() : 0);
   CollHeap * heap = (glob ? glob->getDefaultHeap() : 0);
   useLibhdfsScan_ = hdfsScanTdb.getUseLibhdfsScan();
-  if (isSequenceFile())
+  if (isSequenceFile() || hdfsScanTdb.isCompressedFile())
      useLibhdfsScan_ = TRUE;
   lobGlob_ = NULL;
   hdfsScanBufMaxSize_ = hdfsScanTdb.hdfsBufSize_;
@@ -229,7 +230,7 @@
                                         (char*)"");
   ex_assert(ehi_ != NULL, "Internal error: ehi_ is null in ExHdfsScan");
   HDFS_Client_RetCode hdfsClientRetcode;
-  hdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), hdfsClientRetcode);
+  hdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetcode);
   ex_assert(hdfsClientRetcode == HDFS_CLIENT_OK, "Internal error: HdfsClient::newInstance returned an error"); 
   // Populate the hdfsInfo list into an array to gain o(1) lookup access
   Queue* hdfsInfoList = hdfsScanTdb.getHdfsFileInfoList();
@@ -317,7 +318,6 @@
   if (hdfsScan_ != NULL) 
      NADELETE(hdfsScan_, HdfsScan, getHeap());
 }
-
 NABoolean ExHdfsScanTcb::needStatsEntry()
 {
   // stats are collected for ALL and OPERATOR options.
@@ -420,19 +420,23 @@
   while (!qparent_.down->isEmpty())
     {
       ex_queue_entry *pentry_down = qparent_.down->getHeadEntry();
+      if (pentry_down->downState.request == ex_queue::GET_NOMORE && step_ != DONE) 
+      {
+          if (! useLibhdfsScan_)
+             step_ = STOP_HDFS_SCAN;
+      }
       switch (step_)
 	{
 	case NOT_STARTED:
 	  {
 	    matches_ = 0;
-	    
 	    beginRangeNum_ = -1;
 	    numRanges_ = -1;
 	    hdfsOffset_ = 0;
             checkRangeDelimiter_ = FALSE;
-
+            if (getStatsEntry())
+               hdfsStats_ = getStatsEntry()->castToExHdfsScanStats();
             dataModCheckDone_ = FALSE;
-
 	    myInstNum_ = getGlobals()->getMyInstanceNumber();
 	    hdfsScanBufMaxSize_ = hdfsScanTdb().hdfsBufSize_;
 
@@ -557,11 +561,14 @@
           {   
              if (hdfsScan_ != NULL)
                 NADELETE(hdfsScan_, HdfsScan, getHeap());
+             if (hdfsFileInfoListAsArray_.entries() == 0) {
+                step_ = DONE;
+                break;
+             } 
              hdfsScan_ = HdfsScan::newInstance((NAHeap *)getHeap(), hdfsScanBuf_, hdfsScanBufMaxSize_, 
                             &hdfsFileInfoListAsArray_, beginRangeNum_, numRanges_, hdfsScanTdb().rangeTailIOSize_, 
                             hdfsStats_, hdfsScanRetCode);
-             if (hdfsScanRetCode != HDFS_SCAN_OK)
-             {
+             if (hdfsScanRetCode != HDFS_SCAN_OK) {
                 setupError(EXE_ERROR_HDFS_SCAN, hdfsScanRetCode, "SETUP_HDFS_SCAN", 
                               currContext->getJniErrorStr(), NULL);              
                 step_ = HANDLE_ERROR_AND_DONE;
@@ -580,7 +587,7 @@
           break;
         case TRAF_HDFS_READ:
           {
-             hdfsScanRetCode = hdfsScan_->trafHdfsRead((NAHeap *)getHeap(), hdfsStats_, retArray_, sizeof(retArray_)/sizeof(int));
+             hdfsScanRetCode = hdfsScan_->trafHdfsRead(retArray_, sizeof(retArray_)/sizeof(int));
              if (hdfsScanRetCode == HDFS_SCAN_EOR) {
                 step_ = DONE;
                 break;
@@ -601,6 +608,11 @@
                 else
                    recordSkip_ = TRUE; 
              } else {
+                // Throw away the rest of the data when done with the current range
+                if (currRangeBytesRead_ > hdfo->getBytesToRead()) {
+                   step_ = TRAF_HDFS_READ;
+                   break;
+                }
                 currRangeBytesRead_ += retArray_[BYTES_COMPLETED];
                 bufBegin_ = hdfsScanBuf_[retArray_[BUF_NO]].buf_ - headRoomCopied_;
                 recordSkip_ = FALSE;
@@ -609,10 +621,14 @@
                 extraBytesRead_ = currRangeBytesRead_ - hdfo->getBytesToRead(); 
              else
                 extraBytesRead_ = 0;
-             // headRoom_ is the number of extra bytes read (rangeTailIOSize)
+             // headRoom_ is the number of extra bytes to be read (rangeTailIOSize)
              // If EOF is reached while reading the range and the extraBytes read
-             // is less than headRoom_, then process all the data till EOF 
-             if (retArray_[IS_EOF] && extraBytesRead_ < headRoom_)
+             // is less than headRoom_ then process all the data till EOF 
+             // TODO: If the whole range fits in one buffer, rows also need to be processed till EOF for the last range alone.
+             // There is no easy way to identify the last range read, though it can be identified as not the first range. 
+             // Rows could be read more than once if there are more than 2 ranges.
+             // Fix the optimizer not to produce more than 2 ranges in that case. 
+             if (retArray_[IS_EOF] && extraBytesRead_ < headRoom_ && hdfo->getStartOffset() != 0)
                 extraBytesRead_ = 0;
              bufLogicalEnd_ = hdfsScanBuf_[retArray_[BUF_NO]].buf_ + retArray_[BYTES_COMPLETED] - extraBytesRead_;
              prevRangeNum_ = retArray_[RANGE_NO];
@@ -629,6 +645,8 @@
                    step_ = HANDLE_ERROR_AND_DONE;
                    break;
                 }
+		// add changedLen since hdfs_strchr moves the pointer ahead to strip the \r
+		hdfsBufNextRow_ += 1 + changedLen;   // point past record delimiter.
              }
              else
                 hdfsBufNextRow_ = (char *)bufBegin_; 
@@ -647,6 +665,17 @@
              step_ = TRAF_HDFS_READ;  
           }
           break;
+        case STOP_HDFS_SCAN:
+          {
+             hdfsScanRetCode = hdfsScan_->stop();
+             if (hdfsScanRetCode != HDFS_SCAN_OK) {
+                setupError(EXE_ERROR_HDFS_SCAN, hdfsScanRetCode, "HdfsScan::stop", 
+                              currContext->getJniErrorStr(), NULL);              
+                step_ = HANDLE_ERROR_AND_DONE;
+             }    
+             else
+                step_ = DONE;
+          }
+          break;
 	case INIT_HDFS_CURSOR:
 	  {
             hdfo_ = getRange(currRangeNum_);
@@ -668,6 +697,7 @@
             sprintf(cursorId_, "%d", currRangeNum_);
             stopOffset_ = hdfsOffset_ + hdfo_->getBytesToRead();
 
+
 	    step_ = OPEN_HDFS_CURSOR;
 	  }
         
@@ -1077,16 +1107,19 @@
 
 	  if (startOfNextRow == NULL)
 	  {
-            if (useLibhdfsScan_)
+            if (useLibhdfsScan_) 
 	       step_ = REPOS_HDFS_DATA;
             else {
-               if (retArray_[IS_EOF]) 
+               if (retArray_[IS_EOF]) { 
+                  headRoomCopied_ = 0; 
                   step_ = TRAF_HDFS_READ;
+               }
                else
                   step_ = COPY_TAIL_TO_HEAD;
             }
+            // It appears we can always break here
 	    if (!exception_)
-	      break;
+	       break;
 	  }
 	  else
 	  {
@@ -1097,17 +1130,13 @@
              } 
              else {
                 if ((BYTE *)startOfNextRow > bufLogicalEnd_) {
-                   step_ = TRAF_HDFS_READ;
+                   headRoomCopied_ = 0;
                    hdfsBufNextRow_ = NULL;
-	           if (!exception_)
-	               break;
-                }
-                else
+                } else 
 	          hdfsBufNextRow_ = startOfNextRow;
              }
-           
 	  }
-
+           
 	  if (exception_)
 	  {
 	    nextStep_ = step_;
@@ -1307,7 +1336,7 @@
 	          if ((pentry_down->downState.request == ex_queue::GET_N) &&
 	              (pentry_down->downState.requestValue == matches_)) {
                      if (useLibhdfsScan_)
-                        step_ = CLOSE_FILE;
+                        step_ = CLOSE_HDFS_CURSOR;
                      else
                         step_ = DONE;
                   }
@@ -1364,8 +1393,7 @@
 	            up_entry->getTupp(hdfsScanTdb().tuppIndex_).getDataPointer());
 	      }
 	    }
-	  }
-
+          }
 	  up_entry->upState.setMatchNo(++matches_);
 	  if (matches_ == matchBrkPoint_)
 	    brkpoint();
@@ -1747,17 +1775,18 @@
   {
      sourceRowEnd = hdfs_strchr(sourceData, rd, sourceDataEnd, checkRangeDelimiter_, mode, &changedLen);
      hdfsLoggingRowEnd_  = sourceRowEnd + changedLen;
-
+     
      if (sourceRowEnd == NULL)
         return NULL; 
-     if ((endOfRequestedRange) && 
+     if (useLibhdfsScan_) {
+        if ((endOfRequestedRange) && 
             (sourceRowEnd >= endOfRequestedRange)) {
-        checkRangeDelimiter_ = TRUE;
-        *(sourceRowEnd +1)= RANGE_DELIMITER;
+           checkRangeDelimiter_ = TRUE;
+           *(sourceRowEnd +1)= RANGE_DELIMITER;
+        }
      }
-
-    // no columns need to be converted. For e.g. count(*) with no predicate
-    return sourceRowEnd+1;
+     // no columns need to be converted. For e.g. count(*) with no predicate
+     return sourceRowEnd+1;
   }
 
   Lng32 neededColIndex = 0;
@@ -2134,15 +2163,15 @@
      return;
 
   if (!loggingFileCreated_) {
-     logFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), hdfsClientRetcode);
+     logFileHdfsClient_ = HdfsClient::newInstance((NAHeap *)getHeap(), NULL, hdfsClientRetcode);
      if (hdfsClientRetcode == HDFS_CLIENT_OK)
-        hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, FALSE);
+        hdfsClientRetcode = logFileHdfsClient_->hdfsCreate(loggingFileName_, TRUE, FALSE);
      if (hdfsClientRetcode == HDFS_CLIENT_OK)
         loggingFileCreated_ = TRUE;
      else 
         goto logErrorReturn;
   }
-  hdfsClientRetcode = logFileHdfsClient_->hdfsWrite(logErrorRow, logErrorRowLen);
+  logFileHdfsClient_->hdfsWrite(logErrorRow, logErrorRowLen, hdfsClientRetcode);
   if (hdfsClientRetcode != HDFS_CLIENT_OK) 
      goto logErrorReturn;
   if (errorCond != NULL) {
@@ -2158,7 +2187,7 @@
      errorMsg = (char *)"[UNKNOWN EXCEPTION]\n";
      errorMsgLen = strlen(errorMsg);
   }
-  hdfsClientRetcode = logFileHdfsClient_->hdfsWrite(errorMsg, errorMsgLen);
+  logFileHdfsClient_->hdfsWrite(errorMsg, errorMsgLen, hdfsClientRetcode);
 logErrorReturn:
   if (hdfsClientRetcode != HDFS_CLIENT_OK) {
      loggingErrorDiags_ = ComDiagsArea::allocate(heap);
@@ -2168,6 +2197,7 @@
   }
 }
 
+
 ////////////////////////////////////////////////////////////////////////
 // ORC files
 ////////////////////////////////////////////////////////////////////////
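
The reworked work loop checks for GET_NOMORE before dispatching on step_ and, for the new-style (non-libhdfs) scan, routes through the added STOP_HDFS_SCAN state, whose handler calls HdfsScan::stop() to cancel the Java-side prefetch before reaching DONE. A condensed sketch of that cancellation path, with the tcb machinery reduced to hypothetical types:

    #include <cassert>
    #include <cstdio>

    enum Step { TRAF_HDFS_READ, STOP_HDFS_SCAN, HANDLE_ERROR_AND_DONE, DONE };
    enum Request { GET_ALL, GET_NOMORE };

    // Hypothetical stand-in for HdfsScan::stop(); returns 0 on success.
    static int hdfsScanStop() { return 0; }

    // One turn of the work loop, reduced to the cancellation logic.
    static Step work(Step step, Request req, bool useLibhdfsScan) {
      // Route an early cancel through STOP_HDFS_SCAN for the JNI-based scan.
      if (req == GET_NOMORE && step != DONE && !useLibhdfsScan)
        step = STOP_HDFS_SCAN;

      switch (step) {
      case STOP_HDFS_SCAN:
        // Stop the Java-side prefetch thread before finishing.
        return (hdfsScanStop() != 0) ? HANDLE_ERROR_AND_DONE : DONE;
      default:
        return step;
      }
    }

    int main() {
      assert(work(TRAF_HDFS_READ, GET_NOMORE, false) == DONE);
      assert(work(TRAF_HDFS_READ, GET_NOMORE, true) == TRAF_HDFS_READ);
      std::puts("GET_NOMORE cancels the new-style scan via STOP_HDFS_SCAN");
    }
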
diff --git a/core/sql/executor/ExHdfsScan.h b/core/sql/executor/ExHdfsScan.h
index f4ad7e1..aa01b6e 100644
--- a/core/sql/executor/ExHdfsScan.h
+++ b/core/sql/executor/ExHdfsScan.h
@@ -135,6 +135,29 @@
    
 public:
   enum
+/*
+   USE_LIBHDFS_SCAN - OFF enables hdfs access via java classes 
+      org.trafodion.sql.HdfsScan and org.trafodion.sql.HdfsClient
+   Steps involved:
+   1. Create a new HdfsScan object and set the scan ranges of the fragment instance in it
+      The scan range involves the following and it is determined either at runtime or compile time
+         a) filename
+         b) offset
+         c) len
+      The Java layer always reads rangeTailIOSize_ bytes beyond len to accommodate records that split across ranges.
+   2. Two ByteBuffer objects are also passed to the HdfsScan object. These ByteBuffers are backed by
+      2 native buffers into which the data is fetched. Each buffer has a head room of size rangeTailIOSize_ and the 
+      data is always read after the head room. 
+   3. HdfsScan returns an int array containing bytesRead, bufNo, rangeNo, isEOF and schedules either
+      the remaining bytes or the next range to be read, filling the two ByteBuffers alternately.
+   4. HdfsScan returns a null array when there is no more data to be read.
+   5. While the data in one ByteBuffer is processed by the native thread, data is fetched into the other
+      ByteBuffer by another Java thread.
+   6. After processing all the rows in one ByteBuffer, the native layer moves the last incomplete row into the
+      head room of the other ByteBuffer, then asks whether the read into that buffer is complete. Processing
+      resumes in that buffer starting from the copied incomplete row.
+*/
+
   {
     BYTES_COMPLETED,
     BUF_NO,
@@ -204,6 +227,7 @@
   , SETUP_HDFS_SCAN
   , TRAF_HDFS_READ
   , COPY_TAIL_TO_HEAD
+  , STOP_HDFS_SCAN
   } step_,nextStep_;
 
   /////////////////////////////////////////////////////
diff --git a/core/sql/executor/ExSMCommon.cpp b/core/sql/executor/ExSMCommon.cpp
index fde847f..d946732 100644
--- a/core/sql/executor/ExSMCommon.cpp
+++ b/core/sql/executor/ExSMCommon.cpp
@@ -29,6 +29,7 @@
 #include <sys/syscall.h>
 #include <unistd.h>
 #include <pthread.h>
+#include "Platform.h"
 #include "ExSMCommon.h"
 #include "ExSMGlobals.h"
 #include "ExSMEvent.h"
diff --git a/core/sql/executor/ExSMCommon.h b/core/sql/executor/ExSMCommon.h
index 1cfe3e3..0a23d68 100644
--- a/core/sql/executor/ExSMCommon.h
+++ b/core/sql/executor/ExSMCommon.h
@@ -23,11 +23,10 @@
 #ifndef EXSM_COMMON_H
 #define EXSM_COMMON_H
 
-#define __STDC_LIMIT_MACROS 
-#define __STDC_FORMAT_MACROS 
 #include <stdint.h>
 #include <inttypes.h>
 #include <stdio.h>
+#include "Platform.h"
 #include "sm.h"
 #include "NAAssert.h"
 
diff --git a/core/sql/executor/ExSMGlobals.cpp b/core/sql/executor/ExSMGlobals.cpp
index 45dda94..fb205dc 100644
--- a/core/sql/executor/ExSMGlobals.cpp
+++ b/core/sql/executor/ExSMGlobals.cpp
@@ -26,6 +26,7 @@
 #include <signal.h>
 #include <errno.h>
 #include "seabed/pctl.h"
+#include "Platform.h"
 #include "ExSMGlobals.h"
 #include "ExSMTask.h"
 #include "ExSMReader.h"
diff --git a/core/sql/executor/ExSMQueue.cpp b/core/sql/executor/ExSMQueue.cpp
index ca1f857..7eac848 100644
--- a/core/sql/executor/ExSMQueue.cpp
+++ b/core/sql/executor/ExSMQueue.cpp
@@ -20,7 +20,7 @@
 //
 // @@@ END COPYRIGHT @@@
 **********************************************************************/
-
+#include "Platform.h"
 #include "ExSMQueue.h"
 #include "NAMemory.h"
 
diff --git a/core/sql/executor/ExSMReader.cpp b/core/sql/executor/ExSMReader.cpp
index f59cbc7..315be70 100644
--- a/core/sql/executor/ExSMReader.cpp
+++ b/core/sql/executor/ExSMReader.cpp
@@ -23,6 +23,7 @@
 
 #include <unistd.h>
 #include <time.h>
+#include "Platform.h"
 #include "seabed/pctl.h"
 #include "seabed/pevents.h"
 #include "ExSMReader.h"
diff --git a/core/sql/executor/ExSMShortMessage.cpp b/core/sql/executor/ExSMShortMessage.cpp
index b564605..c2a24ef 100644
--- a/core/sql/executor/ExSMShortMessage.cpp
+++ b/core/sql/executor/ExSMShortMessage.cpp
@@ -20,7 +20,7 @@
 //
 // @@@ END COPYRIGHT @@@
 **********************************************************************/
-
+#include "Platform.h"
 #include "ExSMShortMessage.h"
 #include "ExSMGlobals.h"
 
diff --git a/core/sql/executor/ExSMTaskList.cpp b/core/sql/executor/ExSMTaskList.cpp
index 698c63b..58e3c7c 100644
--- a/core/sql/executor/ExSMTaskList.cpp
+++ b/core/sql/executor/ExSMTaskList.cpp
@@ -20,7 +20,7 @@
 //
 // @@@ END COPYRIGHT @@@
 **********************************************************************/
-
+#include "Platform.h"
 #include "ExSMTaskList.h"
 #include "ExSMTask.h"
 #include "ExSMTrace.h"
diff --git a/core/sql/executor/ExStats.cpp b/core/sql/executor/ExStats.cpp
index 7093c44..af762c9 100644
--- a/core/sql/executor/ExStats.cpp
+++ b/core/sql/executor/ExStats.cpp
@@ -8164,7 +8164,7 @@
     cpu = cliGlobals->myCpu();
   else
     cpu = cpu_;
-  IpcServer *ssmpServer = ssmpManager->getSsmpServer(nodeName_, cpu, diagsArea_);
+  IpcServer *ssmpServer = ssmpManager->getSsmpServer((NAHeap *)getHeap(), nodeName_, cpu, diagsArea_);
   if (ssmpServer == NULL)
     return NULL; // diags are in diagsArea_
 
diff --git a/core/sql/executor/HBaseClient_JNI.cpp b/core/sql/executor/HBaseClient_JNI.cpp
index 6b400cd..8d12821 100644
--- a/core/sql/executor/HBaseClient_JNI.cpp
+++ b/core/sql/executor/HBaseClient_JNI.cpp
@@ -5166,4 +5166,3 @@
   }
   NADELETE(array, NAArray, heap);
 }
-                      
diff --git a/core/sql/executor/HdfsClient_JNI.cpp b/core/sql/executor/HdfsClient_JNI.cpp
index f08aa92..d5eb4ec 100644
--- a/core/sql/executor/HdfsClient_JNI.cpp
+++ b/core/sql/executor/HdfsClient_JNI.cpp
@@ -23,6 +23,7 @@
 
 #include "QRLogger.h"
 #include "Globals.h"
+#include "Context.h"
 #include "jni.h"
 #include "HdfsClient_JNI.h"
 
@@ -38,16 +39,30 @@
 static const char* const hdfsScanErrorEnumStr[] = 
 {
    "Error in HdfsScan::setScanRanges"
-  ,"Java Exception in HdfsScan::setScanRanges"
+  ,"Java exception in HdfsScan::setScanRanges"
   ,"Error in HdfsScan::trafHdfsRead"
-  ,"Java Exceptiokn in HdfsScan::trafHdfsRead"
+  ,"Java exception in HdfsScan::trafHdfsRead"
   , "Hdfs scan End of Ranges"
+  ,"Error in HdfsScan::stop"
+  ,"Java exception in HdfsScan::stop"
 };
 
  
 //////////////////////////////////////////////////////////////////////////////
 // 
 //////////////////////////////////////////////////////////////////////////////
+HdfsScan::~HdfsScan()
+{
+   if (j_buf1_ != NULL) {
+      jenv_->DeleteGlobalRef(j_buf1_);
+      j_buf1_ = NULL;
+   }
+   if (j_buf2_ != NULL) {
+      jenv_->DeleteGlobalRef(j_buf2_);
+      j_buf2_ = NULL;
+   }
+}
+
 HDFS_Scan_RetCode HdfsScan::init()
 {
   static char className[]="org/trafodion/sql/HdfsScan";
@@ -71,6 +86,8 @@
     JavaMethods_[JM_SET_SCAN_RANGES].jm_signature = "(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;[Ljava/lang/String;[J[J[I)V";
     JavaMethods_[JM_TRAF_HDFS_READ].jm_name      = "trafHdfsRead";
     JavaMethods_[JM_TRAF_HDFS_READ].jm_signature = "()[I";
+    JavaMethods_[JM_STOP].jm_name      = "stop";
+    JavaMethods_[JM_STOP].jm_signature = "()V";
    
     rc = (HDFS_Scan_RetCode)JavaObjectInterface::init(className, javaClass_, JavaMethods_, (Int32)JM_LAST, javaMethodsInitialized_);
     if (rc == HDFS_SCAN_OK)
@@ -89,9 +106,8 @@
 }
 
 /////////////////////////////////////////////////////////////////////////////
-HDFS_Scan_RetCode HdfsScan::setScanRanges(NAHeap *heap, ExHdfsScanTcb::HDFS_SCAN_BUF *hdfsScanBuf,  int scanBufSize,
-      HdfsFileInfoArray *hdfsFileInfoArray, Int32 beginRangeNum, Int32 numRanges, int rangeTailIOSize,
-      ExHdfsScanStats *hdfsStats)
+HDFS_Scan_RetCode HdfsScan::setScanRanges(ExHdfsScanTcb::HDFS_SCAN_BUF *hdfsScanBuf,  int scanBufSize,
+      HdfsFileInfoArray *hdfsFileInfoArray, Int32 beginRangeNum, Int32 numRanges, int rangeTailIOSize)
 {
    QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsScan::setScanRanges() called.");
 
@@ -104,13 +120,24 @@
       jenv_->PopLocalFrame(NULL);
       return HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM;
    }
-
+   j_buf1_ = jenv_->NewGlobalRef(j_buf1);
+   if (j_buf1_ == NULL) {
+      GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM));
+      jenv_->PopLocalFrame(NULL);
+      return HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM;
+   }
    jobject j_buf2 = jenv_->NewDirectByteBuffer(hdfsScanBuf[1].buf_, scanBufSize);
    if (j_buf2 == NULL) {
       GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM));
       jenv_->PopLocalFrame(NULL);
       return HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM;
    }
+   j_buf2_ = jenv_->NewGlobalRef(j_buf2);
+   if (j_buf2_ == NULL) {
+      GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM));
+      jenv_->PopLocalFrame(NULL);
+      return HDFS_SCAN_ERROR_SET_SCAN_RANGES_PARAM;
+   }
    jobjectArray j_filenames = NULL;
    jlongArray j_offsets = NULL;
    jlongArray j_lens = NULL;  
@@ -171,13 +198,13 @@
        jenv_->SetIntArrayRegion(j_rangenums, rangeCount, 1, &tdbRangeNum);
    } 
 
-   if (hdfsStats)
-       hdfsStats->getHdfsTimer().start();
+   if (hdfsStats_ != NULL)
+       hdfsStats_->getHdfsTimer().start();
    tsRecentJMFromJNI = JavaMethods_[JM_SET_SCAN_RANGES].jm_full_name;
    jenv_->CallVoidMethod(javaObj_, JavaMethods_[JM_SET_SCAN_RANGES].methodID, j_buf1, j_buf2, j_filenames, j_offsets, j_lens, j_rangenums);
-   if (hdfsStats) {
-      hdfsStats->incMaxHdfsIOTime(hdfsStats->getHdfsTimer().stop());
-      hdfsStats->incHdfsCalls();
+   if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
    }
 
    if (jenv_->ExceptionCheck()) {
@@ -203,9 +230,11 @@
    if (hdfsScan != NULL) {
        hdfsScanRetCode = hdfsScan->init();
        if (hdfsScanRetCode == HDFS_SCAN_OK) 
-          hdfsScanRetCode = hdfsScan->setScanRanges(heap, hdfsScanBuf, scanBufSize, 
-                    hdfsFileInfoArray, beginRangeNum, numRanges, rangeTailIOSize, hdfsStats); 
-       if (hdfsScanRetCode != HDFS_SCAN_OK) {
+          hdfsScanRetCode = hdfsScan->setScanRanges(hdfsScanBuf, scanBufSize, 
+                    hdfsFileInfoArray, beginRangeNum, numRanges, rangeTailIOSize); 
+       if (hdfsScanRetCode == HDFS_SCAN_OK) 
+          hdfsScan->setHdfsStats(hdfsStats);
+       else {
           NADELETE(hdfsScan, HdfsScan, heap);
           hdfsScan = NULL;
        }
@@ -214,20 +243,20 @@
 }
 
 
-HDFS_Scan_RetCode HdfsScan::trafHdfsRead(NAHeap *heap, ExHdfsScanStats *hdfsStats, int retArray[], short arrayLen)
+HDFS_Scan_RetCode HdfsScan::trafHdfsRead(int retArray[], short arrayLen)
 {
    QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsScan::trafHdfsRead() called.");
 
    if (initJNIEnv() != JOI_OK)
      return HDFS_SCAN_ERROR_TRAF_HDFS_READ_PARAM;
 
-   if (hdfsStats)
-       hdfsStats->getHdfsTimer().start();
+   if (hdfsStats_ != NULL)
+       hdfsStats_->getHdfsTimer().start();
    tsRecentJMFromJNI = JavaMethods_[JM_TRAF_HDFS_READ].jm_full_name;
    jintArray j_retArray = (jintArray)jenv_->CallObjectMethod(javaObj_, JavaMethods_[JM_TRAF_HDFS_READ].methodID);
-   if (hdfsStats) {
-      hdfsStats->incMaxHdfsIOTime(hdfsStats->getHdfsTimer().stop());
-      hdfsStats->incHdfsCalls();
+   if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
    }
 
    if (jenv_->ExceptionCheck()) {
@@ -239,12 +268,39 @@
    }
    if (j_retArray == NULL)
       return HDFS_SCAN_EOR;
+
    short retArrayLen = jenv_->GetArrayLength(j_retArray);
    ex_assert(retArrayLen == arrayLen, "HdfsScan::trafHdfsRead() InternalError: retArrayLen != arrayLen");
    jenv_->GetIntArrayRegion(j_retArray, 0, 4, retArray);
    return HDFS_SCAN_OK;
 }
 
+HDFS_Scan_RetCode HdfsScan::stop()
+{
+   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsScan::stop() called.");
+
+   if (initJNIEnv() != JOI_OK)
+     return HDFS_SCAN_ERROR_STOP_PARAM;
+
+   if (hdfsStats_ != NULL)
+       hdfsStats_->getHdfsTimer().start();
+   tsRecentJMFromJNI = JavaMethods_[JM_STOP].jm_full_name;
+   jenv_->CallVoidMethod(javaObj_, JavaMethods_[JM_STOP].methodID);
+   if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
+   }
+
+   if (jenv_->ExceptionCheck()) {
+      getExceptionDetails();
+      logError(CAT_SQL_HDFS, __FILE__, __LINE__);
+      logError(CAT_SQL_HDFS, "HdfsScan::stop()", getLastError());
+      jenv_->PopLocalFrame(NULL);
+      return HDFS_SCAN_ERROR_STOP_EXCEPTION;
+   }
+   return HDFS_SCAN_OK;
+}
+
 // ===========================================================================
 // ===== Class HdfsClient
 // ===========================================================================
@@ -256,24 +312,33 @@
 
 static const char* const hdfsClientErrorEnumStr[] = 
 {
-  "JNI NewStringUTF() in hdfsCreate()."
- ,"Java exception in hdfsCreate()."
- ,"JNI NewStringUTF() in hdfsOpen()."
- ,"Java exception in hdfsOpen()."
- ,"JNI NewStringUTF() in hdfsWrite()."
- ,"Java exception in hdfsWrite()."
- ,"Java exception in hdfsClose()."
- ,"JNI NewStringUTF() in hdfsMergeFiles()."
- ,"Java exception in hdfsMergeFiles()."
- ,"JNI NewStringUTF() in hdfsCleanUnloadPath()."
- ,"Java exception in hdfsCleanUnloadPath()."
- ,"JNI NewStringUTF() in hdfsExists()."
- ,"Java exception in hdfsExists()."
- ,"JNI NewStringUTF() in hdfsDeletePath()."
- ,"Java exception in hdfsDeletePath()."
- ,"Error in setHdfsFileInfo()."
- ,"Error in hdfsListDirectory()."
- ,"Java exception in hdfsListDirectory()."
+  "JNI NewStringUTF() in HdfsClient::hdfsCreate()."
+ ,"Java exception in HdfsClient::hdfsCreate()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsOpen()."
+ ,"Java exception in HdfsClient::hdfsOpen()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsWrite()."
+ ,"Java exception in HdfsClient::hdfsWrite()."
+ ,"Error in HdfsClient::hdfsRead()."
+ ,"Java exception in HdfsClient::hdfsRead()."
+ ,"Java exception in HdfsClient::hdfsClose()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsMergeFiles()."
+ ,"Java exception in HdfsClient::hdfsMergeFiles()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsCleanUnloadPath()."
+ ,"Java exception in HdfsClient::hdfsCleanUnloadPath()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsExists()."
+ ,"Java exception in HdfsClient::hdfsExists()."
+ ,"JNI NewStringUTF() in HdfsClient::hdfsDeletePath()."
+ ,"Java exception in HdfsClient::hdfsDeletePath()."
+ ,"Error in HdfsClient::setHdfsFileInfo()."
+ ,"Error in HdfsClient::hdfsListDirectory()."
+ ,"Java exception in HdfsClient::hdfsListDirectory()."
+ ,"preparing parameters for HdfsClient::getHiveTableMaxModificationTs()."
+ ,"java exception in HdfsClient::getHiveTableMaxModificationTs()."
+ ,"Error in HdfsClient::getFsDefaultName()."
+ ,"Java exception in HdfsClient::getFsDefaultName()."
+ ,"Buffer is small in HdfsClient::getFsDefaultName()."
+ ,"Error in HdfsClient::hdfsCreateDirectory()."
+ ,"Java exception in HdfsClient::hdfsCreateDirectory()."
 };
 
 //////////////////////////////////////////////////////////////////////////////
@@ -283,6 +348,8 @@
 {
    QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::~HdfsClient() called.");
    deleteHdfsFileInfo();
+   if (path_ != NULL) 
+      NADELETEBASIC(path_, getHeap());
 }
 
 void HdfsClient::deleteHdfsFileInfo()
@@ -292,22 +359,25 @@
       NADELETEBASIC(hdfsFileInfo_[i].mOwner, getHeap());
       NADELETEBASIC(hdfsFileInfo_[i].mGroup, getHeap());
    }
-   NADELETEBASIC(hdfsFileInfo_, getHeap()); 
+   if (hdfsFileInfo_ != NULL)
+      NADELETEBASICARRAY(hdfsFileInfo_, getHeap()); 
    numFiles_ = 0;
    hdfsFileInfo_ = NULL;
 }
 
-HdfsClient *HdfsClient::newInstance(NAHeap *heap, HDFS_Client_RetCode &retCode)
+HdfsClient *HdfsClient::newInstance(NAHeap *heap, ExHdfsScanStats *hdfsStats, HDFS_Client_RetCode &retCode)
 {
    QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::newInstance() called.");
 
    if (initJNIEnv() != JOI_OK)
      return NULL;
    retCode = HDFS_CLIENT_OK;
    HdfsClient *hdfsClient = new (heap) HdfsClient(heap);
    if (hdfsClient != NULL) {
        retCode = hdfsClient->init();
-       if (retCode != HDFS_CLIENT_OK) {
+       if (retCode == HDFS_CLIENT_OK) 
+          hdfsClient->setHdfsStats(hdfsStats);
+       else {
           NADELETE(hdfsClient, HdfsClient, heap);
           hdfsClient = NULL;
        }
@@ -315,6 +385,32 @@
    return hdfsClient;
 }
 
+HdfsClient* HdfsClient::getInstance()
+{
+   ContextCli *currContext = GetCliGlobals()->currContext();
+   HdfsClient *hdfsClient = currContext->getHDFSClient();
+   HDFS_Client_RetCode retcode = HDFS_CLIENT_OK;
+   if (hdfsClient == NULL) {
+      NAHeap *heap = currContext->exHeap();
+      hdfsClient = newInstance(heap, NULL, retcode);
+      if (hdfsClient == NULL || retcode != HDFS_CLIENT_OK)
+         return NULL; 
+      currContext->setHDFSClient(hdfsClient);
+   }
+   return hdfsClient;
+}
+
+void HdfsClient::deleteInstance()
+{
+   ContextCli *currContext = GetCliGlobals()->currContext();
+   HdfsClient *hdfsClient = currContext->getHDFSClient();
+   if (hdfsClient != NULL) {
+      NAHeap *heap = currContext->exHeap();
+      NADELETE(hdfsClient, HdfsClient, heap);
+      currContext->setHDFSClient(NULL);
+   }
+}
+
 HDFS_Client_RetCode HdfsClient::init()
 {
   static char className[]="org/trafodion/sql/HDFSClient";
@@ -335,11 +431,13 @@
     JavaMethods_[JM_CTOR      ].jm_name      = "<init>";
     JavaMethods_[JM_CTOR      ].jm_signature = "()V";
     JavaMethods_[JM_HDFS_CREATE     ].jm_name      = "hdfsCreate";
-    JavaMethods_[JM_HDFS_CREATE     ].jm_signature = "(Ljava/lang/String;Z)Z";
+    JavaMethods_[JM_HDFS_CREATE     ].jm_signature = "(Ljava/lang/String;ZZ)Z";
     JavaMethods_[JM_HDFS_OPEN       ].jm_name      = "hdfsOpen";
     JavaMethods_[JM_HDFS_OPEN       ].jm_signature = "(Ljava/lang/String;Z)Z";
     JavaMethods_[JM_HDFS_WRITE      ].jm_name      = "hdfsWrite";
-    JavaMethods_[JM_HDFS_WRITE      ].jm_signature = "([BJ)Z";
+    JavaMethods_[JM_HDFS_WRITE      ].jm_signature = "([B)I";
+    JavaMethods_[JM_HDFS_READ       ].jm_name      = "hdfsRead";
+    JavaMethods_[JM_HDFS_READ       ].jm_signature = "(Ljava/nio/ByteBuffer;)I";
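+    // JNI signature "(Ljava/nio/ByteBuffer;)I": one java.nio.ByteBuffer argument,
+    // int return value (the number of bytes read).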
     JavaMethods_[JM_HDFS_CLOSE      ].jm_name      = "hdfsClose";
     JavaMethods_[JM_HDFS_CLOSE      ].jm_signature = "()Z";
     JavaMethods_[JM_HDFS_MERGE_FILES].jm_name      = "hdfsMergeFiles";
@@ -352,6 +450,12 @@
     JavaMethods_[JM_HDFS_DELETE_PATH].jm_signature = "(Ljava/lang/String;)Z";
     JavaMethods_[JM_HDFS_LIST_DIRECTORY].jm_name      = "hdfsListDirectory";
     JavaMethods_[JM_HDFS_LIST_DIRECTORY].jm_signature = "(Ljava/lang/String;J)I";
+    JavaMethods_[JM_HIVE_TBL_MAX_MODIFICATION_TS].jm_name      = "getHiveTableMaxModificationTs";
+    JavaMethods_[JM_HIVE_TBL_MAX_MODIFICATION_TS ].jm_signature = "(Ljava/lang/String;I)J";
+    JavaMethods_[JM_GET_FS_DEFAULT_NAME].jm_name      = "getFsDefaultName";
+    JavaMethods_[JM_GET_FS_DEFAULT_NAME].jm_signature = "()Ljava/lang/String;";
+    JavaMethods_[JM_HDFS_CREATE_DIRECTORY].jm_name      = "hdfsCreateDirectory";
+    JavaMethods_[JM_HDFS_CREATE_DIRECTORY].jm_signature = "(Ljava/lang/String;)Z";
     rc = (HDFS_Client_RetCode)JavaObjectInterface::init(className, javaClass_, JavaMethods_, (Int32)JM_LAST, javaMethodsInitialized_);
     if (rc == HDFS_CLIENT_OK)
        javaMethodsInitialized_ = TRUE;
@@ -371,13 +475,22 @@
     return (char*)hdfsClientErrorEnumStr[errEnum-HDFS_CLIENT_FIRST];
 }
 
-HDFS_Client_RetCode HdfsClient::hdfsCreate(const char* path, NABoolean compress)
+void HdfsClient::setPath(const char *path)
+{
+   if (path_ != NULL) 
+      NADELETEBASIC(path_, getHeap());
+   size_t len = strlen(path);
+   path_ = new (getHeap()) char[len+1];
+   strcpy(path_, path); 
+}
+
+HDFS_Client_RetCode HdfsClient::hdfsCreate(const char* path, NABoolean overwrite, NABoolean compress)
 {
   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsCreate(%s) called.", path);
 
   if (initJNIEnv() != JOI_OK)
      return HDFS_CLIENT_ERROR_HDFS_CREATE_PARAM;
-
+  setPath(path);
   jstring js_path = jenv_->NewStringUTF(path);
   if (js_path == NULL) {
     GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_CREATE_PARAM));
@@ -386,9 +499,17 @@
   }
 
   jboolean j_compress = compress;
+  jboolean j_overwrite = overwrite;
+
+  if (hdfsStats_ != NULL)
+     hdfsStats_->getHdfsTimer().start();
 
   tsRecentJMFromJNI = JavaMethods_[JM_HDFS_CREATE].jm_full_name;
-  jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_CREATE].methodID, js_path, j_compress);
+  jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_CREATE].methodID, js_path, j_overwrite, j_compress);
+  if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
+  }
 
   if (jenv_->ExceptionCheck())
   {
@@ -425,9 +546,14 @@
   }
 
   jboolean j_compress = compress;
-
+  if (hdfsStats_ != NULL)
+     hdfsStats_->getHdfsTimer().start();
   tsRecentJMFromJNI = JavaMethods_[JM_HDFS_OPEN].jm_full_name;
   jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_OPEN].methodID, js_path, j_compress);
+  if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
+  }
 
   if (jenv_->ExceptionCheck())
   {
@@ -450,50 +576,92 @@
 }
 
 
-HDFS_Client_RetCode HdfsClient::hdfsWrite(const char* data, Int64 len)
+Int32 HdfsClient::hdfsWrite(const char* data, Int64 len, HDFS_Client_RetCode &hdfsClientRetcode)
 {
   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsWrite(%ld) called.", len);
 
-  if (initJNIEnv() != JOI_OK)
-     return HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
+  if (initJNIEnv() != JOI_OK) {
+     hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
+     return 0;
+  }
 
   //Write the requisite bytes into the file
   jbyteArray jbArray = jenv_->NewByteArray( len);
   if (!jbArray) {
     GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM));
     jenv_->PopLocalFrame(NULL);
-    return HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM;
+    hdfsClientRetcode =  HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM;
+    return 0;
   }
   jenv_->SetByteArrayRegion(jbArray, 0, len, (const jbyte*)data);
 
-  jlong j_len = len;
-  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_WRITE].jm_full_name;
-  jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_WRITE].methodID,jbArray , j_len);
+  if (hdfsStats_ != NULL)
+     hdfsStats_->getHdfsTimer().start();
 
+  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_WRITE].jm_full_name;
+  // Java method returns the cumulative bytes written
+  jint totalBytesWritten = jenv_->CallIntMethod(javaObj_, JavaMethods_[JM_HDFS_WRITE].methodID, jbArray);
+
+  if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
+  }
   if (jenv_->ExceptionCheck())
   {
     getExceptionDetails();
     logError(CAT_SQL_HDFS, __FILE__, __LINE__);
     logError(CAT_SQL_HDFS, "HdfsClient::hdfsWrite()", getLastError());
     jenv_->PopLocalFrame(NULL);
-    return HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
+    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
+    return 0;
   }
 
-  if (jresult == false)
-  {
-    logError(CAT_SQL_HDFS, "HdfsClient::hdfsWrite()", getLastError());
-    jenv_->PopLocalFrame(NULL);
-    return HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION;
-  }
-
-
   jenv_->PopLocalFrame(NULL);
-  return HDFS_CLIENT_OK;
+  hdfsClientRetcode = HDFS_CLIENT_OK;
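+  // The Java side reports a running total, so return only the bytes added by this call.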
+  Int32 bytesWritten = totalBytesWritten - totalBytesWritten_;
+  totalBytesWritten_ = totalBytesWritten;
+  return bytesWritten; 
 }
 
-//////////////////////////////////////////////////////////////////////////////
-//
-//////////////////////////////////////////////////////////////////////////////
+Int32 HdfsClient::hdfsRead(const char* data, Int64 len, HDFS_Client_RetCode &hdfsClientRetcode)
+{
+   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsWrite(%ld) called.", len);
+
+   if (initJNIEnv() != JOI_OK) {
+      hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_READ_EXCEPTION;
+      return 0;
+   }
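+   // Wrap the caller's buffer in a direct ByteBuffer so the Java side can fill it
+   // in place, without an extra copy through a jbyteArray.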
+   jobject j_buf = jenv_->NewDirectByteBuffer((BYTE *)data, len);
+   if (j_buf == NULL) {
+      GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_READ_PARAM));
+      jenv_->PopLocalFrame(NULL);
+      hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_READ_PARAM;
+      return 0;
+   }
+  if (hdfsStats_ != NULL)
+     hdfsStats_->getHdfsTimer().start();
+
+  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_READ].jm_full_name;
+  jint bytesRead = 0;
+  bytesRead = jenv_->CallIntMethod(javaObj_, JavaMethods_[JM_HDFS_READ].methodID, j_buf);
+
+  if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
+  }
+  if (jenv_->ExceptionCheck())
+  {
+    getExceptionDetails();
+    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
+    logError(CAT_SQL_HDFS, "HdfsClient::hdfsRead()", getLastError());
+    jenv_->PopLocalFrame(NULL);
+    hdfsClientRetcode = HDFS_CLIENT_ERROR_HDFS_READ_EXCEPTION;
+    return 0;
+  }
+  jenv_->PopLocalFrame(NULL);
+  hdfsClientRetcode = HDFS_CLIENT_OK;
+  return bytesRead; 
+}
+
 HDFS_Client_RetCode HdfsClient::hdfsClose()
 {
   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::close() called.");
@@ -502,9 +670,15 @@
      return HDFS_CLIENT_ERROR_HDFS_CLOSE_EXCEPTION;
 
   // String close();
+  if (hdfsStats_ != NULL)
+     hdfsStats_->getHdfsTimer().start();
   tsRecentJMFromJNI = JavaMethods_[JM_HDFS_CLOSE].jm_full_name;
   jboolean jresult = jenv_->CallBooleanMethod(javaObj_, JavaMethods_[JM_HDFS_CLOSE].methodID);
 
+  if (hdfsStats_ != NULL) {
+      hdfsStats_->incMaxHdfsIOTime(hdfsStats_->getHdfsTimer().stop());
+      hdfsStats_->incHdfsCalls();
+  }
   if (jenv_->ExceptionCheck())
   {
     getExceptionDetails();
@@ -531,6 +705,8 @@
                                                                              uldPath.data());
   if (initJNIEnv() != JOI_OK)
      return HDFS_CLIENT_ERROR_HDFS_CLEANUP_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_HDFS_CLEANUP_PARAM;
   jstring js_UldPath = jenv_->NewStringUTF(uldPath.data());
   if (js_UldPath == NULL) {
     GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HDFS_CLEANUP_PARAM));
@@ -561,8 +737,9 @@
                   srcPath.data(), dstPath.data());
 
   if (initJNIEnv() != JOI_OK)
-     return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_EXCEPTION;
-
+     return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM;
   jstring js_SrcPath = jenv_->NewStringUTF(srcPath.data());
 
   if (js_SrcPath == NULL) {
@@ -606,7 +783,9 @@
   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsDeletePath(%s called.",
                   delPath.data());
   if (initJNIEnv() != JOI_OK)
-     return HDFS_CLIENT_ERROR_HDFS_DELETE_PATH_EXCEPTION;
+     return HDFS_CLIENT_ERROR_HDFS_DELETE_PATH_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_HDFS_DELETE_PATH_PARAM;
 
   jstring js_delPath = jenv_->NewStringUTF(delPath.data());
   if (js_delPath == NULL) {
@@ -615,7 +794,6 @@
      return HDFS_CLIENT_ERROR_HDFS_DELETE_PATH_PARAM;
   }
 
-
   tsRecentJMFromJNI = JavaMethods_[JM_HDFS_DELETE_PATH].jm_full_name;
   jboolean jresult = jenv_->CallStaticBooleanMethod(javaClass_, JavaMethods_[JM_HDFS_DELETE_PATH].methodID, js_delPath);
 
@@ -674,9 +852,10 @@
 {
   QRLogger::log(CAT_SQL_HDFS, LL_DEBUG, "HdfsClient::hdfsExists(%s) called.",
                                                       uldPath.data());
-
   if (initJNIEnv() != JOI_OK)
-     return HDFS_CLIENT_ERROR_HDFS_EXISTS_EXCEPTION;
+     return HDFS_CLIENT_ERROR_HDFS_EXISTS_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_HDFS_EXISTS_PARAM;
 
   jstring js_UldPath = jenv_->NewStringUTF(uldPath.data());
   if (js_UldPath == NULL) {
@@ -699,6 +878,113 @@
   return HDFS_CLIENT_OK;
 }
 
+HDFS_Client_RetCode HdfsClient::getHiveTableMaxModificationTs( Int64& maxModificationTs, const char * tableDirPaths,  int levelDeep)
+{
+  QRLogger::log(CAT_SQL_HBASE, LL_DEBUG, "Enter HDFSClient_JNI::getHiveTableMaxModificationTs(%s) called.",tableDirPaths);
+  if (initJNIEnv() != JOI_OK)
+     return HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_PARAM; 
+  jstring js_tableDirPaths = jenv_->NewStringUTF(tableDirPaths);
+  if (js_tableDirPaths == NULL)
+  {
+    GetCliGlobals()->setJniErrorStr(getErrorText(HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_PARAM));
+    jenv_->PopLocalFrame(NULL);
+    return HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_PARAM;
+  }
+
+  jint jlevelDeep = levelDeep;
+  tsRecentJMFromJNI = JavaMethods_[JM_HIVE_TBL_MAX_MODIFICATION_TS].jm_full_name;
+  jlong jresult = jenv_->CallStaticLongMethod(javaClass_,
+                                          JavaMethods_[JM_HIVE_TBL_MAX_MODIFICATION_TS].methodID,
+                                          js_tableDirPaths, jlevelDeep);
+  jenv_->DeleteLocalRef(js_tableDirPaths);
+  if (jenv_->ExceptionCheck())
+  {
+    getExceptionDetails(jenv_);
+    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
+    logError(CAT_SQL_HDFS, "HdfsClient::getHiveTableMaxModificationTs()", getLastError());
+    jenv_->PopLocalFrame(NULL);
+    return HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_EXCEPTION;
+  }
+  QRLogger::log(CAT_SQL_HDFS, LL_DEBUG,
+       "Exit HdfsClient::getHiveTableMaxModificationTs().");
+  maxModificationTs = jresult;
+  jenv_->PopLocalFrame(NULL);
+
+  return HDFS_CLIENT_OK;
+}
+
+HDFS_Client_RetCode HdfsClient::getFsDefaultName(char* buf, int buf_len)
+{
+  QRLogger::log(CAT_SQL_HBASE, LL_DEBUG, "Enter HDFSClient_JNI::getFsDefaultName() called.");
+  if (initJNIEnv() != JOI_OK)
+     return HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_PARAM;
+
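+  // Fetch the default file system URI from the Java side (i.e., the configured
+  // fs.defaultFS setting) as a Java string.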
+  tsRecentJMFromJNI = JavaMethods_[JM_GET_FS_DEFAULT_NAME].jm_full_name;
+  jstring jresult = 
+        (jstring)jenv_->CallStaticObjectMethod(javaClass_,
+                              JavaMethods_[JM_GET_FS_DEFAULT_NAME].methodID);
+  if (jenv_->ExceptionCheck())
+  {
+    getExceptionDetails(jenv_);
+    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
+    logError(CAT_SQL_HDFS, "HdfsClient::getFsDefaultName()", getLastError());
+    jenv_->PopLocalFrame(NULL);
+    return HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_EXCEPTION;
+  }
+  if (jresult == NULL) {
+     jenv_->PopLocalFrame(NULL);
+     return HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_EXCEPTION;
+  }
+  const char* char_result = jenv_->GetStringUTFChars(jresult, 0);
+
+  HDFS_Client_RetCode retcode = HDFS_CLIENT_OK;
+  // leave room for the NUL terminator copied by strcpy()
+  if ( buf_len > (int)strlen(char_result) ) {
+     strcpy(buf, char_result);
+  } else
+     retcode = HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_BUFFER_TOO_SMALL;
+
+  jenv_->ReleaseStringUTFChars(jresult, char_result);
+  jenv_->PopLocalFrame(NULL);
+
+  return retcode;
+}
+
+HDFS_Client_RetCode HdfsClient::hdfsCreateDirectory(const NAString &dirName)
+{
+  QRLogger::log(CAT_SQL_HBASE, LL_DEBUG, "Enter HDFSClient_JNI::createDirectory() called.");
+  if (initJNIEnv() != JOI_OK)
+     return HDFS_CLIENT_ERROR_CREATE_DIRECTORY_PARAM;
+  if (getInstance() == NULL)
+     return HDFS_CLIENT_ERROR_CREATE_DIRECTORY_PARAM;
+
+  jstring js_dirName = jenv_->NewStringUTF(dirName.data());
+  if (js_dirName == NULL) {
+     jenv_->PopLocalFrame(NULL);
+     return HDFS_CLIENT_ERROR_CREATE_DIRECTORY_PARAM;
+  }
+
+  tsRecentJMFromJNI = JavaMethods_[JM_HDFS_CREATE_DIRECTORY].jm_full_name;
+  // the registered JNI signature "(Ljava/lang/String;)Z" returns a boolean
+  jboolean jresult = jenv_->CallStaticBooleanMethod(javaClass_,
+                              JavaMethods_[JM_HDFS_CREATE_DIRECTORY].methodID, js_dirName);
+  if (jenv_->ExceptionCheck())
+  {
+    getExceptionDetails(jenv_);
+    logError(CAT_SQL_HDFS, __FILE__, __LINE__);
+    logError(CAT_SQL_HDFS, "HdfsClient::hdfsCreateDirectory()", getLastError());
+    jenv_->PopLocalFrame(NULL);
+    return HDFS_CLIENT_ERROR_CREATE_DIRECTORY_EXCEPTION;
+  }
+  if (jresult == false)
+  {
+    logError(CAT_SQL_HDFS, "HdfsClient::hdfsCreateDirectory()", getLastError());
+    jenv_->PopLocalFrame(NULL);
+    return HDFS_CLIENT_ERROR_CREATE_DIRECTORY_EXCEPTION;
+  }
+  jenv_->PopLocalFrame(NULL);
+  return HDFS_CLIENT_OK;
+}
+
 HDFS_Client_RetCode HdfsClient::setHdfsFileInfo(JNIEnv *jenv, jint numFiles, jint fileNo, jboolean isDir, 
           jstring filename, jlong modTime, jlong len, jshort numReplicas, jlong blockSize, 
           jstring owner, jstring group, jshort permissions, jlong accessTime)
diff --git a/core/sql/executor/HdfsClient_JNI.h b/core/sql/executor/HdfsClient_JNI.h
index c45d226..6f68f4d 100644
--- a/core/sql/executor/HdfsClient_JNI.h
+++ b/core/sql/executor/HdfsClient_JNI.h
@@ -39,6 +39,8 @@
   ,HDFS_SCAN_ERROR_TRAF_HDFS_READ_PARAM
   ,HDFS_SCAN_ERROR_TRAF_HDFS_READ_EXCEPTION
   ,HDFS_SCAN_EOR
+  ,HDFS_SCAN_ERROR_STOP_PARAM
+  ,HDFS_SCAN_ERROR_STOP_EXCEPTION
   ,HDFS_SCAN_LAST
 } HDFS_Scan_RetCode;
 
@@ -48,11 +50,18 @@
   // Default constructor - for creating a new JVM		
   HdfsScan(NAHeap *heap)
   :  JavaObjectInterface(heap) 
+  , j_buf1_(NULL)
+  , j_buf2_(NULL)
+  , hdfsStats_(NULL)
   {}
 
+  ~HdfsScan();
+
   // Initialize JVM and all the JNI configuration.
   // Must be called.
   HDFS_Scan_RetCode init();
+  void setHdfsStats(ExHdfsScanStats *hdfsStats)
+  { hdfsStats_ = hdfsStats; } 
 
   // Get the error description.
   static char* getErrorText(HDFS_Scan_RetCode errEnum);
@@ -61,19 +70,25 @@
             HdfsFileInfoArray *hdfsFileInfoArray, Int32 beginRangeNum, Int32 numRanges, int rangeTailIOSize,
             ExHdfsScanStats *hdfsStats, HDFS_Scan_RetCode &hdfsScanRetCode);
 
-  HDFS_Scan_RetCode setScanRanges(NAHeap *heap, ExHdfsScanTcb::HDFS_SCAN_BUF *hdfsScanBuf, int scanBufSize, 
+  HDFS_Scan_RetCode setScanRanges(ExHdfsScanTcb::HDFS_SCAN_BUF *hdfsScanBuf, int scanBufSize, 
             HdfsFileInfoArray *hdfsFileInfoArray, Int32 beginRangeNum, Int32 numRanges, 
-            int rangeTailIOSize, ExHdfsScanStats *hdfsStats);
+            int rangeTailIOSize);
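+  // The heap and stats arguments are gone from the scan methods;
+  // hdfsStats_ is now member state set once via setHdfsStats().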
 
-  HDFS_Scan_RetCode trafHdfsRead(NAHeap *heap, ExHdfsScanStats *hdfsStats, int retArray[], short arrayLen);
+  HDFS_Scan_RetCode trafHdfsRead(int retArray[], short arrayLen);
+
+  HDFS_Scan_RetCode stop();
 
 private:
   enum JAVA_METHODS {
     JM_CTOR = 0, 
     JM_SET_SCAN_RANGES,
     JM_TRAF_HDFS_READ,
+    JM_STOP,
     JM_LAST
   };
+  jobject j_buf1_;
+  jobject j_buf2_;
+  ExHdfsScanStats *hdfsStats_;
   static jclass javaClass_;
   static JavaMethodInit* JavaMethods_;
   static bool javaMethodsInitialized_;
@@ -114,6 +129,8 @@
  ,HDFS_CLIENT_ERROR_HDFS_OPEN_EXCEPTION
  ,HDFS_CLIENT_ERROR_HDFS_WRITE_PARAM
  ,HDFS_CLIENT_ERROR_HDFS_WRITE_EXCEPTION
+ ,HDFS_CLIENT_ERROR_HDFS_READ_PARAM
+ ,HDFS_CLIENT_ERROR_HDFS_READ_EXCEPTION
  ,HDFS_CLIENT_ERROR_HDFS_CLOSE_EXCEPTION
  ,HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_PARAM
  ,HDFS_CLIENT_ERROR_HDFS_MERGE_FILES_EXCEPTION
@@ -127,6 +144,13 @@
  ,HDFS_CLIENT_ERROR_SET_HDFSFILEINFO
  ,HDFS_CLIENT_ERROR_HDFS_LIST_DIR_PARAM
  ,HDFS_CLIENT_ERROR_HDFS_LIST_DIR_EXCEPTION
+ ,HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_PARAM
+ ,HDFS_CLIENT_ERROR_HIVE_TBL_MAX_MODIFICATION_TS_EXCEPTION
+ ,HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_PARAM
+ ,HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_EXCEPTION
+ ,HDFS_CLIENT_ERROR_GET_FS_DEFAULT_NAME_BUFFER_TOO_SMALL
+ ,HDFS_CLIENT_ERROR_CREATE_DIRECTORY_PARAM
+ ,HDFS_CLIENT_ERROR_CREATE_DIRECTORY_EXCEPTION
  ,HDFS_CLIENT_LAST
 } HDFS_Client_RetCode;
 
@@ -136,34 +160,43 @@
   // Default constructor - for creating a new JVM		
   HdfsClient(NAHeap *heap)
   :  JavaObjectInterface(heap) 
     , hdfsFileInfo_(NULL) 
     , numFiles_(0)
+    , path_(NULL)
+    , totalBytesWritten_(0)
+    , hdfsStats_(NULL)
   {
   }
  
   ~HdfsClient();
-  static HdfsClient *newInstance(NAHeap *heap, HDFS_Client_RetCode &retCode);
+  static HdfsClient *newInstance(NAHeap *heap, ExHdfsScanStats *hdfsStats, HDFS_Client_RetCode &retCode);
+  static HdfsClient *getInstance();
+  static void deleteInstance();
 
   // Get the error description.
   static char* getErrorText(HDFS_Client_RetCode errEnum);
-  
-  // Initialize JVM and all the JNI configuration.
-  // Must be called.
+  void setHdfsStats(ExHdfsScanStats *hdfsStats)
+  { hdfsStats_ = hdfsStats; } 
   HDFS_Client_RetCode    init();
-  HDFS_Client_RetCode    hdfsCreate(const char* path, NABoolean compress);
+  HDFS_Client_RetCode    hdfsCreate(const char* path, NABoolean overwrite, NABoolean compress);
   HDFS_Client_RetCode    hdfsOpen(const char* path, NABoolean compress);
-  HDFS_Client_RetCode    hdfsWrite(const char* data, Int64 size);
+  Int32                  hdfsWrite(const char* data, Int64 size, HDFS_Client_RetCode &hdfsClientRetcode);
+  Int32                  hdfsRead(const char* data, Int64 size, HDFS_Client_RetCode &hdfsClientRetcode);
   HDFS_Client_RetCode    hdfsClose();
-  HDFS_Client_RetCode    hdfsMergeFiles(const NAString& srcPath,
-                                 const NAString& dstPath);
-  HDFS_Client_RetCode    hdfsCleanUnloadPath(const NAString& uldPath );
-  HDFS_Client_RetCode    hdfsExists(const NAString& uldPath,  NABoolean & exists );
-  HDFS_Client_RetCode    hdfsDeletePath(const NAString& delPath);
   HDFS_Client_RetCode    setHdfsFileInfo(JNIEnv *jenv, jint numFiles, jint fileNo, jboolean isDir, 
           jstring filename, jlong modTime, jlong len, jshort numReplicas, jlong blockSize, 
           jstring owner, jstring group, jshort permissions, jlong accessTime);
   HDFS_Client_RetCode    hdfsListDirectory(const char *pathStr, HDFS_FileInfo **hdfsFileInfo, int *numFiles);
-  void	                 deleteHdfsFileInfo();
+  static HDFS_Client_RetCode    hdfsMergeFiles(const NAString& srcPath, const NAString& dstPath);
+  static HDFS_Client_RetCode    hdfsCleanUnloadPath(const NAString& uldPath );
+  static HDFS_Client_RetCode    hdfsExists(const NAString& uldPath,  NABoolean & exists );
+  static HDFS_Client_RetCode    hdfsDeletePath(const NAString& delPath);
+  static HDFS_Client_RetCode    getHiveTableMaxModificationTs(Int64& maxModificationTs, const char * tableDirPaths,  int levelDeep);
+  // Get the hdfs URL.
+  // buffer is the buffer pre-allocated to hold the result
+  // buf_len is the length of the buffer in bytes
+  static HDFS_Client_RetCode    getFsDefaultName(char* buffer, Int32 buf_len);
+  static HDFS_Client_RetCode    hdfsCreateDirectory(const NAString& path);
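+  // The static methods above call getInstance() internally to ensure the
+  // JNI class is initialized before invoking static Java methods.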
 
 private:  
   enum JAVA_METHODS {
@@ -171,16 +204,27 @@
     JM_HDFS_CREATE,
     JM_HDFS_OPEN,
     JM_HDFS_WRITE,
+    JM_HDFS_READ,
     JM_HDFS_CLOSE,
     JM_HDFS_MERGE_FILES,
     JM_HDFS_CLEAN_UNLOAD_PATH,
     JM_HDFS_EXISTS,
     JM_HDFS_DELETE_PATH,
     JM_HDFS_LIST_DIRECTORY,
+    JM_HIVE_TBL_MAX_MODIFICATION_TS,
+    JM_GET_FS_DEFAULT_NAME,
+    JM_HDFS_CREATE_DIRECTORY,
     JM_LAST
   };
+
+  void deleteHdfsFileInfo();
+  void setPath(const char *path);
+
   HDFS_FileInfo *hdfsFileInfo_; 
   int numFiles_;
+  char *path_;
+  Int64 totalBytesWritten_;
+  ExHdfsScanStats *hdfsStats_;
   static jclass javaClass_;
   static JavaMethodInit* JavaMethods_;
   static bool javaMethodsInitialized_;
diff --git a/core/sql/executor/JavaObjectInterface.cpp b/core/sql/executor/JavaObjectInterface.cpp
index 6919866..2260715 100644
--- a/core/sql/executor/JavaObjectInterface.cpp
+++ b/core/sql/executor/JavaObjectInterface.cpp
@@ -24,6 +24,7 @@
 #include "JavaObjectInterface.h"
 #include "QRLogger.h"
 #include "Globals.h"
+#include "Context.h"
 #include "ComUser.h"
 #include "LmJavaOptions.h"
 #include "ex_ex.h"
@@ -546,7 +547,7 @@
    if (jenv == NULL)
        jenv = jenv_;
    CliGlobals *cliGlobals = GetCliGlobals();
-   NAString error_msg(heap_);
+   NAString error_msg(cliGlobals->currContext()->exHeap());
    if (jenv == NULL)
    {
       error_msg = "Internal Error - Unable to obtain jenv";
diff --git a/core/sql/executor/JavaObjectInterface.h b/core/sql/executor/JavaObjectInterface.h
index b167420..d07b6f0 100644
--- a/core/sql/executor/JavaObjectInterface.h
+++ b/core/sql/executor/JavaObjectInterface.h
@@ -119,12 +119,12 @@
   // Get the error description.
   static char* getErrorText(JOI_RetCode errEnum);
  
-  NAString getLastError();
+  static NAString getLastError();
 
   // Write the description of a Java error to the log file.
-  void logError(std::string &cat, const char* methodName, const char *result);
-  void logError(std::string &cat, const char* methodName, jstring jresult);
-  void logError(std::string &cat, const char* file, int line);
+  static void logError(std::string &cat, const char* methodName, const char *result);
+  static void logError(std::string &cat, const char* methodName, jstring jresult);
+  static void logError(std::string &cat, const char* file, int line);
 
   static JOI_RetCode initJNIEnv();
   static char* buildClassPath();  
@@ -145,9 +145,9 @@
   }
   // Pass in jenv if the thread where the object is created is different than
   // the thread where exception occurred
-  NABoolean getExceptionDetails(JNIEnv *jenv = NULL);  
+  static NABoolean getExceptionDetails(JNIEnv *jenv = NULL);  
 
-  void appendExceptionMessages(JNIEnv *jenv, jthrowable a_exception, NAString &error_msg);
+  static void appendExceptionMessages(JNIEnv *jenv, jthrowable a_exception, NAString &error_msg);
   
   NAHeap *getHeap() { return heap_; }
 protected:
diff --git a/core/sql/executor/OrcFileReader.h b/core/sql/executor/OrcFileReader.h
index 925456b..536235a 100644
--- a/core/sql/executor/OrcFileReader.h
+++ b/core/sql/executor/OrcFileReader.h
@@ -90,7 +90,7 @@
   
   OFR_RetCode				getRowCount(Int64& count);
 
-  virtual char*  getErrorText(OFR_RetCode errEnum);
+  static char*  getErrorText(OFR_RetCode errEnum);
 
 protected:
   jstring getLastError();
diff --git a/core/sql/executor/SequenceFileReader.h b/core/sql/executor/SequenceFileReader.h
index 12a68c2..a679eba 100644
--- a/core/sql/executor/SequenceFileReader.h
+++ b/core/sql/executor/SequenceFileReader.h
@@ -99,7 +99,7 @@
 
   SFR_RetCode    fetchRowsIntoBuffer(Int64 stopOffset, char* buffer, Int64 buffSize, Int64& bytesRead, char rowDelimiter);
 
-  virtual char*  getErrorText(SFR_RetCode errEnum);
+  static char*  getErrorText(SFR_RetCode errEnum);
 
 protected:
   jstring getLastError();
@@ -187,7 +187,7 @@
   SFW_RetCode    close();
   SFW_RetCode    release();
 
-  virtual char*  getErrorText(SFW_RetCode errEnum);
+  static char*  getErrorText(SFW_RetCode errEnum);
 
 
 
diff --git a/core/sql/executor/ex_control.cpp b/core/sql/executor/ex_control.cpp
index 3684b1f..4a23e30 100644
--- a/core/sql/executor/ex_control.cpp
+++ b/core/sql/executor/ex_control.cpp
@@ -197,7 +197,7 @@
       {
          
         NAHeap *arkcmpHeap = currCtxt->exHeap();
-	ComDiagsArea *da = ComDiagsArea::allocate(getHeap());
+	ComDiagsArea *da = NULL;
         cmpStatus = CmpCommon::context()->compileDirect(
                                (char *) data, dataLen, arkcmpHeap,
                                SQLCHARSETCODE_UTF8,
@@ -217,11 +217,12 @@
             
 	    // da->clear();
             getHeap()->deallocateMemory(emsText);
+            if (da != NULL)
+               da->decrRefCount();
           }
         else
           saveControl = TRUE; // need to save control to exe ControlInfoTable
 
-        da->decrRefCount();
         if (dummyReply != NULL)
           {
             arkcmpHeap->deallocateMemory((void*)dummyReply);
diff --git a/core/sql/executor/ex_ddl.cpp b/core/sql/executor/ex_ddl.cpp
index 36772a2..ed04c04 100644
--- a/core/sql/executor/ex_ddl.cpp
+++ b/core/sql/executor/ex_ddl.cpp
@@ -258,8 +258,6 @@
           const char *parentQid = masterGlob->getStatement()->
             getUniqueStmtId();
           CmpCommon::context()->sqlSession()->setParentQid(parentQid);
-          if (cpDiagsArea == NULL)
-	    cpDiagsArea = ComDiagsArea::allocate(getHeap());
           // Despite its name, the compileDirect method is where 
           // the DDL is actually performed. 
           Int32 cpStatus = CmpCommon::context()->compileDirect(
@@ -606,8 +604,6 @@
               getUniqueStmtId();
             CmpCommon::context()->sqlSession()->setParentQid(parentQid);
             
-            if (cpDiagsArea == NULL)
-              cpDiagsArea = ComDiagsArea::allocate(getHeap());
             cmpStatus = CmpCommon::context()->compileDirect(
                data_, dataLen_,
                currContext->exHeap(),
@@ -951,6 +947,7 @@
   ComDiagsArea *da = NULL;  
   ComDiagsArea *diagsArea;  
   NAHeap *arkcmpHeap = currContext()->exHeap(); // same heap, see cli/Context.h
+  NABoolean deleteTmpDa = FALSE;
   while (1)
     {
       switch (pstate.step_)
@@ -987,8 +984,6 @@
                   getUniqueStmtId();
                 CmpCommon::context()->sqlSession()->setParentQid(parentQid);
 
-                if (da == NULL)
-                  da = ComDiagsArea::allocate(arkcmpHeap);
                 compStatus = CmpCommon::context()->compileDirect(
                                  describeTdb().query_,
                                  describeTdb().queryLen_,
@@ -1021,6 +1016,9 @@
                   {
                     pstate.step_ = HANDLE_ERROR_;
                   }
+                  // The ComDiagsArea allocated in compileDirect() needs to be deallocated here
+                  if (da != NULL)
+                    deleteTmpDa = TRUE; 
               }
             else if (getArkcmp())  // regular arkcmp exists
               {
@@ -1150,6 +1148,8 @@
  	        up_entry->setDiagsArea(da);
 	        up_entry->getAtp()->getDiagsArea()->incrRefCount();
                 // Reset the da for the next error/warning.
+                if (deleteTmpDa)
+                   da->decrRefCount();
                 da = NULL;
               }
 
@@ -1194,6 +1194,8 @@
  	    // insert into parent
 	    qparent_.up->insert();
 	    
+            if (deleteTmpDa)
+               da->decrRefCount();
  	    // reset the diagsArea for the next error to be set properly.
  	    da = NULL;
  	    pstate.step_ = DONE_;
diff --git a/core/sql/executor/ex_root.cpp b/core/sql/executor/ex_root.cpp
index 3b3a4d0..38654b3 100644
--- a/core/sql/executor/ex_root.cpp
+++ b/core/sql/executor/ex_root.cpp
@@ -2235,6 +2235,7 @@
             }
           else 
             {
+/*
               // redrive the scheduler.
               // Fix for CR 6701 - some ExExeUtil operators call back
               // in to the CLI and explicitly clear the curr context
@@ -2246,13 +2247,15 @@
               ContextCli *context = statement->getContext();
               ComDiagsArea *savedContextDiags = context->diags().copy();
               context->diags().clear();
+*/
 
               schedRetcode = glob->getScheduler()->work();
-
+/*
               savedContextDiags->mergeAfter(context->diags());
               context->diags().clear();
               context->diags().mergeAfter(*savedContextDiags);
               savedContextDiags->decrRefCount();
+*/
             }
         }
       if (!getQueueDiags)
@@ -2641,6 +2644,7 @@
   SessionDefaults *sessionDefaults = context->getSessionDefaults();
   if (sessionDefaults)
   {
     // Note that it will be required that if a session does not
     // allow queries to be canceled, then it also will not be 
     // possible to suspend the queries.
@@ -2659,15 +2663,11 @@
       return;
     }
   }
-  NABoolean diagsAreaAllocated = FALSE;
-
-  if (diagsArea == NULL)
-  {
-     diagsAreaAllocated = TRUE;
-     diagsArea = ComDiagsArea::allocate(getHeap());
-  }
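+  // Remember how many error conditions exist now, so any errors added by the
+  // getSsmpServer() call below can be selectively downgraded to warnings.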
+  Lng32 fromCond = 0;
+  if (diagsArea != NULL)
+      fromCond = diagsArea->mark();
   ExSsmpManager *ssmpManager = context->getSsmpManager();
-  cbServer_ = ssmpManager->getSsmpServer(
+  cbServer_ = ssmpManager->getSsmpServer((NAHeap *)getHeap(),
                                  cliGlobals->myNodeName(), 
                                  cliGlobals->myCpu(), diagsArea);
   if (cbServer_ == NULL || cbServer_->getControlConnection() == NULL)		
@@ -2675,17 +2675,9 @@
       // We could not get a phandle for the cancel broker.  However,		
       // let the query run (on the assumption that it will not need to 		
       // be canceled) and convert any error conditions to warnings.		
-
-      // tbd - figure a way retry registration later, as the query progresses.		
-      if (diagsArea != NULL)		
-         NegateAllErrors(diagsArea);		
+      if (diagsArea != NULL)
+         diagsArea->negateErrors(fromCond);
       return;
   }
-  else if (diagsAreaAllocated)
-  {
-     diagsArea->decrRefCount();
-     diagsArea = NULL;
-  }
 
   // The stream's actOnSend method will delete (or call decrRefCount()) 
   // for this object.
diff --git a/core/sql/exp/ExpLOBaccess.cpp b/core/sql/exp/ExpLOBaccess.cpp
index b5a427b..4b3a785 100644
--- a/core/sql/exp/ExpLOBaccess.cpp
+++ b/core/sql/exp/ExpLOBaccess.cpp
@@ -63,6 +63,7 @@
 #include "ComQueue.h"
 #include "QRLogger.h"
 #include "NAMemory.h"
+#include "HdfsClient_JNI.h"
 #include <seabed/ms.h>
 #include <seabed/fserr.h>
 #include <curl/curl.h>
@@ -366,74 +367,6 @@
     return LOB_OPER_OK;
 }
 
-Ex_Lob_Error ExLob::dataModCheck2(
-       char * dirPath, 
-       Int64  inputModTS,
-       Lng32  numOfPartLevels,
-       Int64 &failedModTS,
-       char  *failedLocBuf,
-       Int32 *failedLocBufLen)
-{
-  if (numOfPartLevels == 0)
-    return LOB_OPER_OK;
-
-  Lng32 currNumFilesInDir = 0;
-  hdfsFileInfo * fileInfos = 
-    hdfsListDirectory(fs_, dirPath, &currNumFilesInDir);
-  if ((currNumFilesInDir > 0) && (fileInfos == NULL))
-    {
-      return LOB_DATA_FILE_NOT_FOUND_ERROR;
-    }
-
-  NABoolean failed = FALSE;
-  for (Lng32 i = 0; ((NOT failed) && (i < currNumFilesInDir)); i++)
-    {
-      hdfsFileInfo &fileInfo = fileInfos[i];
-      if (fileInfo.mKind == kObjectKindDirectory)
-        {
-          Int64 currModTS = fileInfo.mLastMod;
-          if ((inputModTS > 0) &&
-              (currModTS > inputModTS) &&
-	      (!strstr(fileInfo.mName, ".hive-staging_hive_")))
-            {
-              failed = TRUE;
-              failedModTS = currModTS;
-
-              if (failedLocBuf && failedLocBufLen)
-                {
-                  Lng32 failedFileLen = strlen(fileInfo.mName);
-                  Lng32 copyLen = (failedFileLen > (*failedLocBufLen-1) 
-                                   ? (*failedLocBufLen-1) : failedFileLen);
-                  
-                  str_cpy_and_null(failedLocBuf, fileInfo.mName, copyLen,
-                                   '\0', ' ', TRUE);
-                  *failedLocBufLen = copyLen;
-                }
-            }
-        }
-    }
-
-  hdfsFreeFileInfo(fileInfos, currNumFilesInDir);
-  if (failed)
-    return LOB_DATA_MOD_CHECK_ERROR;
-
-  numOfPartLevels--;
-  Ex_Lob_Error err = LOB_OPER_OK;
-  if (numOfPartLevels > 0)
-    {
-      for (Lng32 i = 0; ((NOT failed) && (i < currNumFilesInDir)); i++)
-        {
-          hdfsFileInfo &fileInfo = fileInfos[i];
-          err = dataModCheck2(fileInfo.mName, inputModTS, numOfPartLevels,
-                              failedModTS, failedLocBuf, failedLocBufLen);
-          if (err != LOB_OPER_OK)
-            return err;
-        }
-    }
-
-  return LOB_OPER_OK;
-}
-
 // numOfPartLevels: 0, if not partitioned
 //                  N, number of partitioning cols
 // failedModTS: timestamp value that caused the mismatch
@@ -446,63 +379,46 @@
        char  *failedLocBuf,
        Int32 *failedLocBufLen)
 {
+  if (inputModTS <= 0)
+    return LOB_OPER_OK;
+
+  Ex_Lob_Error result = LOB_OPER_OK;
+  HDFS_Client_RetCode rc;
+  Int64 currModTS;
+
   failedModTS = -1;
 
-  // find mod time of root dir
-  hdfsFileInfo *fileInfos = hdfsGetPathInfo(fs_, dirPath);
-  if (fileInfos == NULL)
-    {       
+  // libhdfs returns a second-resolution timestamp,
+  // get a millisecond-resolution timestamp via JNI
+  rc = HdfsClient::getHiveTableMaxModificationTs(currModTS,
+                                                dirPath,
+                                                numOfPartLevels);
+  // check for errors and timestamp mismatches
+  if (rc != HDFS_CLIENT_OK || currModTS <= 0)
+    {
+      result = LOB_DATA_READ_ERROR;
+    }
+  else if (currModTS > inputModTS)
+    {
+      result = LOB_DATA_MOD_CHECK_ERROR;
+      failedModTS = currModTS;
+    }
+
+  if (result != LOB_OPER_OK && failedLocBuf && failedLocBufLen)
+    {
+      // the exact location within a partitioned table is no longer
+      // available, so report the table root directory instead
       Lng32 failedFileLen = strlen(dirPath);
       Lng32 copyLen = (failedFileLen > (*failedLocBufLen-1) 
                        ? (*failedLocBufLen-1) : failedFileLen);
-      Int32 hdfserror = errno;
-      char hdfsErrStr[20];
-      snprintf(hdfsErrStr,sizeof(hdfsErrStr),"(errno %d)",errno);
+
       str_cpy_and_null(failedLocBuf, dirPath, copyLen,
                        '\0', ' ', TRUE);
-      str_cat_c(failedLocBuf,hdfsErrStr);
       *failedLocBufLen = copyLen;
-      if (errno)
-        {
-          // Allow for hdfs error. AQR will find the new hive mapped files
-          // if the hive table has been remapped to new data files
-          return LOB_DATA_MOD_CHECK_ERROR;
-        }
-      else
-        return LOB_DATA_READ_ERROR;
-    }
-    
-  Int64 currModTS = fileInfos[0].mLastMod;
-  if ((inputModTS > 0) &&
-      (currModTS > inputModTS))
-    {
-      hdfsFileInfo &fileInfo = fileInfos[0];
-
-      failedModTS = currModTS;
-
-      if (failedLocBuf && failedLocBufLen)
-        {
-          Lng32 failedFileLen = strlen(fileInfo.mName);
-          Lng32 copyLen = (failedFileLen > (*failedLocBufLen-1) 
-                           ? (*failedLocBufLen-1) : failedFileLen);
-          
-          str_cpy_and_null(failedLocBuf, fileInfo.mName, copyLen,
-                           '\0', ' ', TRUE);
-          *failedLocBufLen = copyLen;
-        }
-
-      hdfsFreeFileInfo(fileInfos, 1);
-      return LOB_DATA_MOD_CHECK_ERROR;
     }
 
-  hdfsFreeFileInfo(fileInfos, 1);
-  if (numOfPartLevels > 0)
-    {
-      return dataModCheck2(dirPath, inputModTS, numOfPartLevels, 
-                           failedModTS, failedLocBuf, failedLocBufLen);
-    }
-
-  return LOB_OPER_OK;
+  return result;
 }
 
 Ex_Lob_Error ExLob::emptyDirectory(char *dirPath,
diff --git a/core/sql/exp/ExpLOBaccess.h b/core/sql/exp/ExpLOBaccess.h
index 47f06df..3c5e93e 100644
--- a/core/sql/exp/ExpLOBaccess.h
+++ b/core/sql/exp/ExpLOBaccess.h
@@ -484,14 +484,6 @@
        char  *failedLocBuf,
        Int32 *failedLocBufLen);
 
-  Ex_Lob_Error dataModCheck2(
-       char * dirPath, 
-       Int64  modTS,
-       Lng32  numOfPartLevels,
-       Int64 &failedModTS,
-       char  *failedLocBuf,
-       Int32 *failedLocBufLen);
-
   Ex_Lob_Error emptyDirectory(char* dirPath, ExLobGlobals* lobGlobals);
 
   ExLobStats *getStats() { return &stats_; }
diff --git a/core/sql/export/ComDiags.h b/core/sql/export/ComDiags.h
index fa9b1e5..da54f7d 100644
--- a/core/sql/export/ComDiags.h
+++ b/core/sql/export/ComDiags.h
@@ -837,7 +837,7 @@
 
   // These members provide set and get operations on the data
   // of a ComDiagsArea that is defined in ANSI table 21, in subclause
-  // 18.1.  See also, ``Creating Errors Korrectly.''
+  // 18.1.  See also, ``Creating Errors Correctly.''
 
   Lng32	              getNumber           () const;
   Lng32		      getNumber           (DgSqlCode::ErrorOrWarning) const;
@@ -1018,6 +1018,12 @@
    while (getNumber(DgSqlCode::ERROR_))
      negateCondition(0);
  }
+
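+ // Downgrade to warnings only the error conditions recorded after
+ // 'fromCondition', typically a value saved earlier via mark().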
+ void negateErrors (Lng32 fromCondition)
+ {
+   while (getNumber(DgSqlCode::ERROR_) > fromCondition)
+        negateCondition(fromCondition);
+ }
  
 void negateAllWarnings  ()
  {
diff --git a/core/sql/generator/GenFastTransport.cpp b/core/sql/generator/GenFastTransport.cpp
index bc788d9..75c1e0e 100644
--- a/core/sql/generator/GenFastTransport.cpp
+++ b/core/sql/generator/GenFastTransport.cpp
@@ -646,7 +646,7 @@
                  (char*)getHiveTableName().data(),
                  TRUE, // isHive
                  (char*)getTargetName().data(), // root dir
-                 hTabStats->getModificationTS(),
+                 hTabStats->getModificationTSmsec(),
                  0,
                  NULL,
                  (char*)getHdfsHostName().data(), 
@@ -657,7 +657,7 @@
       else
         {
           // sim check at leaf
-          modTS = hTabStats->getModificationTS();
+          modTS = hTabStats->getModificationTSmsec();
         }
     } // do sim check
 
diff --git a/core/sql/generator/GenPreCode.cpp b/core/sql/generator/GenPreCode.cpp
index 208ca0b..9832c44 100644
--- a/core/sql/generator/GenPreCode.cpp
+++ b/core/sql/generator/GenPreCode.cpp
@@ -1154,12 +1154,14 @@
 
   if (VEG_DEBUG)
     {
-      NAString av,iv,vb;
+      NAString av,iv,vb,vr;
       availableValues.unparse(av);
       inputValues.unparse(iv);
       valuesToBeBound.unparse(vb);
+      ValueIdSet thisVegRef(getValueId());
+      thisVegRef.unparse(vr);
       cout << endl;
-	  cout << "VEGReference " << getValueId() << " :" << endl;
+      cout << "VEGReference " << getValueId() << " (" << vr << "):" << endl;
       cout << "AV: " << av << endl;
       cout << "IV: " << iv << endl;
       cout << "VB: " << vb << endl;
diff --git a/core/sql/generator/GenRelScan.cpp b/core/sql/generator/GenRelScan.cpp
index 3d01223..827b94c 100644
--- a/core/sql/generator/GenRelScan.cpp
+++ b/core/sql/generator/GenRelScan.cpp
@@ -102,7 +102,7 @@
   else if (format_ == SHOWSTATS_) type = ComTdbDescribe::SHOWSTATS_;
   else if (format_ == TRANSACTION_) type = ComTdbDescribe::TRANSACTION_;
   else if (format_ == SHORT_) type = ComTdbDescribe::SHORT_;
-  else if (format_ == LONG_) type = ComTdbDescribe::LONG_;
+  else if (format_ == SHOWDDL_) type = ComTdbDescribe::LONG_;
   else if (format_ == PLAN_) type = ComTdbDescribe::PLAN_;
   else if (format_ == LABEL_) type = ComTdbDescribe::LABEL_;
   else if (format_ == SHAPE_) type = ComTdbDescribe::SHAPE_;
@@ -393,7 +393,8 @@
                                 char* &hdfsHostName,
                                 Int32 &hdfsPort,
                                 NABoolean &useCursorMulti,
-                                NABoolean &doSplitFileOpt)
+                                NABoolean &doSplitFileOpt,
+                                NABoolean &isCompressedFile)
 {
   Space * space          = generator->getSpace();
 
@@ -480,6 +481,9 @@
 	      hfi.bytesToRead_ = span;
 	      hfi.fileName_ = fnameInList;
 	      
+              isCompressedFile = FALSE;
+              //if (file->getCompressionInfo().getCompressionMethod() != ComCompressionInfo::UNCOMPRESSED)
+              //   isCompressedFile = TRUE;
 	      char * hfiInList = space->allocateAndCopyToAlignedSpace
 		((char*)&hfi, sizeof(HdfsFileInfo));
 	      
@@ -1144,13 +1148,14 @@
   }
   NABoolean useCursorMulti = FALSE;
   NABoolean doSplitFileOpt = FALSE;
+  NABoolean isCompressedFile = FALSE;
 
   if ((hTabStats->isTextFile()) || (hTabStats->isSequenceFile()))
     {
       genForTextAndSeq(generator, 
                        hdfsFileInfoList, hdfsFileRangeBeginList, hdfsFileRangeNumList,
                        hdfsHostName, hdfsPort,
-                       useCursorMulti, doSplitFileOpt);
+                       useCursorMulti, doSplitFileOpt, isCompressedFile);
     }
   else if (hTabStats->isOrcFile())
     {
@@ -1277,7 +1282,7 @@
       (hTabStats->numOfPartCols() <= 0) &&
       (!getCommonSubExpr()))
     {
-      modTS = hTabStats->getModificationTS();
+      modTS = hTabStats->getModificationTSmsec();
       numOfPartLevels = hTabStats->numOfPartCols();
 
       // if specific directories are to checked based on the query struct
@@ -1296,7 +1301,7 @@
                  tiName,
                  TRUE, // isHive
                  (char*)hTabStats->tableDir().data(), // root dir
-                 hTabStats->getModificationTS(),
+                 modTS,
                  numOfPartLevels,
                  hdfsDirsToCheck,
                  hdfsHostName, hdfsPort);
@@ -1310,8 +1315,6 @@
             space->allocateAndCopyToAlignedSpace(hTabStats->tableDir().data(),
                                                  hTabStats->tableDir().length(),
                                                  0);
-          modTS = hTabStats->getModificationTS();
-          numOfPartLevels = hTabStats->numOfPartCols();
         }
     }
 
@@ -1394,6 +1397,8 @@
   if (CmpCommon::getDefault(USE_LIBHDFS_SCAN) == DF_ON)
      hdfsscan_tdb->setUseLibhdfsScan(TRUE);
 
+  hdfsscan_tdb->setCompressedFile(isCompressedFile);
+
   if(!generator->explainDisabled()) {
     generator->setExplainTuple(
        addExplainInfo(hdfsscan_tdb, 0, 0, generator));
diff --git a/core/sql/generator/GenRelUpdate.cpp b/core/sql/generator/GenRelUpdate.cpp
index e0a8d38..b7cc6bb 100644
--- a/core/sql/generator/GenRelUpdate.cpp
+++ b/core/sql/generator/GenRelUpdate.cpp
@@ -2945,6 +2945,8 @@
 	// without code change
 	if (loadFlushSizeinRows >= USHRT_MAX/2)
 	  loadFlushSizeinRows = ((USHRT_MAX/2)-1);
+	else if (loadFlushSizeinRows < 1)  // make sure we don't fall to zero on really long rows
+	  loadFlushSizeinRows = 1;
 	hbasescan_tdb->setTrafLoadFlushSize(loadFlushSizeinRows);
 
         // For sample file, set the sample location in HDFS and the sampling rate.
diff --git a/core/sql/lib_mgmt/src/main/java/org/trafodion/libmgmt/JDBCUDR.java b/core/sql/lib_mgmt/src/main/java/org/trafodion/libmgmt/JDBCUDR.java
new file mode 100644
index 0000000..e75f603
--- /dev/null
+++ b/core/sql/lib_mgmt/src/main/java/org/trafodion/libmgmt/JDBCUDR.java
@@ -0,0 +1,721 @@
+/**********************************************************************
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+**********************************************************************/
+
+/***************************************************
+ * A TMUDF that executes a generic JDBC query
+ * and returns the result of the one SQL statement
+ * in the list that produces results as a table-valued
+ * output
+ *
+ * Invocation (all arguments are strings):
+ *
+ * select ... from udf(JDBC(
+ *    <name of JDBC driver jar>, // file name of the JDBC driver jar, stored
+ *                               // in $TRAF_HOME/udr/public/external_libs
+ *    <name of JDBC driver class in the jar>,
+ *    <connection string>,
+ *    <user name>,
+ *    <password>,
+ *    <statement_type>,
+ *    <sql statement 1>
+ *    [ , <sql statements 2 ...n> ] )) ...
+ *
+ * The first 7 arguments are required and must be
+ * string literals that are available at compile
+ * time.
+ * Statement type:
+ *    'source': This statement produces a result
+ *              (only type allowed at this time)
+ *              (may support "target" to insert
+ *               into a table via JDBC later)
+ *
+ * Note that only one of the SQL statements can be
+ * a select or other result-producing statement.
+ * The others can perform setup and cleanup
+ * operations, if necessary (e.g. create table,
+ * insert, drop table).
+ *
+ * For an example, see file
+ * core/sql/regress/udr/TEST002.
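+ *
+ * A hypothetical invocation (all names below are
+ * illustrative only):
+ *
+ * select * from udf(JDBC('myjdbcdriver.jar',
+ *                        'org.example.jdbc.Driver',
+ *                        'jdbc:example://myhost:12345/mydb',
+ *                        'user', 'password',
+ *                        'source',
+ *                        'select a, b from t'));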
+ ***************************************************/
+
+package org.trafodion.libmgmt;
+
+import org.trafodion.sql.udr.*;
+import java.sql.*;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Vector;
+import java.lang.Math;
+import java.util.Properties;
+import java.util.logging.Logger;
+
+class JDBCUDR extends UDR
+{
+    // class used to connect, both at compile and at runtime
+    static class JdbcConnectionInfo
+    {
+        String driverJar_;
+        String driverClassName_;
+        String connectionString_;
+        String username_;
+        String password_;
+        boolean debug_;
+
+        Connection conn_;
+
+        public void setJar(String jar)
+                                                     { driverJar_ = jar; }
+        public void setClass(String className)
+                                         { driverClassName_ = className; }
+        public void setConnString(String connString)
+                                       { connectionString_ = connString; }
+        public void setUsername(String userName)
+                                                 { username_ = userName; }
+        public void setPassword(String password)
+                                                 { password_ = password; }
+        public void setDebug(boolean debug)            { debug_ = debug; }
+
+        public Connection connect() throws UDRException
+        {
+          try {
+            Path driverJarPath = Paths.get(driverJar_);
+
+            // for security reasons, we sandbox the allowed driver jars
+            // into $TRAF_HOME/udr/public/external_libs
+            driverJarPath = driverJarPath.normalize();
+            if (driverJarPath.isAbsolute())
+              {
+                if (! driverJarPath.startsWith(
+                                  LmUtility.getSandboxRootForUser(null)))
+                  throw new UDRException(
+                    38010,
+                    "The jar name of the JDBC driver must be a name relative to %s, got %s",
+                    LmUtility.getSandboxRootForUser(null).toString(),
+                    driverJar_);
+              }
+            else
+              driverJarPath = LmUtility.getExternalLibsDirForUser(null).resolve(
+                    driverJarPath);
+
+            // for security reasons we also reject the Trafodion T2
+            // driver (check both class name and URL)
+            if (driverClassName_.equals("org.apache.trafodion.jdbc.t2.T2Driver"))
+                throw new UDRException(
+                    38012,
+                    "This UDF does not support the Trafodion T2 driver class %s",
+                    driverClassName_);
+
+            if (LmT2Driver.checkURL(connectionString_))
+                throw new UDRException(
+                    38013,
+                    "This UDF does not support the Trafodion T2 driver URL %s",
+                    connectionString_);
+ 
+            // Create a class loader that can access the jar file
+            // specified by the caller. Note that this is only needed
+            // because the JDBC UDR is a predefined UDR and is loaded
+            // by the standard class loader. If it were a regular UDR,
+            // it would have been loaded by LmClassLoader and we would
+            // not need to create an LmClassLoader here.
+            LmClassLoader jdbcJarLoader = LmUtility.createClassLoader(
+                                               driverJarPath.toString(),0);
+
+            Driver d = (Driver) Class.forName(driverClassName_,
+                                              true,
+                                              jdbcJarLoader).newInstance();
+
+            // go through an intermediary driver, since the DriverManager
+            // will not accept classes that are not loaded by the default
+            // class loader
+            DriverManager.registerDriver(new URLDriver(d));
+            conn_ = DriverManager.getConnection(connectionString_,
+                                                username_,
+                                                password_);
+            return conn_;
+          }
+          catch (ClassNotFoundException cnf) {
+              throw new UDRException(
+                38020,
+                "JDBC driver class %s not found. Please make sure the JDBC driver jar %s is stored in %s. Message: %s",
+                driverClassName_,
+                driverJar_,
+                LmUtility.getSandboxRootForUser(null).toString(),
+                cnf.getMessage());
+          }
+          catch (SQLException se) {
+              throw new UDRException(
+                38020,
+                "SQL exception during connect. Message: %s",
+                se.getMessage());
+          }
+          catch (Exception e) {
+              if (debug_)
+                  {
+                      System.out.println("Debug: Exception during connect:");
+                      try { e.printStackTrace(System.out); }
+                      catch (Exception e2) {}
+                  }
+              throw new UDRException(
+                38020,
+                "Exception during connect: %s",
+                e.getMessage());
+          }
+        }
+
+        public Connection getConnection()                 { return conn_; }
+
+        public void disconnect() throws SQLException
+        {
+            conn_.close();
+            conn_ = null;
+        }
+    };
+
+    // list of SQL statements to execute
+    static class SQLStatementInfo
+    {
+        // list of SQL statements to execute
+        Vector<String> sqlStrings_;
+
+        // which of the above is the one that
+        // produces the table-valued result?
+        int resultStatementIndex_;
+
+        // prepared result-producing statement
+        PreparedStatement resultStatement_;
+
+        SQLStatementInfo()
+        {
+            sqlStrings_ = new Vector<String>();
+            resultStatementIndex_ = -1;
+        }
+
+        void addStatementText(String sqlText)
+        {
+            sqlStrings_.add(sqlText);
+        }
+
+        void addResultProducingStatement(PreparedStatement preparedStmt,
+                                         int resultStatementIndex)
+        {
+            resultStatement_ = preparedStmt;
+            resultStatementIndex_ = resultStatementIndex;
+        }
+
+        String getStatementText(int ix)    { return sqlStrings_.get(ix); }
+        PreparedStatement getResultStatement(){ return resultStatement_; }
+        int getNumStatements()              { return sqlStrings_.size(); }
+        int getResultStatementIndex()    { return resultStatementIndex_; }
+    };
+
+    // Define data that gets passed between compiler phases
+    static class JdbcCompileTimeData extends UDRWriterCompileTimeData
+    {
+        JdbcConnectionInfo jci_;
+        SQLStatementInfo sqi_;
+
+        JdbcCompileTimeData()
+        {
+            jci_ = new JdbcConnectionInfo();
+            sqi_ = new SQLStatementInfo();
+        }
+    };
+
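+    // Thin wrapper that forwards all calls to the dynamically loaded driver;
+    // needed because DriverManager ignores drivers loaded by other class loaders.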
+    static class URLDriver implements Driver {
+	private Driver driver_;
+	URLDriver(Driver d) { driver_ = d; }
+	public boolean acceptsURL(String u) throws SQLException {
+            return driver_.acceptsURL(u);
+	}
+	public Connection connect(String u, Properties p) throws SQLException {
+            return driver_.connect(u, p);
+	}
+	public int getMajorVersion() {
+            return driver_.getMajorVersion();
+	}
+	public int getMinorVersion() {
+            return driver_.getMinorVersion();
+	}
+	public DriverPropertyInfo[] getPropertyInfo(String u, Properties p) throws SQLException {
+            return driver_.getPropertyInfo(u, p);
+	}
+	public boolean jdbcCompliant() {
+            return driver_.jdbcCompliant();
+	}
+        public Logger getParentLogger() throws SQLFeatureNotSupportedException {
+            return driver_.getParentLogger();
+        }
+    }
+
+    JdbcConnectionInfo getConnectionInfo(UDRInvocationInfo info) throws UDRException
+    {
+        return ((JdbcCompileTimeData) info.getUDRWriterCompileTimeData()).jci_;
+    }
+
+    SQLStatementInfo getSQLStatementInfo(UDRInvocationInfo info) throws UDRException
+    {
+        return ((JdbcCompileTimeData) info.getUDRWriterCompileTimeData()).sqi_;
+    }
+
+
+    // default constructor
+    public JDBCUDR()
+    {}
+
+    // a method to process the input parameters, this is
+    // used both at compile time and at runtime
+    private void handleInputParams(UDRInvocationInfo info,
+                                   JdbcConnectionInfo jci,
+                                   SQLStatementInfo sqi,
+                                   boolean isCompileTime)
+                                             throws UDRException
+    {
+        int numInParams = info.par().getNumColumns();
+
+        // Right now we don't support table inputs
+        if (isCompileTime && info.getNumTableInputs() != 0)
+            throw new UDRException(
+              38300,
+              "%s must be called with no table-valued inputs",
+              info.getUDRName());
+
+        if (numInParams < 7)
+            throw new UDRException(
+              38310,
+              "Expecting at least 7 parameters for %s UDR",
+              info.getUDRName());
+
+        // loop over scalar input parameters
+        for (int p=0; p<numInParams; p++)
+            {
+                if (isCompileTime &&
+                    ! info.par().isAvailable(p))
+                    throw new UDRException(
+                      38320,
+                      "Parameter %d of %s must be a compile time constant",
+                      p+1,
+                      info.getUDRName());
+
+                String paramValue = info.par().getString(p);
+
+                switch (p)
+                    {
+                    case 0:
+                        jci.setJar(paramValue);
+                        break;
+
+                    case 1:
+                        jci.setClass(paramValue);
+                        break;
+
+                    case 2:
+                        jci.setConnString(paramValue);
+                        break;
+
+                    case 3:
+                        jci.setUsername(paramValue);
+                        break;
+
+                    case 4:
+                        jci.setPassword(paramValue);
+                        break;
+
+                    case 5:
+                        // The only statement type supported so far
+                        // is 'source' (a query); we may support insert later
+                        if (paramValue.compareToIgnoreCase("source") != 0)
+                            throw new UDRException(
+                              38330,
+                              "The only statement type supported so far is 'source' in parameter 6 of %s",
+                              info.getUDRName());
+                        break;
+
+                    default:
+                        // SQL statement (there could be multiple)
+                        sqi.addStatementText(paramValue);
+                        break;
+
+                    }
+
+                if (isCompileTime)
+                    // add the actual parameter as a formal parameter
+                    // (the formal parameter list is initially empty)
+                    info.addFormalParameter(info.par().getColumn(p));
+            }
+
+        jci.setDebug(info.getDebugFlags() != 0);
+
+        // Prepare each provided statement. We will verify that
+        // only one of these statements produces result rows,
+        // which will become our table-valued output.
+        int numSQLStatements = sqi.getNumStatements();
+
+        // sanity check
+        if (numSQLStatements != numInParams-6)
+            throw new UDRException(38340, "internal error");
+
+        if (numSQLStatements < 1)
+            throw new UDRException(38350, "At least one SQL statement must be given in parameters 7 and following");
+
+        if (isCompileTime)
+        {
+            // walk through all statements, check whether they are
+            // valid by preparing them, and determine which one
+            // generates a result set
+            String currentStmtText = "";
+            try
+            {
+                jci.connect();
+
+                for (int s=0; s<numSQLStatements; s++)
+                {
+                    currentStmtText = sqi.getStatementText(s);
+                    // System.out.printf("Statement to prepare: %s\n", currentStmtText);
+                    PreparedStatement preparedStmt =
+                            jci.getConnection().prepareStatement(currentStmtText);
+                    // if (preparedStmt != null)
+                    //    System.out.printf("Prepare was successful\n");
+                    ParameterMetaData pmd = preparedStmt.getParameterMetaData();
+                    if (pmd != null && pmd.getParameterCount() != 0)
+                        throw new UDRException(
+                                38360,
+                                "Statement %s requires %d input parameters, which is not supported",
+                                currentStmtText, pmd.getParameterCount());
+                    ResultSetMetaData desc = preparedStmt.getMetaData();
+
+                    // getMetaData() may return null for non-query statements
+                    int numResultCols = (desc == null) ? 0 : desc.getColumnCount();
+                    // System.out.printf("Number of output columns: %d", numResultCols);
+
+                    if (numResultCols > 0)
+                    {
+                        if (sqi.getResultStatementIndex() >= 0)
+                            throw new UDRException(
+                                    38370,
+                                    "More than one of the statements provided produce output, this is not supported (%d and %d)",
+                                    sqi.getResultStatementIndex()+1,
+                                    s+1);
+
+                        // we found the statement that is producing the result
+                        sqi.addResultProducingStatement(preparedStmt, s);
+
+                        // now add the output columns
+                        for (int c=0; c<numResultCols; c++)
+                        {
+                            String colName = desc.getColumnLabel(c+1);
+                            TypeInfo udrType = getUDRTypeFromJDBCType(desc, c+1);
+                            info.out().addColumn(new ColumnInfo(colName, udrType));
+                        }
+                    }
+                }
+                jci.disconnect();
+            }
+            catch (SQLException e)
+            {
+                throw new UDRException(
+                        38380,
+                        "SQL Exception when preparing SQL statement %s. Exception text: %s",
+                        currentStmtText, e.getMessage());
+            }
+        }
+    }
+
+    TypeInfo getUDRTypeFromJDBCType(ResultSetMetaData desc,
+                                    int colNumOneBased) throws UDRException
+    {
+        TypeInfo result;
+
+        final int maxLength = 100000;
+
+        int colJDBCType;
+
+        // the ingredients to make a UDR type and their default values
+        TypeInfo.SQLTypeCode      sqlType      = TypeInfo.SQLTypeCode.UNDEFINED_SQL_TYPE;
+        int                       length       = 0;
+        boolean                   nullable     = false;
+        int                       scale        = 0;
+        TypeInfo.SQLCharsetCode   charset      = TypeInfo.SQLCharsetCode.CHARSET_UCS2;
+        TypeInfo.SQLIntervalCode  intervalCode = TypeInfo.SQLIntervalCode.UNDEFINED_INTERVAL_CODE;
+        int                       precision    = 0;
+        TypeInfo.SQLCollationCode collation    = TypeInfo.SQLCollationCode.SYSTEM_COLLATION;
+
+        try {
+            colJDBCType = desc.getColumnType(colNumOneBased);
+            nullable = (desc.isNullable(colNumOneBased) != ResultSetMetaData.columnNoNulls);
+
+            // map the JDBC type to a Trafodion UDR parameter type
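+            // (for example, a JDBC DECIMAL(10,2) column becomes
+            // sqlType NUMERIC with precision 10 and scale 2 below)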
+            switch (colJDBCType)
+            {
+            case java.sql.Types.SMALLINT:
+            case java.sql.Types.TINYINT:
+            case java.sql.Types.BOOLEAN:
+                if (desc.isSigned(colNumOneBased))
+                    sqlType = TypeInfo.SQLTypeCode.SMALLINT;
+                else
+                    sqlType = TypeInfo.SQLTypeCode.SMALLINT_UNSIGNED;
+                break;
+
+            case java.sql.Types.INTEGER:
+                if (desc.isSigned(colNumOneBased))
+                    sqlType = TypeInfo.SQLTypeCode.INT;
+                else
+                    sqlType = TypeInfo.SQLTypeCode.INT_UNSIGNED;
+                break;
+
+            case java.sql.Types.BIGINT:
+                sqlType = TypeInfo.SQLTypeCode.LARGEINT;
+                break;
+
+            case java.sql.Types.DECIMAL:
+            case java.sql.Types.NUMERIC:
+                if (desc.isSigned(colNumOneBased))
+                    sqlType = TypeInfo.SQLTypeCode.NUMERIC;
+                else
+                    sqlType = TypeInfo.SQLTypeCode.NUMERIC_UNSIGNED;
+                precision = desc.getPrecision(colNumOneBased);
+                scale = desc.getScale(colNumOneBased);
+                break;
+
+            case java.sql.Types.REAL:
+                sqlType = TypeInfo.SQLTypeCode.REAL;
+                break;
+
+            case java.sql.Types.DOUBLE:
+            case java.sql.Types.FLOAT:
+                sqlType = TypeInfo.SQLTypeCode.DOUBLE_PRECISION;
+                break;
+
+            case java.sql.Types.CHAR:
+            case java.sql.Types.NCHAR:
+                sqlType = TypeInfo.SQLTypeCode.CHAR;
+                length  = Math.min(desc.getPrecision(colNumOneBased), maxLength);
+                charset = TypeInfo.SQLCharsetCode.CHARSET_UCS2;
+                break;
+
+            case java.sql.Types.VARCHAR:
+            case java.sql.Types.NVARCHAR:
+                sqlType = TypeInfo.SQLTypeCode.VARCHAR;
+                length  = Math.min(desc.getPrecision(colNumOneBased), maxLength);
+                charset = TypeInfo.SQLCharsetCode.CHARSET_UCS2;
+                break;
+
+            case java.sql.Types.DATE:
+                sqlType = TypeInfo.SQLTypeCode.DATE;
+                break;
+
+            case java.sql.Types.TIME:
+                sqlType = TypeInfo.SQLTypeCode.TIME;
+                break;
+
+            case java.sql.Types.TIMESTAMP:
+                sqlType = TypeInfo.SQLTypeCode.TIMESTAMP;
+                scale   = 3;
+                break;
+
+                // BLOB - not supported yet, map to varchar
+                // case java.sql.Types.BLOB:
+                // sqlType = TypeInfo.SQLTypeCode.BLOB;
+                // break;
+
+                // CLOB - not supported yet, map to varchar
+                // case java.sql.Types.CLOB:
+                // sqlType = TypeInfo.SQLTypeCode.CLOB;
+                // break;
+
+            case java.sql.Types.ARRAY:
+            case java.sql.Types.BINARY:
+            case java.sql.Types.BIT:
+            case java.sql.Types.BLOB:
+            case java.sql.Types.DATALINK:
+            case java.sql.Types.DISTINCT:
+            case java.sql.Types.JAVA_OBJECT:
+            case java.sql.Types.LONGVARBINARY:
+            case java.sql.Types.NULL:
+            case java.sql.Types.OTHER:
+            case java.sql.Types.REF:
+            case java.sql.Types.STRUCT:
+            case java.sql.Types.VARBINARY:
+                // these types produce a binary result, represented
+                // as varchar(n) character set iso88591
+                sqlType = TypeInfo.SQLTypeCode.VARCHAR;
+                length  = Math.min(desc.getPrecision(colNumOneBased), maxLength);
+                charset = TypeInfo.SQLCharsetCode.CHARSET_ISO88591;
+                break;
+
+            case java.sql.Types.LONGVARCHAR:
+            case java.sql.Types.LONGNVARCHAR:
+            case java.sql.Types.CLOB:
+            case java.sql.Types.NCLOB:
+            case java.sql.Types.ROWID:
+            case java.sql.Types.SQLXML:
+                // these types produce a varchar(n) character set ucs2 result
+                sqlType = TypeInfo.SQLTypeCode.VARCHAR;
+                length  = Math.min(desc.getPrecision(colNumOneBased), maxLength);
+                charset = TypeInfo.SQLCharsetCode.CHARSET_UCS2;
+                break;
+            }
+        } catch (SQLException e) {
+            throw new UDRException(
+                    38500,
+                    "Error determinging the type of output column %d: ",
+                    colNumOneBased,
+                    e.getMessage());
+        }
+
+        result = new TypeInfo(
+                sqlType,
+                length,
+                nullable,
+                scale,
+                charset,
+                intervalCode,
+                precision,
+                collation);
+
+        return result;
+    }
+
+    // determine output columns dynamically at compile time
+    @Override
+    public void describeParamsAndColumns(UDRInvocationInfo info)
+        throws UDRException
+    {
+        // create an object with common info for this
+        // UDF invocation that we will carry through the
+        // compilation phases
+        info.setUDRWriterCompileTimeData(new JdbcCompileTimeData());
+
+        // retrieve the compile time data; we will do this in
+        // every compile phase
+        JdbcConnectionInfo jci = getConnectionInfo(info);
+        SQLStatementInfo   sqi = getSQLStatementInfo(info);
+
+        // process input parameters
+        handleInputParams(info, jci, sqi, true);
+    }
+
+    // override the runtime method
+    @Override
+    public void processData(UDRInvocationInfo info,
+                            UDRPlanInfo plan)
+        throws UDRException
+    {
+        // at runtime there is no compile time data available,
+        // so allocate fresh connection and statement info objects
+        JdbcConnectionInfo jci = new JdbcConnectionInfo();
+        SQLStatementInfo   sqi = new SQLStatementInfo();
+        int numCols = info.out().getNumColumns();
+
+        // process input parameters (again, now at runtime)
+        handleInputParams(info, jci, sqi, false);
+
+        int numSQLStatements = sqi.getNumStatements();
+        int numSQLResultSets = 0;
+        String stmtText = null;
+
+        try {
+            Connection conn = jci.connect();
+            Statement stmt = conn.createStatement();
+
+            for (int s=0; s<numSQLStatements; s++)
+            {
+                stmtText = sqi.getStatementText(s);
+
+                boolean hasResultSet = stmt.execute(stmtText);
+
+                if (hasResultSet)
+                {
+                    ResultSet rs = stmt.getResultSet();
+                    numSQLResultSets++;
+
+                    if (numSQLResultSets > 1)
+                        throw new UDRException(
+                                38700,
+                                "More than one result set returned by UDF %s",
+                                info.getUDRName());
+
+                    if (rs.getMetaData().getColumnCount() != numCols)
+                        throw new UDRException(
+                                38702,
+                                "Number of columns returned by UDF %s (%d) differs from the number determined at compile time (%d)",
+                                info.getUDRName(),
+                                rs.getMetaData().getColumnCount(),
+                                numCols);
+
+                    while (rs.next())
+                    {
+                        for (int c=0; c<numCols; c++)
+                        {
+                            TypeInfo typ = info.out().getColumn(c).getType();
+
+                            switch (typ.getSQLTypeSubClass())
+                            {
+                            case FIXED_CHAR_TYPE:
+                            case VAR_CHAR_TYPE:
+                                info.out().setString(c, rs.getString(c+1));
+                                break;
+
+                            case EXACT_NUMERIC_TYPE:
+                                info.out().setLong(c, rs.getLong(c+1));
+                                break;
+
+                            case APPROXIMATE_NUMERIC_TYPE:
+                                info.out().setDouble(c, rs.getDouble(c+1));
+                                break;
+
+                            case DATE_TYPE:
+                                info.out().setTime(c, rs.getDate(c+1));
+                                break;
+
+                            case TIME_TYPE:
+                                info.out().setTime(c, rs.getTime(c+1));
+                                break;
+
+                            case TIMESTAMP_TYPE:
+                                info.out().setTime(c, rs.getTimestamp(c+1));
+                                break;
+
+                            case LOB_SUB_CLASS:
+                                throw new UDRException(38710, "LOB parameters not yet supported");
+
+                            default:
+                                throw new UDRException(38720, "Unexpected data type encountered");
+
+                            } // switch
+
+                            if (rs.wasNull())
+                                info.out().setNull(c);
+                        } // loop over columns
+
+                        // produce a result row
+                        emitRow(info);
+
+                    } // loop over result rows
+                } // statement produces a result set
+            } // loop over statements
+            jci.disconnect();
+        } catch (SQLException e) {
+            throw new UDRException(
+                    38730,
+                    "Error preparing statement %s at runtime: %s",
+                    stmtText,
+                    e.getMessage());
+        }
+    }
+};
diff --git a/core/sql/nskgmake/tdm_sqlexp/Makefile b/core/sql/nskgmake/tdm_sqlexp/Makefile
index 6b973af..7213449 100755
--- a/core/sql/nskgmake/tdm_sqlexp/Makefile
+++ b/core/sql/nskgmake/tdm_sqlexp/Makefile
@@ -77,7 +77,7 @@
 
 
 CPPSRC += vers_libtdm_sqlexp.cpp
-DEFS := -D_IEEE_FLOAT -DHAVE_INTTYPES_H -DHAVE_NETINET_IN_H -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
+DEFS := -D_IEEE_FLOAT -DHAVE_INTTYPES_H -DHAVE_NETINET_IN_H -D__STDC_CONSTANT_MACROS 
 
 #
 LLVM_OBJ_DIR := $(LLVM)/$(SQ_BUILD_TYPE)/lib
diff --git a/core/sql/optimizer/BindRelExpr.cpp b/core/sql/optimizer/BindRelExpr.cpp
index 7beaf9e..0faae6f 100644
--- a/core/sql/optimizer/BindRelExpr.cpp
+++ b/core/sql/optimizer/BindRelExpr.cpp
@@ -6094,8 +6094,11 @@
   if (prevScope)
     inRowSubquery = prevScope->context()->inRowSubquery();
 
+  NABoolean groupByAggNodeAdded = FALSE;
   if (inRowSubquery && (CmpCommon::getDefault(COMP_BOOL_137) == DF_OFF))
-      addOneRowAggregates(bindWA);
+    // force adding one row aggregates in the [last 0] case
+    groupByAggNodeAdded = addOneRowAggregates(bindWA, 
+                            getFirstNRows() == -2 /* [last 0] case */);
 
   returnedRoot = 
     transformGroupByWithOrdinalPhase2(bindWA);
@@ -6665,36 +6668,79 @@
   if ((getFirstNRows() != -1) ||
        (getFirstNRowsParam()))
     {
-      // create a firstN node to retrieve firstN rows.
-      FirstN * firstn = new(bindWA->wHeap())
-        FirstN(child(0), getFirstNRows(), needFirstSortedRows(), getFirstNRowsParam());   
+      // [first/any/last N] processing
 
-      firstn->bindNode(bindWA);
-      if (bindWA->errStatus())
-        return NULL;
+      RelExpr * nodeToInsertUnder = this;
+      if (inRowSubquery)
+        {
+          // [first/any/last N] in a row subquery special case
+          //
+          // In this case, if N > 1 it is first/any N, and we can simply
+          // ignore that as row subqueries already enforce an at-most-one
+          // row semantic. For [first 1], [last 1], [last 0], we need to
+          // add the node below any one-row aggregate group by node created
+          // earlier in this method. (If we put it above that group by node,
+          // that is too late; the one-row aggregate will raise an 8401 error
+          // before our FirstN node has a chance to narrow the result down to
+          // zero or one rows.) There is an interesting nuance with [last 0]:
+          // We forced the addition of a one-row aggregate group by node
+          // in that case, because [last 0] returns no rows. We might have
+          // a scalar aggregate subquery which ordinarily would not require
+          // a one-row aggregate group, but when [last 0] is present we want
+          // to force the aggregates to become NULL. Adding a one-row 
+          // aggregate group on top of the scalar aggregate, with the FirstN
+          // node in between them, does the trick.
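+          // For example (see the TEST002 cases added in this change):
+          //   select (select [last 0] count(*) from t002sub) as r
+          //   from t002main;
+          // must return NULL for r, which requires the forced one-row
+          // aggregate group by on top of the scalar count(*).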
+          if (groupByAggNodeAdded &&
+               ( (getFirstNRows() == 1) ||   // [first 1] or [any 1]
+                 (getFirstNRows() == -2) ||  // [last 0]
+                 (getFirstNRows() == -3) ) ) // [last 1]
+            {
+              nodeToInsertUnder = child(0);
+              CMPASSERT(nodeToInsertUnder->getOperatorType() == REL_GROUPBY);             
+            }
+          else if (!groupByAggNodeAdded && (getFirstNRows() == -2))  // [last 0]
+            {
+              CMPASSERT(groupByAggNodeAdded);  // a GroupByAgg should have been forced
+            }
+          else  // a case where we can throw the [first/any/last N] away
+            {
+              nodeToInsertUnder = NULL;
+            }
+        }
+          
+      if (nodeToInsertUnder)
+        {
+          // create a firstN node to retrieve firstN rows.
+          FirstN * firstn = new(bindWA->wHeap())
+            FirstN(nodeToInsertUnder->child(0), getFirstNRows(), needFirstSortedRows(), getFirstNRowsParam());   
 
-      // Note: For ORDER BY + [first n], we want to sort the rows before 
-      // picking just n of them. (We don't do this for [any n].) We might
-      // be tempted to copy the orderByTree into the FirstN node at this
-      // point, but this doesn't work. Instead, we copy the bound ValueIds
-      // at normalize time. We have to do this in case there are expressions
-      // involved in the ORDER BY and there is a DESC. The presence of the
-      // Inverse node at the top of the expression tree seems to cause the
-      // expressions underneath to be bound to different ValueIds, which 
-      // causes coverage tests in FirstN::createContextForAChild requirements
-      // generation to fail. An example of where this occurs is:
-      //
-      // prepare s1 from
-      //   select [first 2] y, x from
-      //    (select a,b + 26 from t1) as t(x,y)
-      //   order by y desc;
-      //
-      // If we copy the ORDER BY ItemExpr tree and rebind, we get a different
-      // ValueId for the expression b + 26 in the child characteristic outputs
-      // than what we get for the child of Inverse in Inverse(B + 26). The
-      // trick of copying the already-bound ORDER BY clause later avoids this.
+          firstn->bindNode(bindWA);
+          if (bindWA->errStatus())
+            return NULL;
 
-      setChild(0, firstn);
+          // Note: For ORDER BY + [first n], we want to sort the rows before 
+          // picking just n of them. (We don't do this for [any n].) We might
+          // be tempted to copy the orderByTree into the FirstN node at this
+          // point, but this doesn't work. Instead, we copy the bound ValueIds
+          // at normalize time. We have to do this in case there are expressions
+          // involved in the ORDER BY and there is a DESC. The presence of the
+          // Inverse node at the top of the expression tree seems to cause the
+          // expressions underneath to be bound to different ValueIds, which 
+          // causes coverage tests in FirstN::createContextForAChild requirements
+          // generation to fail. An example of where this occurs is:
+          //
+          // prepare s1 from
+          //   select [first 2] y, x from
+          //    (select a,b + 26 from t1) as t(x,y)
+          //   order by y desc;
+          //
+          // If we copy the ORDER BY ItemExpr tree and rebind, we get a different
+          // ValueId for the expression b + 26 in the child characteristic outputs
+          // than what we get for the child of Inverse in Inverse(B + 26). The
+          // trick of copying the already-bound ORDER BY clause later avoids this.
+
+          nodeToInsertUnder->setChild(0, firstn);
+        }
 
       // reset firstN indication in the root node.
       setFirstNRows(-1);
@@ -14652,12 +14698,9 @@
 
   if (! describedTableName_.getQualifiedNameObj().getObjectName().isNull())
     {
-      if ((getFormat() >= CONTROL_FIRST_) &&
-          (getFormat() <= CONTROL_LAST_))
-        {
+      if (getIsControl())
           describedTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema());
-        }
-      else
+      if (NOT getIsControl())
         {
           // do not override schema for showddl
           bindWA->setToOverrideSchema(FALSE);  
@@ -14666,27 +14709,20 @@
           // describedTableName_ is qualified by getNATable
           if (describedTableName_.getQualifiedNameObj().getSchemaName().isNull())
             setToTryPublicSchema(TRUE);
-      
-          bindWA->getNATable(describedTableName_);
-          if (bindWA->errStatus()) 
+
+          if ((getFormat() == Describe::INVOKE_) ||
+              (getFormat() == Describe::SHOWDDL_) &&
+              (getLabelAnsiNameSpace() == COM_TABLE_NAME) &&
+              (NOT getIsSchema()))
             {
-              // if volatile related error, return it.
-              // Otherwise, clear diags and let this error be caught
-              // when describe is executed.
-              if ((CmpCommon::diags()->mainSQLCODE() == -4190) ||
-                  (CmpCommon::diags()->mainSQLCODE() == -4191) ||
-                  (CmpCommon::diags()->mainSQLCODE() == -4192) ||
-                  (CmpCommon::diags()->mainSQLCODE() == -4193) ||
-                  (CmpCommon::diags()->mainSQLCODE() == -4155) || // define not supported
-                  (CmpCommon::diags()->mainSQLCODE() == -4086) || // catch Define Not Found error
-                  (CmpCommon::diags()->mainSQLCODE() == -30044)|| // default schema access error
-                  (CmpCommon::diags()->mainSQLCODE() == -4261) || // reserved schema
-                  (CmpCommon::diags()->mainSQLCODE() == -1398))   // uninit hbase
-                    return this;
-      
-              CmpCommon::diags()->clear();
-              bindWA->resetErrStatus();
+              bindWA->getNATableInternal(describedTableName_);
+              if (bindWA->errStatus())
+                {
+                  return this;
+                }
             }
+          else
+            describedTableName_.applyDefaults(bindWA, bindWA->getDefaultSchema());
         }
       if (pUUDFName_ NEQ NULL AND NOT pUUDFName_->getObjectName().isNull())
       {
@@ -17226,6 +17262,18 @@
           for (int i=0; i<getArity(); i++)
             result->child(i) = child(i);
 
+          if (opType == REL_TABLE_MAPPING_BUILTIN_LOG_READER ||
+              opType == REL_TABLE_MAPPING_BUILTIN_JDBC)
+            {
+              // The event log reader and JDBC TMUDFs are being migrated
+              // to real UDFs, use of the predefined UDFs is deprecated.
+              // Issue a warning. Eventually these predefined functions
+              // will be removed.
+              (*CmpCommon::diags())
+                << DgSqlCode(4323)
+                << DgString0(tmfuncName.getExposedNameAsAnsiString());
+            }
+
           // Abandon the current node and return the bound new node.
           // Next time it will reach this method it will call an
           // overloaded getRoutineMetadata() that will succeed.
diff --git a/core/sql/optimizer/HDFSHook.cpp b/core/sql/optimizer/HDFSHook.cpp
index dfc80ad..4082679 100644
--- a/core/sql/optimizer/HDFSHook.cpp
+++ b/core/sql/optimizer/HDFSHook.cpp
@@ -29,6 +29,7 @@
 
 // for DNS name resolution
 #include <netdb.h>
+#include "HdfsClient_JNI.h"
 #include "Globals.h"
 #include "Context.h"
 // Initialize static variables
@@ -894,7 +895,13 @@
       // put back fully qualified URI
       tableDir = hsd->location_;
 
-      // visit the directory
+      // get the fine-resolution timestamp before visiting
+      // the tree, to avoid losing any updates while this
+      // method is executing
+      computeModificationTSmsec();
+
+      if (diags_.isSuccess())
+        // visit the directory
       processDirectory(tableDir, hsd->buckets_, 
                        hsd->isTrulyText(), 
                        hsd->getRecordTerminator());
@@ -1148,6 +1155,32 @@
   // is dropped or the thread exits.
 }
 
+void HHDFSTableStats::computeModificationTSmsec()
+{
+  if (modificationTSInMillisec_ <= 0)
+    {
+      HDFS_Client_RetCode rc;
+
+      // get a millisecond-resolution timestamp via JNI
+      rc = HdfsClient::getHiveTableMaxModificationTs(
+               modificationTSInMillisec_,
+               tableDir_.data(),
+               numOfPartCols_);
+      // check for errors and timestamp mismatches
+      if (rc != HDFS_CLIENT_OK || modificationTSInMillisec_ <= 0)
+        {
+          NAString errMsg;
+
+          errMsg.format("Error %d when reading msec timestamp for HDFS URL %s",
+                        rc,
+                        tableDir_.data());
+          diags_.recordError(errMsg, "HHDFSTableStats::computeModificationTSmsec");
+          modificationTSInMillisec_ = -1;
+        }
+    }
+
+  return;
+}
 
 OsimHHDFSStatsBase* HHDFSTableStats::osimSnapShot(NAMemory * heap)
 {
diff --git a/core/sql/optimizer/HDFSHook.h b/core/sql/optimizer/HDFSHook.h
index f6f64fa..1aec212 100644
--- a/core/sql/optimizer/HDFSHook.h
+++ b/core/sql/optimizer/HDFSHook.h
@@ -319,7 +319,11 @@
                                     validationJTimestamp_(-1),
                                     listPartitionStatsList_(heap),
                                     hiveStatsSize_(0),
-                                    heap_(heap) {}
+                                    heap_(heap),
+                                    type_(UNKNOWN_),
+                                    modificationTSInMillisec_(-1)
+  {}
+
   ~HHDFSTableStats();
 
   const CollIndex entries() const          { return listPartitionStatsList_.entries(); }
@@ -393,6 +397,12 @@
   const Lng32 numOfPartCols() const { return numOfPartCols_; }
   const Lng32 totalNumPartitions() const { return totalNumPartitions_; }
 
+  // finer-resolution timestamp for entire table
+  // (can remove this once we use JNI to collect this info
+  // for all HDFS files)
+  Int64 getModificationTSmsec() const { return modificationTSInMillisec_; }
+  void computeModificationTSmsec();
+
 private:
   enum FileType
   {
@@ -443,8 +453,10 @@
   HHDFSDiags diags_;
 
   NAMemory *heap_;
-
+ 
   FileType type_;
+
+  Int64 modificationTSInMillisec_;
 };
 
 #endif
diff --git a/core/sql/optimizer/Inlining.cpp b/core/sql/optimizer/Inlining.cpp
index f8d7b66..e93e33b 100644
--- a/core/sql/optimizer/Inlining.cpp
+++ b/core/sql/optimizer/Inlining.cpp
@@ -1888,7 +1888,7 @@
 			     NABoolean isIMInsert,
 			     NABoolean useInternalSyskey,
                              NABoolean isForUpdateOrMergeUpdate,
-                             NABoolean isForMerge, // mergeDelete OR mergeUpdate
+                             NABoolean mergeDeleteWithInsertOrMergeUpdate,
 			     NABoolean isEffUpsert)
 {
    
@@ -1906,7 +1906,8 @@
   // that correspond to the base table. Hence we introduce 
   // robustDelete below. This flag could also be called 
   // isIMOnAUniqueIndexForMerge
-  NABoolean robustDelete = (isForMerge && index->isUniqueIndex()) || (isEffUpsert && index->isUniqueIndex());
+  NABoolean robustDelete = (mergeDeleteWithInsertOrMergeUpdate && index->isUniqueIndex()) || 
+                           (isEffUpsert && index->isUniqueIndex());
 
   tableCorrName.setCorrName(isIMInsert ?  NEWCorr : OLDCorr);
   
@@ -2127,8 +2128,15 @@
   if (getOperatorType() == REL_UNARY_DELETE ||
       getOperatorType() == REL_UNARY_UPDATE ||
       isEffUpsert)
-    
-    indexDelete = indexOp = createIMNode(bindWA,
+    {
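+      // IM needs the robust delete only when the merge statement can
+      // also insert rows (merge update, or merge delete with insert)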
+      NABoolean mergeDeleteWithInsertOrMergeUpdate = isMerge();
+      if (mergeDeleteWithInsertOrMergeUpdate && 
+          (getOperatorType() == REL_UNARY_DELETE) && 
+          (!insertValues()))
+        // merge delete without an insert
+        mergeDeleteWithInsertOrMergeUpdate = FALSE;
+      
+      indexDelete = indexOp = createIMNode(bindWA,
 					 tableCorrName,
                                          indexCorrName,
 					 index,
@@ -2136,8 +2144,9 @@
 					 FALSE,
     			       		 useInternalSyskey,
                                          isForUpdateOrMergeUpdate,
-                                         isMerge(),
+                                         mergeDeleteWithInsertOrMergeUpdate,
 					 isEffUpsert);
+    }
 
   if ((getOperatorType() == REL_UNARY_UPDATE) || isEffUpsert){
     indexOp = new (bindWA->wHeap()) Union(indexDelete, indexInsert, 
diff --git a/core/sql/optimizer/NATable.cpp b/core/sql/optimizer/NATable.cpp
index d657a48..cb0cc39 100644
--- a/core/sql/optimizer/NATable.cpp
+++ b/core/sql/optimizer/NATable.cpp
@@ -87,6 +87,7 @@
 #define MAX_NODE_NAME 9
 
 #include "SqlParserGlobals.h"
+#include "HdfsClient_JNI.h"
 
 //#define __ROSETTA
 //#include "rosetta_ddl_include.h"
diff --git a/core/sql/optimizer/OptimizerSimulator.cpp b/core/sql/optimizer/OptimizerSimulator.cpp
index 53af84d..83e69e6 100644
--- a/core/sql/optimizer/OptimizerSimulator.cpp
+++ b/core/sql/optimizer/OptimizerSimulator.cpp
@@ -55,7 +55,6 @@
 #include "HBaseClient_JNI.h"
 
 #include "vproc.h"
-#include "hdfs.h"
 #include "CmpSeabaseDDL.h"
 #include "ExExeUtilCli.h"
 #include "ComUser.h"
@@ -1883,7 +1882,7 @@
    retcode = cliInterface_->fetchAllRows(queue_, query, 0, FALSE, FALSE, TRUE);
   //retrieve diags area after running the query above;
   //if there's any error, we can get the details.
-   cliInterface_->retrieveSQLDiagnostics(0);
+   cliInterface_->retrieveSQLDiagnostics(CmpCommon::diags());
 
    cmpSBD_->switchBackCompiler();
    
diff --git a/core/sql/optimizer/RelExpr.cpp b/core/sql/optimizer/RelExpr.cpp
index 5c0af91..5096b8b 100644
--- a/core/sql/optimizer/RelExpr.cpp
+++ b/core/sql/optimizer/RelExpr.cpp
@@ -7812,6 +7812,8 @@
   myLocalExpr += child(0).getGroupAttr()->getCharacteristicInputs();
   myLocalExpr += groupExpr();
   myLocalExpr += aggregateExpr();
+  // make sure we can still produce our characteristic outputs too
+  myLocalExpr += getGroupAttr()->getCharacteristicOutputs();
           
   // consider only preds that we can evaluate in the parent
   if (optionalMap)
@@ -11388,8 +11390,9 @@
   pMvBindContextForScope_ = pMvBindContext;
 }
 
-void RelRoot::addOneRowAggregates(BindWA* bindWA)
+NABoolean RelRoot::addOneRowAggregates(BindWA* bindWA, NABoolean forceGroupByAgg)
 {
+  NABoolean groupByAggNodeAdded = FALSE;
   RelExpr * childOfRoot = child(0);
   GroupByAgg *aggNode = NULL;
   // If the One Row Subquery is already enforced by a scalar aggregate
@@ -11404,7 +11407,11 @@
   // way out and add a one row aggregate.
   // Also if the groupby is non scalar then we need to add a one row aggregate.
   // Also if we have select max(a) + select b from t1 from t2;
-  if (childOfRoot->getOperatorType() == REL_GROUPBY)
+  // Still another exception is if there is a [last 0] on top of this node. We
+  // need an extra GroupByAgg node with one row aggregates in this case so
+  // we can put the FirstN node underneath that.
+  if (!forceGroupByAgg &&
+      (childOfRoot->getOperatorType() == REL_GROUPBY))
     {
       aggNode = (GroupByAgg *)childOfRoot;
 
@@ -11421,7 +11428,7 @@
 
     }
   if (aggNode)
-    return ;
+    return groupByAggNodeAdded;
 
   const RETDesc *oldTable = getRETDesc();
   RETDesc *resultTable = new(bindWA->wHeap()) RETDesc(bindWA);
@@ -11469,9 +11476,12 @@
 
   newGrby->bindNode(bindWA) ;
   child(0) = newGrby ;
+  groupByAggNodeAdded = TRUE;
   // Set the return descriptor
   //
   setRETDesc(resultTable);
+
+  return groupByAggNodeAdded;
 }
 // -----------------------------------------------------------------------
 // member functions for class PhysicalRelRoot
diff --git a/core/sql/optimizer/RelMisc.h b/core/sql/optimizer/RelMisc.h
index e6920ca..9ea9782 100644
--- a/core/sql/optimizer/RelMisc.h
+++ b/core/sql/optimizer/RelMisc.h
@@ -521,7 +521,8 @@
   // defined in generator/GenRelMisc.cpp
   TrafQuerySimilarityInfo * genSimilarityInfo(Generator *generator);
 
-  void addOneRowAggregates(BindWA * bindWA);
+  // returns TRUE if a GroupByAgg node was added
+  NABoolean addOneRowAggregates(BindWA * bindWA, NABoolean forceGroupByAgg);
 
   inline void setNumBMOs(unsigned short num) { numBMOs_ = num; }
   inline unsigned short getNumBMOs() { return numBMOs_; }
diff --git a/core/sql/optimizer/RelScan.h b/core/sql/optimizer/RelScan.h
index 58af7cd..c026914 100644
--- a/core/sql/optimizer/RelScan.h
+++ b/core/sql/optimizer/RelScan.h
@@ -882,7 +882,8 @@
                              char* &hdfsHostName,
                              Int32 &hdfsPort,
                              NABoolean &doMultiCursor,
-                             NABoolean &doSplitFileOpt);
+                             NABoolean &doSplitFileOpt,
+                             NABoolean &isHdfsCompressed);
   static short genForOrc(Generator * generator,
                          const HHDFSTableStats* hTabStats,
                          const PartitioningFunction * mypart,
@@ -1614,7 +1615,7 @@
     INVOKE_,  // describe sql/mp INVOKE style
     SHOWSTATS_, //display histograms for specified table
     SHORT_,   // just show ddl for table
-    LONG_,    // show everything about this table (ddl, indexes, views, etc)
+    SHOWDDL_,    // show everything about this table (ddl, indexes, views, etc)
     PLAN_,    // return information about runtime plan. Currently, details
               // about expressions and clauses are the only info returned.
               // For internal debugging use only. Not externalized to users.
@@ -1657,7 +1658,7 @@
 
   Describe(char * originalQuery,
            const CorrName &describedTableName,
-           Format format = LONG_,
+           Format format = SHOWDDL_,
            ComAnsiNameSpace labelAnsiNameSpace = COM_TABLE_NAME,
            ULng32 flags = 0,
 	   NABoolean header = TRUE)
@@ -1688,7 +1689,7 @@
 
   Describe(char * originalQuery,
            const SchemaName &schemaName,
-           Format format = LONG_,
+           Format format = SHOWDDL_,
            ULng32 flags = 0,
 	   NABoolean header = TRUE)
     : Scan(REL_DESCRIBE),
@@ -1731,7 +1732,7 @@
   Describe(char * originalQuery,
            ComIdClass authIDClass,
            const NAString &authIDName,
-           Format format = LONG_,
+           Format format = SHOWDDL_,
            ULng32 flags = 0,
            NABoolean header = TRUE)
     : Scan(REL_DESCRIBE),
@@ -1764,7 +1765,7 @@
   // constructor used for SHOWDDL USER and SHOWDDL ROLE
   Describe(char * originalQuery,
            const NAString &componentName,
-           Format format = LONG_,
+           Format format = SHOWDDL_,
            ULng32 flags = 0,
            NABoolean header = TRUE)
     : Scan(REL_DESCRIBE),
@@ -1873,6 +1874,9 @@
     return labelAnsiNameSpace_;
   }
 
+  NABoolean getIsControl() const { return ((getFormat() >= CONTROL_FIRST_) &&
+                                           (getFormat() <= CONTROL_LAST_)); }
+
   // TRUE  => output detail (long) label info
   // FALSE => output short label info
   NABoolean getLabelDetail() const
@@ -2007,7 +2011,7 @@
       format_ == INVOKE_    ||
       format_ == SHOWSTATS_ ||
       format_ == SHORT_     ||
-      format_ == LONG_      ||
+      format_ == SHOWDDL_   ||
       format_ == LABEL_) ;
   }
 
diff --git a/core/sql/optimizer/ScanOptimizer.cpp b/core/sql/optimizer/ScanOptimizer.cpp
index 9d859f5..c9b40aa 100644
--- a/core/sql/optimizer/ScanOptimizer.cpp
+++ b/core/sql/optimizer/ScanOptimizer.cpp
@@ -10350,10 +10350,11 @@
   CostScalar costAdj = (ActiveSchemaDB()->getDefaults()).getAsDouble(NCM_MDAM_COST_ADJ_FACTOR);
   scmCost_->cpScmlr().scaleByValue(costAdj);
 
-  // If the cost exceeds the bound, MDAM loses
+  // If MDAM is not forced and the cost exceeds the bound, MDAM loses
 
-  if (costBoundPtr_ != NULL &&
-      costBoundPtr_->scmCompareCosts(*scmCost_) == LESS)
+  if (!mdamForced_ &&
+      (costBoundPtr_ != NULL) &&
+      (costBoundPtr_->scmCompareCosts(*scmCost_) == LESS))
   {
     mdamWon_ = FALSE;
     MDAM_DEBUG0(MTL2, "Mdam scan lost due to higher cost determined by scmCompareCosts()");
diff --git a/core/sql/optimizer/mdam.cpp b/core/sql/optimizer/mdam.cpp
index 4570fc9..0a26267 100644
--- a/core/sql/optimizer/mdam.cpp
+++ b/core/sql/optimizer/mdam.cpp
@@ -2002,18 +2002,26 @@
 	      inVidset.next(predId);
 	      inVidset.advance(predId) )
 	 {
-            if(predId.getItemExpr()->getOperatorType() == ITM_RANGE_SPEC_FUNC )
-            {
-	       if(predId.getItemExpr()->child(1)->getOperatorType() == ITM_AND ){
-	          predId.getItemExpr()->child(1)->convertToValueIdSet(parsedVs, NULL, ITM_AND, FALSE);
-		    outVidset +=parsedVs;
-            }
-	    else if(predId.getItemExpr()->child(1)->getOperatorType() != ITM_AND 
-			 && predId.getItemExpr()->child(1)->getOperatorType() != ITM_OR)
-	       outVidset += predId.getItemExpr()->child(1)->castToItemExpr()->getValueId();	    
+           //TRAFODION-2988
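+           // (illustration: a predicate such as "a = 1 or a = 3" may
+           // arrive here as a rangespec whose right child is an OR
+           // tree; such a rangespec is now kept whole)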
+           if( ITM_RANGE_SPEC_FUNC == predId.getItemExpr()->getOperatorType() )
+           {
+               if( ITM_AND == predId.getItemExpr()->child(1)->getOperatorType() )
+               {
+                   predId.getItemExpr()->child(1)->convertToValueIdSet(parsedVs, NULL, ITM_AND, FALSE);
+                   outVidset += parsedVs;
+               }
+               else if( ITM_OR == predId.getItemExpr()->child(1)->getOperatorType() )
+               {
+                   // handle the OR case: keep the whole rangespec predicate
+                   outVidset += predId;
+               }
+               else
+               {
+                   outVidset += predId.getItemExpr()->child(1)->castToItemExpr()->getValueId();
+               }
+           }
-	   else
-	     outVidset +=predId;
+           else
+               outVidset += predId;
+           //TRAFODION-2988
 	 parsedVs.clear();
 	}
 
diff --git a/core/sql/parser/SqlParserAux.cpp b/core/sql/parser/SqlParserAux.cpp
index c73d78d..02f595c 100644
--- a/core/sql/parser/SqlParserAux.cpp
+++ b/core/sql/parser/SqlParserAux.cpp
@@ -3231,7 +3231,7 @@
     pDescribe = new (PARSERHEAP())
       Describe ( SQLTEXT()
                , *optional_showddl_action_name_clause // in - const CorrName & - deep copy
-               , Describe::LONG_
+               , Describe::SHOWDDL_
                , COM_UUDF_ACTION_NAME                 // in - ComAnsiNameSpace labelAnsiNameSpace_
                , optional_showddlroutine_options      // in - long optional_showddlroutine_options
                );
@@ -3242,7 +3242,7 @@
     pDescribe = new (PARSERHEAP())
       Describe ( SQLTEXT()
                , *actual_routine_name_of_udf_or_uudf // in - const CorrName & - deep copy
-               , Describe::LONG_
+               , Describe::SHOWDDL_
                , COM_UDF_NAME                        // in - ComAnsiNameSpace labelAnsiNameSpace_
                , optional_showddlroutine_options     // in - long optional_showddlroutine_options
                );
diff --git a/core/sql/parser/sqlparser.y b/core/sql/parser/sqlparser.y
index 69df367..d7ea7f2 100755
--- a/core/sql/parser/sqlparser.y
+++ b/core/sql/parser/sqlparser.y
@@ -22695,7 +22695,7 @@
 	     {
 	       $$ = new (PARSERHEAP())
 		 RelRoot(new (PARSERHEAP())
-			 Describe(SQLTEXT(), *$2, Describe::LONG_,
+			 Describe(SQLTEXT(), *$2, Describe::SHOWDDL_,
 			          COM_TABLE_NAME, $3/*optional_sqlmp_option*/),
 			 REL_ROOT,	
 			 new (PARSERHEAP())
@@ -22711,7 +22711,7 @@
 	     {
 	       $$ = new (PARSERHEAP())
 		 RelRoot(new (PARSERHEAP())
-			 Describe(SQLTEXT(), *$3, Describe::LONG_, 
+			 Describe(SQLTEXT(), *$3, Describe::SHOWDDL_, 
 			          COM_TABLE_NAME, $4/*optional_sqlmp_option*/),
 			 REL_ROOT,	
 			 new (PARSERHEAP())
@@ -22724,7 +22724,7 @@
                  ->getQualifiedNameObj().setObjectNameSpace(COM_UDF_NAME);
 	       $$ = new (PARSERHEAP())
 		 RelRoot(new (PARSERHEAP())
-			 Describe(SQLTEXT(), *$3/*actual_routine_name*/, Describe::LONG_, 
+			 Describe(SQLTEXT(), *$3/*actual_routine_name*/, Describe::SHOWDDL_, 
 			          COM_UDF_NAME, $4/*optional_showddl_options_lsit*/),
 			 REL_ROOT,
 			 new (PARSERHEAP())
@@ -22734,7 +22734,7 @@
 	     {
 	       $$ = new (PARSERHEAP())
 		 RelRoot(new (PARSERHEAP())
-			 Describe(SQLTEXT(), *$3, Describe::LONG_, $4),
+			 Describe(SQLTEXT(), *$3, Describe::SHOWDDL_, $4),
 			 REL_ROOT,	
 			 new (PARSERHEAP())
 			 ColReference(new (PARSERHEAP()) ColRefName(TRUE, PARSERHEAP())));
@@ -22743,7 +22743,7 @@
             {
               $$ = new (PARSERHEAP())
                 RelRoot(new (PARSERHEAP())
-                  Describe(SQLTEXT(), COM_USER_CLASS, *$3, Describe::LONG_),
+                  Describe(SQLTEXT(), COM_USER_CLASS, *$3, Describe::SHOWDDL_),
                   REL_ROOT,
                   new (PARSERHEAP())
                   ColReference(new (PARSERHEAP()) ColRefName(TRUE, PARSERHEAP()))); 
@@ -22753,7 +22753,7 @@
             {
               $$ = new (PARSERHEAP())
                 RelRoot(new (PARSERHEAP())
-                  Describe(SQLTEXT(), COM_ROLE_CLASS, *$2, Describe::LONG_, $3),
+                  Describe(SQLTEXT(), COM_ROLE_CLASS, *$2, Describe::SHOWDDL_, $3),
                   REL_ROOT,
                   new (PARSERHEAP())
                   ColReference(new (PARSERHEAP()) ColRefName(TRUE, PARSERHEAP())));
@@ -22776,7 +22776,7 @@
                    ->getQualifiedNameObj().setObjectNameSpace(COM_LIBRARY_NAME); 
   	         $$ = new (PARSERHEAP())
         		 RelRoot(new (PARSERHEAP()) 
-    	  		 Describe(SQLTEXT(), *$2, Describe::LONG_, COM_LIBRARY_NAME, $3),
+    	  		 Describe(SQLTEXT(), *$2, Describe::SHOWDDL_, COM_LIBRARY_NAME, $3),
   	  	           	 REL_ROOT, new (PARSERHEAP())
   			     ColReference(new (PARSERHEAP()) ColRefName(TRUE, PARSERHEAP())));
   	         delete $2; // CorrName * qualified_name
@@ -22786,7 +22786,7 @@
 	     {
 	       $$ = new (PARSERHEAP())
 		 RelRoot(new (PARSERHEAP())
-			 Describe(SQLTEXT(), *$2, Describe::LONG_, 
+			 Describe(SQLTEXT(), *$2, Describe::SHOWDDL_, 
 			          COM_SEQUENCE_GENERATOR_NAME, $3),
 			 REL_ROOT,	
 			 new (PARSERHEAP())
@@ -22803,7 +22803,7 @@
 		 ->getQualifiedNameObj().setObjectNameSpace(COM_UDF_NAME); // SPJ
 	       $$ = new (PARSERHEAP())
 		 RelRoot(new (PARSERHEAP())
-			 Describe(SQLTEXT(), *$3, Describe::LONG_, COM_UDF_NAME, $4),
+			 Describe(SQLTEXT(), *$3, Describe::SHOWDDL_, COM_UDF_NAME, $4),
 			 REL_ROOT,	
 			 new (PARSERHEAP())
 			 ColReference(new (PARSERHEAP()) ColRefName(TRUE, PARSERHEAP())));
diff --git a/core/sql/regress/core/EXPECTED002.LINUX b/core/sql/regress/core/EXPECTED002.LINUX
index 6d2a190..fd9659c 100644
--- a/core/sql/regress/core/EXPECTED002.LINUX
+++ b/core/sql/regress/core/EXPECTED002.LINUX
@@ -21,6 +21,13 @@
 --- SQL operation complete.
 >>#ifMX
 >>
+>>create table t002main (a int not null, b int, primary key (a));
+
+--- SQL operation complete.
+>>create table t002sub  (x int not null, y int, primary key (x));
+
+--- SQL operation complete.
+>>
 >>?section dml
 >>-- INSERT queries
 >>insert into t002t1 values (10, 'abc', 20, 'xy');
@@ -48,6 +55,13 @@
 --- 2 row(s) inserted.
 >>#ifMX
 >>
+>>insert into t002main values (1,1);
+
+--- 1 row(s) inserted.
+>>insert into t002sub values (1,1), (2,1);
+
+--- 2 row(s) inserted.
+>>
 >>?section subqtests
 >>-- Expect 2 identical rows saying "2 2";
 >>-- thus certain queries following expect 2 identical rows
@@ -464,22 +478,16 @@
 >>
 >>
 >>------------------------------------------------------------------------
->>?section Genesis_10_000222_6892_p1
->>SELECT 1 FROM T002T3 T1
-+>GROUP BY T1.A
-+>HAVING T1.A >ANY
-+>  ( SELECT 2 FROM T002T1 T2
-+>    WHERE T2.C >SOME
-+>      ( SELECT AVG (T1.A) FROM T002T1 T3 )
-+>  );
-
-(EXPR)
-------
-
-     1
-     1
-
---- 2 row(s) selected.
+>>-- This test is disabled since it is non-deterministic.
+>>-- It will be enabled after further investigation.
+>>-- ?section Genesis_10_000222_6892_p1
+>>-- SELECT 1 FROM T002T3 T1
+>>-- GROUP BY T1.A
+>>-- HAVING T1.A >ANY
+>>--  ( SELECT 2 FROM T002T1 T2
+>>--    WHERE T2.C >SOME
+>>--      ( SELECT AVG (T1.A) FROM T002T1 T3 )
+>>--  );
 >>
 >>?section Genesis_10_000222_6892_p2
 >>SELECT 1 FROM T002T1 T1
@@ -1149,4 +1157,111 @@
 >> -- cnt
 >>------------------------------------------------------------------------
 >>
+>>-- Tests of the interaction of [first n] etc. with subqueries
+>>
+>>-- Should return 1
+>>select 
++>(select [FIRST 1] y aa from t002sub b where b.x = a.b) as result_value
++>from t002main a;
+
+RESULT_VALUE
+------------
+
+           1
+
+--- 1 row(s) selected.
+>>
+>>-- Should return null
+>>select 
++>(select [last 0] y aa from t002sub b where b.x = a.b) as result_value
++>from t002main a;
+
+RESULT_VALUE
+------------
+
+           ?
+
+--- 1 row(s) selected.
+>>
+>>-- Should get a cardinality violation (error 8401)
+>>select 
++>(select [first 2] y aa from t002sub b where b.y = a.b) as result_value
++>from t002main a;
+
+*** ERROR[8401] A row subquery or SELECT...INTO statement cannot return more than one row.
+
+--- 0 row(s) selected.
+>>
+>>-- Should return 1
+>>select 
++>(select [first 1] y aa from t002sub b where b.y = a.b) as result_value
++>from t002main a;
+
+RESULT_VALUE
+------------
+
+           1
+
+--- 1 row(s) selected.
+>>
+>>-- Should return 1
+>>select 
++>(select [last 1] y aa from t002sub b where b.y = a.b) as result_value
++>from t002main a;
+
+RESULT_VALUE
+------------
+
+           1
+
+--- 1 row(s) selected.
+>>
+>>-- Should return null
+>>select 
++>(select [last 0] y aa from t002sub b where b.y = a.b) as result_value
++>from t002main a;
+
+RESULT_VALUE
+------------
+
+           ?
+
+--- 1 row(s) selected.
+>>
+>>-- Should return null
+>>select
++>(select [last 0] count(*) from t002sub) as result_value
++>from t002main;
+
+RESULT_VALUE        
+--------------------
+
+                   ?
+
+--- 1 row(s) selected.
+>>
+>>-- Should return 2
+>>select
++>(select [first 20] count(*) from t002sub) as result_value
++>from t002main;
+
+RESULT_VALUE        
+--------------------
+
+                   2
+
+--- 1 row(s) selected.
+>>
+>>-- Should return null
+>>select
++>(select [last 0] x from t002sub) as result_value
++>from t002main;
+
+RESULT_VALUE
+------------
+
+           ?
+
+--- 1 row(s) selected.
+>>
 >>log;
diff --git a/core/sql/regress/core/EXPECTED116 b/core/sql/regress/core/EXPECTED116
index fe69776..0d65c19 100644
--- a/core/sql/regress/core/EXPECTED116
+++ b/core/sql/regress/core/EXPECTED116
@@ -710,7 +710,8 @@
 
 *** ERROR[4082] Object TRAFODION.SCH.T116T2 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>create table t116t2 (a int);
 
 --- SQL operation complete.
@@ -732,7 +733,8 @@
 
 *** ERROR[4082] Object TRAFODION.SCH.T116T2 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>create table t116t2 (a int);
 
 --- SQL operation complete.
diff --git a/core/sql/regress/core/TEST002 b/core/sql/regress/core/TEST002
index 5d0889e..34529c7 100755
--- a/core/sql/regress/core/TEST002
+++ b/core/sql/regress/core/TEST002
@@ -50,6 +50,9 @@
 create table t002ut2 (a int not null, b nchar(9), c int, d nchar(4), primary key (a));
 #ifMX
 
+create table t002main (a int not null, b int, primary key (a));
+create table t002sub  (x int not null, y int, primary key (x));
+
 ?section dml
 -- INSERT queries
 insert into t002t1 values (10, 'abc', 20, 'xy');
@@ -63,6 +66,9 @@
 insert into t002ut2 select * from t002ut1;
 #ifMX
 
+insert into t002main values (1,1);
+insert into t002sub values (1,1), (2,1);
+
 ?section subqtests
 -- Expect 2 identical rows saying "2 2";
 -- thus certain queries following expect 2 identical rows
@@ -533,6 +539,53 @@
 select COUNT(O.X) from t002FU O having exists(select COUNT(I.X) from t002FUI I); -- cnt
 ------------------------------------------------------------------------
 
+-- Tests of the interaction of [first n] etc. with subqueries
+
+-- Should return 1
+select 
+(select [FIRST 1] y aa from t002sub b where b.x = a.b) as result_value
+from t002main a;
+
+-- Should return null
+select 
+(select [last 0] y aa from t002sub b where b.x = a.b) as result_value
+from t002main a;
+
+-- Should get a cardinality violation (error 8401)
+select 
+(select [first 2] y aa from t002sub b where b.y = a.b) as result_value
+from t002main a;
+
+-- Should return 1
+select 
+(select [first 1] y aa from t002sub b where b.y = a.b) as result_value
+from t002main a;
+
+-- Should return 1
+select 
+(select [last 1] y aa from t002sub b where b.y = a.b) as result_value
+from t002main a;
+
+-- Should return null
+select 
+(select [last 0] y aa from t002sub b where b.y = a.b) as result_value
+from t002main a;
+
+-- Should return null
+select
+(select [last 0] count(*) from t002sub) as result_value
+from t002main;
+
+-- Should return 2
+select
+(select [first 20] count(*) from t002sub) as result_value
+from t002main;
+
+-- Should return null
+select
+(select [last 0] x from t002sub) as result_value
+from t002main;
+
 log;
 obey TEST002(clnup);
 exit;
@@ -554,6 +607,9 @@
 drop table t002ut2;
 #ifMX
 
+drop table t002main;
+drop table t002sub;
+
 ?section clnup_end
 
 
diff --git a/core/sql/regress/executor/EXPECTED001 b/core/sql/regress/executor/EXPECTED001
index 38fd155..39ac8ac 100755
--- a/core/sql/regress/executor/EXPECTED001
+++ b/core/sql/regress/executor/EXPECTED001
@@ -50,7 +50,8 @@
 
 *** ERROR[4082] Object TRAFODION.SCH.T001TN does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>invoke t001t2;
 
 -- Definition of Trafodion table TRAFODION.SCH.T001T2
diff --git a/core/sql/regress/executor/EXPECTED013.SB b/core/sql/regress/executor/EXPECTED013.SB
index 5c0011a..db07760 100644
--- a/core/sql/regress/executor/EXPECTED013.SB
+++ b/core/sql/regress/executor/EXPECTED013.SB
@@ -801,7 +801,8 @@
 
 *** ERROR[4082] Object TRAFODION.T013SCH1.T013T1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>
 >>
 >>-- store by a, 4 partns
diff --git a/core/sql/regress/hive/EXPECTED007 b/core/sql/regress/hive/EXPECTED007
index 2162198..2b0fd34 100644
--- a/core/sql/regress/hive/EXPECTED007
+++ b/core/sql/regress/hive/EXPECTED007
@@ -956,7 +956,7 @@
 
 *** WARNING[8597] Statement was automatically retried 1 time(s). Delay before each retry was 0 seconds. See next entry for the error that caused this retry.
 
-*** WARNING[8436] Mismatch detected between compiletime and runtime hive table definitions. DataModMismatchDetails: compiledModTS = 1508360788, failedModTS = -1, failedLoc = hdfs://localhost:25600/user/hive/warehouse/hivesch007.db/thive1
+*** WARNING[8577] Table, index, or view HIVE.HIVESCH007.THIVE1 was not found.
 
 --- 0 row(s) selected.
 >>
diff --git a/core/sql/regress/hive/EXPECTED018 b/core/sql/regress/hive/EXPECTED018
index d693a56..6f100f1 100644
--- a/core/sql/regress/hive/EXPECTED018
+++ b/core/sql/regress/hive/EXPECTED018
@@ -2376,7 +2376,7 @@
 Task: UNLOAD           Status: Started
 Task:  EXTRACT         Status: Started    Time: 2018-02-15 18:12:42.125129
 
-*** ERROR[8447] An error occurred during hdfs access. Error Detail: Java exception in hdfsCreate(). java.io.IOException: No FileSystem for scheme: null
+*** ERROR[8447] An error occurred during hdfs access. Error Detail: Java exception in HdfsClient::hdfsOpen(). java.io.IOException: No FileSystem for scheme: null
 org.apache.hadoop.fs.FileSystem.getFileSystemClass(FileSystem.java:2584)
 org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2591)
 org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:91)
diff --git a/core/sql/regress/hive/EXPECTED040 b/core/sql/regress/hive/EXPECTED040
index 8fe3212..bb75200 100644
--- a/core/sql/regress/hive/EXPECTED040
+++ b/core/sql/regress/hive/EXPECTED040
@@ -252,7 +252,7 @@
 moveExprColsTuppIndex_ = 2, moveExprColsRowLength_ = 576
 convertSkipListSize_ = 33, convertSkipList_ = 3
 outputRowLength_ = 16
-Flag = 0xc
+Flag = 0x20c
 
 Number of ranges to scan: 1
 Number of esps to scan:    1
@@ -390,7 +390,7 @@
 moveExprColsTuppIndex_ = 2, moveExprColsRowLength_ = 16
 convertSkipListSize_ = 33, convertSkipList_ = 3
 outputRowLength_ = 16
-Flag = 0xc
+Flag = 0x20c
 
 Number of ranges to scan: 1
 Number of esps to scan:    1
@@ -546,7 +546,7 @@
 moveExprColsTuppIndex_ = 2, moveExprColsRowLength_ = 8
 convertSkipListSize_ = 33, convertSkipList_ = 2
 outputRowLength_ = 8
-Flag = 0xc
+Flag = 0x20c
 
 Number of ranges to scan: 1
 Number of esps to scan:    1
@@ -682,7 +682,7 @@
 moveExprColsTuppIndex_ = 2, moveExprColsRowLength_ = 8
 convertSkipListSize_ = 33, convertSkipList_ = 2
 outputRowLength_ = 8
-Flag = 0xc
+Flag = 0x20c
 
 Number of ranges to scan: 1
 Number of esps to scan:    1
diff --git a/core/sql/regress/privs1/EXPECTED123 b/core/sql/regress/privs1/EXPECTED123
index bf848b4..0251bda 100644
--- a/core/sql/regress/privs1/EXPECTED123
+++ b/core/sql/regress/privs1/EXPECTED123
@@ -262,6 +262,8 @@
 Privileges for User SQL_USER1
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 --DU---    TRAFODION.T123SCH.GAMES
 -I-----    TRAFODION.T123SCH.GAMES
 SIDU-R-    TRAFODION.T123SCH.PLAYERS
@@ -280,6 +282,8 @@
 Privileges for User SQL_USER2
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 -I-----    TRAFODION.T123SCH.GAMES
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
@@ -295,6 +299,8 @@
 Privileges for User SQL_USER3
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -307,6 +313,8 @@
 Privileges for User SQL_USER4
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.GAMES
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
@@ -321,6 +329,8 @@
 Privileges for User SQL_USER5
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 SIDU-R-    TRAFODION.T123SCH.GAMES
 SIDU-R-    TRAFODION.T123SCH.PLAYERS
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
@@ -377,6 +387,8 @@
 Privileges for Role PUBLIC
 ==========================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -488,6 +500,8 @@
 Privileges for User SQL_USER1
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 --DU---    TRAFODION.T123SCH.GAMES
 -I-----    TRAFODION.T123SCH.GAMES
 SIDU-R-    TRAFODION.T123SCH.PLAYERS
@@ -544,6 +558,8 @@
 Privileges for Role PUBLIC
 ==========================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -655,6 +671,8 @@
 Privileges for User SQL_USER2
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 -I-----    TRAFODION.T123SCH.GAMES
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
@@ -699,6 +717,8 @@
 Privileges for Role PUBLIC
 ==========================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -806,6 +826,8 @@
 Privileges for User SQL_USER3
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -837,6 +859,8 @@
 Privileges for Role PUBLIC
 ==========================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -947,6 +971,8 @@
 Privileges for User SQL_USER4
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.GAMES
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
@@ -977,6 +1003,8 @@
 Privileges for Role PUBLIC
 ==========================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
@@ -1097,6 +1125,8 @@
 Privileges for User SQL_USER5
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 SIDU-R-    TRAFODION.T123SCH.GAMES
 SIDU-R-    TRAFODION.T123SCH.PLAYERS
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
@@ -1140,6 +1170,8 @@
 Privileges for Role PUBLIC
 ==========================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NAME
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_NUMBER
 S------    TRAFODION.T123SCH.PLAYERS <Column> PLAYER_TEAM_NUMBER
diff --git a/core/sql/regress/privs1/EXPECTED125 b/core/sql/regress/privs1/EXPECTED125
index 1984bb0..73f0d59 100644
--- a/core/sql/regress/privs1/EXPECTED125
+++ b/core/sql/regress/privs1/EXPECTED125
@@ -482,6 +482,8 @@
 Privileges for User SQL_USER1
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 ----G--    TRAFODION.T125SCH2.PLAYERS_SEQUENCE
 ------E    TRAFODION.T125SCH2.TESTHIVE
 -I-----    TRAFODION.T125SCH3.GAMES
@@ -494,6 +496,8 @@
 Privileges for User SQL_USER2
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 SIDU-R-    TRAFODION.T125SCH1.GAMES
 S------    TRAFODION.T125SCH1.GAMES_BY_PLAYER
 SIDU-R-    TRAFODION.T125SCH2.GAMES
@@ -511,6 +515,8 @@
 Privileges for User SQL_USER7
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 S------    TRAFODION.T125SCH2.GAMES <Column> GAME_NUMBER
 S------    TRAFODION.T125SCH2.TEAMS <Column> TEAM_NUMBER
 SIDU-R-    TRAFODION.T125SCH3.PLAYERS
@@ -522,6 +528,8 @@
 Privileges for User SQL_USER8
 =============================
 
+------E    TRAFODION."_LIBMGR_".EVENT_LOG_READER
+------E    TRAFODION."_LIBMGR_".JDBC
 SIDU-R-    TRAFODION.T125SCH3.GAMES
 S----R-    TRAFODION.T125SCH3.GAMES_BY_PLAYER
 S----R-    TRAFODION.T125SCH3.HOME_TEAMS_GAMES
diff --git a/core/sql/regress/privs1/EXPECTED141 b/core/sql/regress/privs1/EXPECTED141
index 3f291f0..07da30d 100644
--- a/core/sql/regress/privs1/EXPECTED141
+++ b/core/sql/regress/privs1/EXPECTED141
@@ -703,7 +703,8 @@
 
 *** ERROR[4082] Object TRAFODION.T141_USER1.U2V2 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>
 >>-- these creates should fail
 >>-- user2 has no privs on u1t1
diff --git a/core/sql/regress/privs2/EXPECTED135 b/core/sql/regress/privs2/EXPECTED135
index dc7a2c2..5bf3c18 100644
--- a/core/sql/regress/privs2/EXPECTED135
+++ b/core/sql/regress/privs2/EXPECTED135
@@ -266,7 +266,8 @@
 
 *** ERROR[4082] Object TRAFODION.T135SCH.USER1_V1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>drop view user1_v1;
 
 *** ERROR[1389] Object TRAFODION.T135SCH.USER1_V1 does not exist in Trafodion.
@@ -347,7 +348,8 @@
 
 *** ERROR[4082] Object TRAFODION.T135SCH.USER2_V1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>drop view user2_v1;
 
 *** ERROR[1389] Object TRAFODION.T135SCH.USER2_V1 does not exist in Trafodion.
@@ -518,7 +520,8 @@
 
 *** ERROR[4082] Object TRAFODION.T135SCH_USER3.T135_V1_USER3 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>
 >>exit;
 
@@ -691,7 +694,8 @@
 
 *** ERROR[4082] Object TRAFODION.T135SCH_USER5.T135_V1_USER5 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>select * from t135_v1_user5;
 
 *** ERROR[4082] Object TRAFODION.T135SCH_USER5.T135_V1_USER5 does not exist or is inaccessible.
@@ -710,7 +714,8 @@
 
 *** ERROR[4082] Object TRAFODION.T135SCH.T135_V1_USER5 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>select * from t135_v1_user5;
 
 *** ERROR[4082] Object TRAFODION.T135SCH.T135_V1_USER5 does not exist or is inaccessible.
diff --git a/core/sql/regress/privs2/EXPECTED138 b/core/sql/regress/privs2/EXPECTED138
index 9643889..f8dad07 100644
--- a/core/sql/regress/privs2/EXPECTED138
+++ b/core/sql/regress/privs2/EXPECTED138
@@ -67,7 +67,8 @@
 
 *** ERROR[4082] Object TRAFODION.T138SCH.USER1_T1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>
 >>create table user1_t2 (c1 int, c2 int);
 
@@ -90,7 +91,8 @@
 
 *** ERROR[4082] Object TRAFODION.T138SCH.USER1_T2 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>
 >>exit;
 
diff --git a/core/sql/regress/seabase/EXPECTED022 b/core/sql/regress/seabase/EXPECTED022
index 2a0251d..b1efc0f 100644
--- a/core/sql/regress/seabase/EXPECTED022
+++ b/core/sql/regress/seabase/EXPECTED022
@@ -801,7 +801,8 @@
 
 *** ERROR[4082] Object TRAFODION.SCH.T022HBM1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>create external table t022hbm1 ("cf".a varchar(4) not null,
 +>            b int)
 +>        primary key (a)
diff --git a/core/sql/regress/seabase/EXPECTED026 b/core/sql/regress/seabase/EXPECTED026
index 219b4ab..fb16c30 100644
--- a/core/sql/regress/seabase/EXPECTED026
+++ b/core/sql/regress/seabase/EXPECTED026
@@ -37,7 +37,8 @@
 
 *** ERROR[4082] Object TRAFODION.SCH026.T026T1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>drop table t026t1;
 
 *** ERROR[4254] Object TRAFODION.SCH026.T026T1 has invalid state and cannot be accessed. Use cleanup command to drop it.
@@ -105,12 +106,14 @@
 
 *** ERROR[4082] Object TRAFODION.SCH026.T026T0 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>invoke t026t1;
 
 *** ERROR[4082] Object TRAFODION.SCH026.T026T1 does not exist or is inaccessible.
 
---- SQL operation failed with errors.
+*** ERROR[8822] The statement was not prepared.
+
 >>
 >>set parserflags 131072;
 
diff --git a/core/sql/regress/udr/EXPECTED002 b/core/sql/regress/udr/EXPECTED002
index 4a378ba..b5d92cb 100644
--- a/core/sql/regress/udr/EXPECTED002
+++ b/core/sql/regress/udr/EXPECTED002
@@ -6515,10 +6515,20 @@
 
 --- 6344 row(s) selected.
 >>
->>-- some simple tests for the event_log_reader predefined TMUDF
+>>-- make sure the event_log_reader and jdbc UDFs are created (this
+>>-- should only be needed while we are developing Trafodion 2.3; in
+>>-- future releases the installer should do this automatically by
+>>-- executing the upgrade command below)
+>>initialize trafodion, upgrade library management;
+
+--- SQL operation complete.
+>>
+>>-- some simple tests for the event_log_reader TMUDF
 >>-- (result of the UDF is not deterministic)
 >>select [last 0] * from udf(event_log_reader());
 
+*** WARNING[4323] Use of predefined UDF EVENT_LOG_READER is deprecated and this function will be removed in a future release. Please use the function with the same name in schema TRAFODION."_LIBMGR_" instead. You may need to issue this command first: INITIALIZE TRAFODION, UPGRADE LIBRARY MANAGEMENT.
+
 --- 0 row(s) selected.
 >>select [last 0] log_ts + interval '1' day,
 +>                severity || 'x',
@@ -6533,12 +6543,14 @@
 +>                log_file_name,
 +>                log_file_line -1,
 +>                case when parse_status = ' ' then 'ok' else parse_status end
-+>from udf(event_log_reader('f'));
++>from udf("_LIBMGR_".event_log_reader('f'));
 
 --- 0 row(s) selected.
 >>select coalesce(min(log_file_node), 0) from udf(event_log_reader('f')) x
 +>where x.log_file_line <100;
 
+*** WARNING[4323] Use of predefined UDF X is deprecated and this function will be removed in a future release. Please use the function with the same name in schema TRAFODION."_LIBMGR_" instead. You may need to issue this command first: INITIALIZE TRAFODION, UPGRADE LIBRARY MANAGEMENT.
+
 (EXPR)     
 -----------
 
@@ -6554,7 +6566,7 @@
 +>       max(log_file_line) as num_lines,
 +>       count(distinct query_id) num_query_ids,
 +>       sum(case when parse_status = ' ' then 0 else 1 end) num_parse_errors
-+>from udf(event_log_reader('f'))
++>from udf("_LIBMGR_".event_log_reader('f'))
 +>where log_file_name = 'master_exec_regr_999_99999.log';
 
 NUM_EVENTS            NUM_LINES    NUM_QUERY_IDS         NUM_PARSE_ERRORS
@@ -6565,7 +6577,7 @@
 --- 1 row(s) selected.
 >>-- there are 22 events in 33 lines in this file, with 13 unique query ids and 3 parse errors
 >>
->>select * from udf(event_log_reader('f'))
+>>select * from udf("_LIBMGR_".event_log_reader('f'))
 +>where log_file_name = 'master_exec_regr_999_99999.log'
 +>  and (log_file_line in (1,3,8,16,23) or parse_status <> ' ')
 +>order by log_file_line;
@@ -6590,33 +6602,46 @@
 >>-- some negative test cases
 >>prepare s from select * from udf(event_log_reader(10));
 
+*** WARNING[4323] Use of predefined UDF EVENT_LOG_READER is deprecated and this function will be removed in a future release. Please use the function with the same name in schema TRAFODION."_LIBMGR_" instead. You may need to issue this command first: INITIALIZE TRAFODION, UPGRADE LIBRARY MANAGEMENT.
+
 *** ERROR[11252] Expecting a character constant as first parameter of the call to TRAFODION.SCH.EVENT_LOG_READER (SQLSTATE 38222)
 
 *** ERROR[8822] The statement was not prepared.
 
 >>-- parameter must be a string
->>prepare s from select * from udf(event_log_reader(?));
+>>prepare s from select * from udf("_LIBMGR_".event_log_reader(?));
 
 *** ERROR[11151] Unable to use 'type' 'UNSUPPORTED TYPE' in a user-defined routine. Details: unsupported type class.
 
 *** ERROR[8822] The statement was not prepared.
 
 >>-- parameter must be available at compile time
->>prepare s from select * from udf(event_log_reader(table(select * from (values (1)) as x)));
+>>prepare s from
++>select *
++>from udf("_LIBMGR_".event_log_reader(table(select * from (values (1)) as x)));
 
-*** ERROR[11252] There should be no table-valued parameters to the call to TRAFODION.SCH.EVENT_LOG_READER, got 1 (SQLSTATE 38220)
+*** ERROR[11252] There should be no table-valued parameters to the call to TRAFODION."_LIBMGR_".EVENT_LOG_READER, got 1 (SQLSTATE 38220)
 
 *** ERROR[8822] The statement was not prepared.
 
 >>-- table-valued input not allowed
 >>prepare s from select query_id, log_file_name, parse_status
-+>from udf(event_log_reader());
++>from udf("_LIBMGR_".event_log_reader());
 
 *** ERROR[4001] Column LOG_FILE_NAME is not found.  Tables in scope: NONE.  Default schema: TRAFODION.SCH.
 
 *** ERROR[8822] The statement was not prepared.
 
 >>-- log_file_name and parse_status columns not available without 'f' option
+>>prepare s from select * from udf(nosuchschema.event_log_reader());
+
+*** ERROR[1389] Object EVENT_LOG_READER does not exist in Trafodion.
+
+*** ERROR[4450] Function NOSUCHSCHEMA.EVENT_LOG_READER is not a built-in function or registered user-defined function.
+
+*** ERROR[8822] The statement was not prepared.
+
+>>-- Schema/UDF does not exist
 >>
 >>-- some simple tests for the timeseries builtin UDF
 >>
@@ -6759,7 +6784,10 @@
 +>              'source',
 +>              'select * from (values (''Hello'', ''World''), (''Hallo'', ''Welt'')) T(a,b)'));
 
+*** WARNING[4323] Use of predefined UDF JDBC is deprecated and this function will be removed in a future release. Please use the function with the same name in schema TRAFODION."_LIBMGR_" instead. You may need to issue this command first: INITIALIZE TRAFODION, UPGRADE LIBRARY MANAGEMENT.
+
 --- SQL command prepared.
+>>-- warning 4323, predefined UDF is deprecated
 >>execute s_traf;
 
 A           B         
@@ -6770,6 +6798,27 @@
 
 --- 2 row(s) selected.
 >>
+>>prepare s_traf from
++>select *
++>from udf("_LIBMGR_".jdbc('jdbcT4-${TRAFODION_VER}.jar',
++>                         'org.trafodion.jdbc.t4.T4Driver',
++>                         $$QUOTE$$$$JDBC_T4_URL$$$$QUOTE$$,
++>                         'any', -- no user id
++>                         'any', -- no password
++>                         'source',
++>                         'select * from (values (''Bonjour'', ''Le monde''), (''Hola'', ''Mundo'')) T(a,b)'));
+
+--- SQL command prepared.
+>>execute s_traf;
+
+A               B               
+--------------  ----------------
+
+Bonjour         Le monde        
+Hola            Mundo           
+
+--- 2 row(s) selected.
+>>
 >>-- negative tests
 >>
 >>select * from udf(timeseries());
@@ -6856,6 +6905,38 @@
 
 >>-- Arguments not in pairs of column names and instructions
 >>
+>>prepare s_traf from
++>select *
++>from udf("_LIBMGR_".jdbc('trafjdbcT2-99.99.99.jar',
++>                         'org.apache.trafodion.jdbc.t2.T2Driver',
++>                         'jdbc:t2jdbc:/something',
++>                         'any', -- no user id
++>                         'any', -- no password
++>                         'source',
++>                         'some sql'));
+
+*** ERROR[11252] Exception during connect: This UDF does not support the Trafodion T2 driver class org.apache.trafodion.jdbc.t2.T2Driver (SQLSTATE 38020)
+
+*** ERROR[8822] The statement was not prepared.
+
+>>-- T2 driver class is not supported
+>>
+>>prepare s_traf from
++>select *
++>from udf("_LIBMGR_".jdbc('trafjdbcT2-99.99.99.jar',
++>                         'someclass',
++>                         'jdbc:t2jdbc:/something',
++>                         'any', -- no user id
++>                         'any', -- no password
++>                         'source',
++>                         'some sql'));
+
+*** ERROR[11252] Exception during connect: This UDF does not support the Trafodion T2 driver URL jdbc:t2jdbc:/something (SQLSTATE 38020)
+
+*** ERROR[8822] The statement was not prepared.
+
+>>-- T2 driver URL is not supported
+>>
 >>-- eclipse timeseries builtin function with a true UDF
 >>create table_mapping function timeseries(pattern char(1))
 +>returns (outval char(40))
diff --git a/core/sql/regress/udr/EXPECTED102 b/core/sql/regress/udr/EXPECTED102
index f0de120..79263aa 100644
--- a/core/sql/regress/udr/EXPECTED102
+++ b/core/sql/regress/udr/EXPECTED102
@@ -51,6 +51,7 @@
 ======================================
 
 DB__LIBMGRNAME
+DB__LIBMGR_LIB_CPP
 
 --- SQL operation complete.
 >>get procedures;
@@ -233,6 +234,7 @@
 ======================================
 
 DB__LIBMGRNAME
+DB__LIBMGR_LIB_CPP
 
 --- SQL operation complete.
 >>get procedures;
diff --git a/core/sql/regress/udr/TEST002 b/core/sql/regress/udr/TEST002
index 4e21184..381d557 100644
--- a/core/sql/regress/udr/TEST002
+++ b/core/sql/regress/udr/TEST002
@@ -147,7 +147,13 @@
 
 execute s1 ;
 
--- some simple tests for the event_log_reader predefined TMUDF
+-- make sure the event_log_reader and jdbc UDFs are created (this
+-- should only be needed while we are developing Trafodion 2.3; in
+-- future releases the installer should do this automatically by
+-- executing the upgrade command below)
+initialize trafodion, upgrade library management;
+
+-- some simple tests for the event_log_reader TMUDF
 -- (result of the UDF is not deterministic)
 select [last 0] * from udf(event_log_reader());
 select [last 0] log_ts + interval '1' day,
@@ -163,7 +169,7 @@
                 log_file_name,
                 log_file_line -1,
                 case when parse_status = ' ' then 'ok' else parse_status end
-from udf(event_log_reader('f'));
+from udf("_LIBMGR_".event_log_reader('f'));
 select coalesce(min(log_file_node), 0) from udf(event_log_reader('f')) x
 where x.log_file_line <100;
 
@@ -175,11 +181,11 @@
        max(log_file_line) as num_lines,
        count(distinct query_id) num_query_ids,
        sum(case when parse_status = ' ' then 0 else 1 end) num_parse_errors
-from udf(event_log_reader('f'))
+from udf("_LIBMGR_".event_log_reader('f'))
 where log_file_name = 'master_exec_regr_999_99999.log';
 -- there are 22 events in 33 lines in this file, with 13 unique query ids and 3 parse errors
 
-select * from udf(event_log_reader('f'))
+select * from udf("_LIBMGR_".event_log_reader('f'))
 where log_file_name = 'master_exec_regr_999_99999.log'
   and (log_file_line in (1,3,8,16,23) or parse_status <> ' ')
 order by log_file_line;
@@ -190,13 +196,17 @@
 -- some negative test cases
 prepare s from select * from udf(event_log_reader(10));
 -- parameter must be a string
-prepare s from select * from udf(event_log_reader(?));
+prepare s from select * from udf("_LIBMGR_".event_log_reader(?));
 -- parameter must be available at compile time
-prepare s from select * from udf(event_log_reader(table(select * from (values (1)) as x)));
+prepare s from
+select *
+from udf("_LIBMGR_".event_log_reader(table(select * from (values (1)) as x)));
 -- table-valued input not allowed
 prepare s from select query_id, log_file_name, parse_status
-from udf(event_log_reader());
+from udf("_LIBMGR_".event_log_reader());
 -- log_file_name and parse_status columns not available without 'f' option
+prepare s from select * from udf(nosuchschema.event_log_reader());
+-- Schema/UDF does not exist
 
 -- some simple tests for the timeseries builtin UDF
 
@@ -248,6 +258,18 @@
               'any', -- no password
               'source',
               'select * from (values (''Hello'', ''World''), (''Hallo'', ''Welt'')) T(a,b)'));
+-- warning 4323, predefined UDF is deprecated
+execute s_traf;
+
+prepare s_traf from
+select *
+from udf("_LIBMGR_".jdbc('jdbcT4-${TRAFODION_VER}.jar',
+                         'org.trafodion.jdbc.t4.T4Driver',
+                         $$QUOTE$$$$JDBC_T4_URL$$$$QUOTE$$,
+                         'any', -- no user id
+                         'any', -- no password
+                         'source',
+                         'select * from (values (''Bonjour'', ''Le monde''), (''Hola'', ''Mundo'')) T(a,b)'));
 execute s_traf;
 
 -- negative tests
@@ -306,6 +328,28 @@
                     'VAL1'));
 -- Arguments not in pairs of column names and instructions
 
+prepare s_traf from
+select *
+from udf("_LIBMGR_".jdbc('trafjdbcT2-99.99.99.jar',
+                         'org.apache.trafodion.jdbc.t2.T2Driver',
+                         'jdbc:t2jdbc:/something',
+                         'any', -- no user id
+                         'any', -- no password
+                         'source',
+                         'some sql'));
+-- T2 driver class is not supported
+
+prepare s_traf from
+select *
+from udf("_LIBMGR_".jdbc('trafjdbcT2-99.99.99.jar',
+                         'someclass',
+                         'jdbc:t2jdbc:/something',
+                         'any', -- no user id
+                         'any', -- no password
+                         'source',
+                         'some sql'));
+-- T2 driver URL is not supported
+
 -- eclipse timeseries builtin function with a true UDF
 create table_mapping function timeseries(pattern char(1))
 returns (outval char(40))
diff --git a/core/sql/runtimestats/ssmpipc.cpp b/core/sql/runtimestats/ssmpipc.cpp
index 857913a..9831d6f 100755
--- a/core/sql/runtimestats/ssmpipc.cpp
+++ b/core/sql/runtimestats/ssmpipc.cpp
@@ -72,7 +72,7 @@
   }
 }
 
-IpcServer *ExSsmpManager::getSsmpServer(char *nodeName, short cpuNum,
+IpcServer *ExSsmpManager::getSsmpServer(NAHeap *heap, char *nodeName, short cpuNum,
                                         ComDiagsArea *&diagsArea)
 {
    char ssmpProcessName[50];
@@ -101,6 +101,8 @@
        // We need to keep 2 entries free: one to send the QueryFinishedMessage and one to get the response for the query-started message
        if (cbGCTS->numReceiveCallbacksPending()+2 >= cbGCTS->getNowaitDepth())
        {
+          if (diagsArea == NULL)
+             diagsArea = ComDiagsArea::allocate(heap);
           *diagsArea << DgSqlCode(-2026)
             << DgString0(tmpProcessName)
             << DgInt0(GetCliGlobals()->myCpu())
diff --git a/core/sql/runtimestats/ssmpipc.h b/core/sql/runtimestats/ssmpipc.h
index dce7bce..d3e5a9b 100644
--- a/core/sql/runtimestats/ssmpipc.h
+++ b/core/sql/runtimestats/ssmpipc.h
@@ -58,7 +58,7 @@
 public:
   ExSsmpManager(IpcEnvironment *env);
   ~ExSsmpManager();
-  IpcServer *getSsmpServer(char *nodeName, short cpuNum, ComDiagsArea *&diagsArea);
+  IpcServer *getSsmpServer(NAHeap *heap, char *nodeName, short cpuNum, ComDiagsArea *&diagsArea);
   IpcEnvironment *getIpcEnvironment() { return env_; }
   void removeSsmpServer(char *nodeName, short cpuNum);
   void cleanupDeletedSsmpServers();
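
The reworked getSsmpServer() signature lets callers pass a NULL ComDiagsArea pointer and have the routine allocate one from the supplied heap only when it has an error to report. A minimal caller sketch follows; the wrapper function and reportDiags() are illustrative assumptions, not code from this change:

```cpp
// Sketch only (project headers assumed); contactSsmp() and reportDiags()
// are hypothetical names used for illustration.
void contactSsmp(ExSsmpManager *ssmpMgr, NAHeap *heap,
                 char *nodeName, short cpuNum)
{
  ComDiagsArea *diagsArea = NULL;  // left NULL; allocated by the callee on error
  IpcServer *ssmp = ssmpMgr->getSsmpServer(heap, nodeName, cpuNum, diagsArea);
  if (diagsArea != NULL)
    {
      // getSsmpServer() allocated the area from 'heap' and populated it,
      // e.g. with SQL code -2026 when the nowait queue is nearly full
      reportDiags(diagsArea);
    }
}
```
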
diff --git a/core/sql/sqlci/Param.cpp b/core/sql/sqlci/Param.cpp
index be09697..62b54af 100644
--- a/core/sql/sqlci/Param.cpp
+++ b/core/sql/sqlci/Param.cpp
@@ -50,6 +50,8 @@
 #include "NLSConversion.h"
 #include "nawstring.h"
 
+extern NAHeap sqlci_Heap;
+
 short convDoItMxcs(char * source,
 		   Lng32 sourceLen,
 		   short sourceType,
@@ -242,7 +244,7 @@
 			  Lng32 targetPrecision,
 			  Lng32 targetScale,
                           Lng32 vcIndLen,
-   			  ComDiagsArea* diags) {
+   			  ComDiagsArea *&diags) {
 
   // get rid of the old converted value
   if (converted_value) {
@@ -418,7 +420,7 @@
 		  targetScale,
 		  VCLen,
 		  VCLenSize,
-		  0,
+		  &sqlci_Heap,
 		  &diags);
     
     if ( ok != ex_expr::EXPR_OK)
@@ -454,7 +456,7 @@
 					   targetScale,
 					   VCLen,
 					   VCLenSize,
-					   0,
+					   &sqlci_Heap,
 					   &diags,
                                            CONV_UNKNOWN,
                                            NULL,
diff --git a/core/sql/sqlci/Param.h b/core/sql/sqlci/Param.h
index 2c4250a..73945e8 100644
--- a/core/sql/sqlci/Param.h
+++ b/core/sql/sqlci/Param.h
@@ -108,7 +108,7 @@
   short convertValue(SqlciEnv *, short targetType, Lng32 &targetLength,
 		     Lng32 targetPrecision, Lng32 targetScale, 
                      Lng32 vcIndLen,
-                     ComDiagsArea* diags = 0);
+                     ComDiagsArea*&diags);
   void setName(const char * name_);
 
   void setValue(const char*, CharInfo::CharSet cs = CharInfo::UnknownCharSet);
diff --git a/core/sql/sqlci/SqlCmd.cpp b/core/sql/sqlci/SqlCmd.cpp
index bb79ff4..e645af4 100644
--- a/core/sql/sqlci/SqlCmd.cpp
+++ b/core/sql/sqlci/SqlCmd.cpp
@@ -146,10 +146,30 @@
 
 volatile Int32 breakReceived = 0;
 
-void HandleCLIError(Lng32 &error, SqlciEnv *sqlci_env,
+void HandleCLIError(SQLSTMT_ID *stmt, Lng32 &error, SqlciEnv *sqlci_env,
 		    NABoolean displayErr, NABoolean * isEOD,
                                Int32 prepcode)
 {
+  Int64 diagsCondCount = 0;
+  if (error == 100) 
+     diagsCondCount = getDiagsCondCount(stmt); 
+  NABoolean getWarningsWithEOF = (diagsCondCount > 0);
+  HandleCLIError(error, sqlci_env, displayErr, isEOD, prepcode, getWarningsWithEOF);
+}
+
+void HandleCLIError(Lng32 &error, SqlciEnv *sqlci_env,
+		    NABoolean displayErr, NABoolean * isEOD,
+                               Int32 prepcode, NABoolean getWarningsWithEOF)
+{
+  if (error == 100) 
+  {
+     if (isEOD != NULL)
+        *isEOD = 1;
+     if (! getWarningsWithEOF) {
+        SqlCmd::clearCLIDiagnostics();
+        return;
+     }
+  }
   if (isEOD)
     *isEOD = 0;
 
@@ -366,7 +386,7 @@
 #endif
 			outtext += pfxl;
 		    
-		    #ifdef USE_WCHAR
+#ifdef USE_WCHAR
 		    if (showSQLSTATE)
 		      {
 			$$do something here$$
@@ -429,7 +449,8 @@
 
 } // HandleCLIError
 
-void handleLocalError(ComDiagsArea &diags, SqlciEnv *sqlci_env)
+
+void handleLocalError(ComDiagsArea *diags, SqlciEnv *sqlci_env)
 {
   Logfile *log = sqlci_env->get_logfile();
 
@@ -440,10 +461,10 @@
   // when HandleCLIError() is called with a error after a CLI call.
   // Soln :10-021203-3433
 
-  if (diags.getNumber(DgSqlCode::ERROR_)) {
+  if (diags->getNumber(DgSqlCode::ERROR_)) {
      worstcode = SQL_Error;
   }
-  else if (diags.getNumber(DgSqlCode::WARNING_)) {
+  else if (diags->getNumber(DgSqlCode::WARNING_)) {
     worstcode = SQL_Warning;
   }
 
@@ -451,7 +472,7 @@
   lastLineWasABlank = TRUE;
 
   ostringstream errMsg;
-  NADumpDiags(errMsg, &diags, TRUE/*newline*/, 0, NULL, log->isVerbose(),
+  NADumpDiags(errMsg, diags, TRUE/*newline*/, 0, NULL, log->isVerbose(),
               sqlci_env->getTerminalCharset());
 
   errMsg << ends;
@@ -459,6 +480,31 @@
   log->WriteAllWithoutEOL(errMsg.str().c_str());
 }
 
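+// Retrieve the statement's row count from the CLI diagnostics area
+// (SQLDIAG_ROW_COUNT) into the global rowsAffected; returns -1 if the
+// CLI call fails.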
+Int64 getRowsAffected(SQLSTMT_ID *stmt)
+{
+   Int32 rc;
+   rc = SQL_EXEC_GetDiagnosticsStmtInfo2(stmt,
+                           SQLDIAG_ROW_COUNT, &rowsAffected,
+                           NULL, 0, NULL);
+   if (rc == 0)
+      return rowsAffected; 
+   else
+      return -1;
+}
+
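+// Return the number of conditions (SQLDIAG_NUMBER) in the statement's
+// diagnostics area, or 0 if the CLI call fails.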
+Int64 getDiagsCondCount(SQLSTMT_ID *stmt)
+{
+   Int32 rc;
+   Int64 diagsCondCount = 0;
+   rc = SQL_EXEC_GetDiagnosticsStmtInfo2(stmt,
+                           SQLDIAG_NUMBER, &diagsCondCount,
+                           NULL, 0, NULL);
+   if (rc == 0)
+      return diagsCondCount; 
+   else
+      return 0;
+}
+
 static char * upshiftStr(char * inStr, char * outStr, UInt32 len)
 {
   for (UInt32 i = 0; i < len; i++)
@@ -1384,7 +1430,7 @@
   Lng32 retcode = 0;
   Int32 num_named_params = 0;
   SqlciList<Param>    *unnamed_param_list = NULL;
-  ComDiagsArea diags(&sqlci_Heap);
+  ComDiagsArea *diags = NULL;
 
   SQLDESC_ID * input_desc  = prep_stmt->getInputDesc();
 
@@ -1546,21 +1592,27 @@
 		NABoolean error = FALSE;
 		
                 Lng32 inLength = length;
-                Int32 previousEntry = diags.getNumber();
+
+                Int32 previousEntry = 0;
+       
+                if (diags != NULL)
+                    previousEntry = diags->getNumber(DgSqlCode::ERROR_);
 		
 		if ( DFS2REC::isAnyCharacter(datatype) ) 
 		  scale = (Lng32)charset; // pass in target charset in argument 'scale'
 		
 		retcode = param->convertValue(sqlci_env, datatype, length,
-					      precision, scale, vcIndLen, &diags);
-                Int32 newestEntry = diags.getNumber();
+					      precision, scale, vcIndLen, diags);
+                Int32 newestEntry = 0;
+                if (diags != NULL)
+                    newestEntry = diags->getNumber(DgSqlCode::ERROR_);
 		
 		//if the convertValue gets a string overflow warning, convert
 		//it to error for non characters and it remains warning for characters
 		if (newestEntry > previousEntry) {
-		  if (diags[newestEntry].getSQLCODE() == EXE_STRING_OVERFLOW ){
+		  if (diags->getErrorEntry(newestEntry)->getSQLCODE() == EXE_STRING_OVERFLOW ){
 		    if (!DFS2REC::isAnyCharacter(datatype)) {
-		      diags.negateCondition(newestEntry-1);
+		      diags->negateCondition(newestEntry-1);
 		      error = TRUE;
 		    }
 		  }
@@ -1665,7 +1717,9 @@
 		  NAString srcval(param->getDisplayValue(sqlci_env->getTerminalCharset()));
 		  if (srcval.isNull()) srcval = "''";	// empty string literal
 		  
-		  diags << DgSqlCode(-SQLCI_PARAM_BAD_CONVERT)
+                  if (diags == NULL)
+                     diags = ComDiagsArea::allocate(&sqlci_Heap);
+		  *diags << DgSqlCode(-SQLCI_PARAM_BAD_CONVERT)
 			<< DgString0(Param::getExternalName(param_name))
 			<< DgString1(srcval)
 			<< DgString2(tgttype);
@@ -1673,7 +1727,9 @@
 	      } // not null param
 	    }	// if param
 	  else {
-	    diags << DgSqlCode(-SQLCI_PARAM_NOT_FOUND)
+            if (diags == NULL)
+                diags = ComDiagsArea::allocate(&sqlci_Heap);
+	    *diags << DgSqlCode(-SQLCI_PARAM_NOT_FOUND)
 		  << DgString0(Param::getExternalName(param_name));
 	  }
 	  
@@ -1701,16 +1757,18 @@
   if (numUnnamedParams > 0 &&
       numUnnamedParams > num_input_entries - num_named_params)
     {
+       if (diags == NULL)
+          diags = ComDiagsArea::allocate(&sqlci_Heap);
       // Warning only, so continue processing after this!
-      diags << DgSqlCode(+SQLCI_EXTRA_PARAMS_SUPPLIED)	// + (i.e. warning)
+      *diags << DgSqlCode(+SQLCI_EXTRA_PARAMS_SUPPLIED)	// + (i.e. warning)
 	    << DgInt0(numUnnamedParams)
 	    << DgInt1(num_input_entries - num_named_params);
     }
   
-  if (diags.getNumber())
+  if (diags != NULL)
     {
       handleLocalError(diags, sqlci_env);
-      if (diags.getNumber(DgSqlCode::ERROR_)) {
+      if (diags->getNumber(DgSqlCode::ERROR_)) {
 	return SQL_Error;
       }
     }
@@ -1782,7 +1840,6 @@
 		     CharInfo::CharSet* unnamedParamCharSetArray,
 		     NABoolean handleError)
 {
-  ComDiagsArea diags(&sqlci_Heap);
   Lng32 retcode = 0;
   rowsAffected = 0;
   SqlciList<Param>    *unnamed_param_list = NULL;
@@ -1809,8 +1866,11 @@
   if (unnamed_param_list)
     delete unnamed_param_list;
 
+  if (retcode > 0)
+     getRowsAffected(stmt);
   return (short)retcode;
 } // SqlCmd::doExec
+
 short SqlCmd::doFetch(SqlciEnv * sqlci_env, SQLSTMT_ID * stmt,
 		      PrepStmt * prep_stmt,
 		      NABoolean firstFetch,
@@ -1827,7 +1887,7 @@
 
   NABoolean isEOD = 0;
   if (handleError)
-    HandleCLIError(retcode, sqlci_env, TRUE, &isEOD,prepcode);
+    HandleCLIError(stmt, retcode, sqlci_env, TRUE, &isEOD,prepcode);
   if (isEOD)
     retcode = SQL_Eof;
 
@@ -1871,7 +1931,8 @@
   if (handleError)
     HandleCLIError(retcode, sqlci_env, TRUE);
 
-
+  if (retcode > 0)
+     getRowsAffected(stmt);
   return (short)retcode;
 }
 
@@ -2047,7 +2108,6 @@
                          CharInfo::CharSet* unnamedParamCharSetArray,
                          Int32 prepcode)
 {
-  ComDiagsArea diags(&sqlci_Heap);
   Lng32 retcode = 0;
   //short ret;
   Logfile *log = sqlci_env->get_logfile();
@@ -2317,6 +2377,7 @@
 	  }
 	HandleCLIError(retcode, sqlci_env);
       }
+    getRowsAffected(stmt); 
     
   } // retcode >= 0
   
@@ -3754,7 +3815,8 @@
   
   retcode = SQL_EXEC_CloseStmt(cursor->cursorStmtId());
   HandleCLIError(retcode, sqlci_env);
-  
+  if (retcode > 0)
+     getRowsAffected(cursor->cursorStmtId());  
   if (donemsg)
     sprintf(donemsg, OP_COMPLETE_MESSAGE);
   
diff --git a/core/sql/sqlci/SqlciCmd.cpp b/core/sql/sqlci/SqlciCmd.cpp
index 665c337..621022f 100644
--- a/core/sql/sqlci/SqlciCmd.cpp
+++ b/core/sql/sqlci/SqlciCmd.cpp
@@ -991,7 +991,7 @@
     // Return - "not authorized" error 
     ComDiagsArea diags;
     diags << DgSqlCode(-1017);
-    handleLocalError(diags, sqlci_env);
+    handleLocalError(&diags, sqlci_env);
     return -1;
   }
 
@@ -1003,7 +1003,7 @@
       // Please use "RESET PARSERFLAGS <value>" to reset the flags.
       ComDiagsArea diags;
       diags << DgSqlCode(3190);
-      handleLocalError(diags, sqlci_env);
+      handleLocalError(&diags, sqlci_env);
     }
     retCode = SQL_EXEC_SetParserFlagsForExSqlComp_Internal2(param);
   }
@@ -1020,7 +1020,7 @@
     // You are not authorized to perform this operation.
     ComDiagsArea diags;
     diags << DgSqlCode(retCode);
-    handleLocalError(diags, sqlci_env);
+    handleLocalError(&diags, sqlci_env);
   }
   return 0;
 }
diff --git a/core/sql/sqlci/sqlcmd.h b/core/sql/sqlci/sqlcmd.h
index 5e48c84..b7d40f9 100644
--- a/core/sql/sqlci/sqlcmd.h
+++ b/core/sql/sqlci/sqlcmd.h
@@ -57,9 +57,15 @@
 extern void HandleCLIError(Lng32 &err, SqlciEnv *sqlci_env, 
 			   NABoolean displayErr = TRUE,
 			   NABoolean * isEOD = NULL,
+                           Int32 prepcode = 0, NABoolean getWarningsWithEOF = FALSE);
+extern void HandleCLIError(SQLSTMT_ID *stmt, Lng32 &err, SqlciEnv *sqlci_env, 
+			   NABoolean displayErr = TRUE,
+			   NABoolean * isEOD = NULL,
                            Int32 prepcode = 0);
-void handleLocalError(ComDiagsArea &diags, SqlciEnv *sqlci_env);
 
+void handleLocalError(ComDiagsArea *diags, SqlciEnv *sqlci_env);
+Int64 getRowsAffected(SQLSTMT_ID *stmt);
+Int64 getDiagsCondCount(SQLSTMT_ID *stmt);
 // for unnamed parameters
 #define MAX_NUM_UNNAMED_PARAMS  128
 #define MAX_LEN_UNNAMED_PARAM 300
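
The statement-aware HandleCLIError() overload exists so that a fetch returning SQL code 100 (EOF) only surfaces diagnostics when the statement actually carries conditions. A condensed restatement of that flow, as a hypothetical wrapper rather than code from this change:

```cpp
// Sketch only: handleFetchCompletion() is an illustration of how the two
// overloads declared above cooperate at EOF (project headers assumed).
void handleFetchCompletion(SQLSTMT_ID *stmt, Lng32 &err, SqlciEnv *env,
                           NABoolean *isEOD)
{
  if (err == 100 && getDiagsCondCount(stmt) == 0)
    {
      if (isEOD)
        *isEOD = 1;                  // plain EOF, nothing to report
      SqlCmd::clearCLIDiagnostics();
      return;
    }
  // EOF accompanied by warnings, or a real error: let the full handler run
  HandleCLIError(err, env, TRUE /*displayErr*/, isEOD);
}
```
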
diff --git a/core/sql/sqlcomp/CmpDescribe.cpp b/core/sql/sqlcomp/CmpDescribe.cpp
index 26b9b3c..1aef61a 100644
--- a/core/sql/sqlcomp/CmpDescribe.cpp
+++ b/core/sql/sqlcomp/CmpDescribe.cpp
@@ -824,8 +824,7 @@
       goto finally;  // we are done
     }
 
-  if (d->getFormat() >= Describe::CONTROL_FIRST_ &&
-      d->getFormat() <= Describe::CONTROL_LAST_)
+  if (d->getIsControl())
     {
       rc = CmpDescribeControl(d, outbuf, outbuflen, heap);
       goto finally;  // we are done
@@ -865,7 +864,7 @@
   // For now, schemaName of HIVE indicates a hive table.
   // Need to fix that at a later time when multiple hive schemas are supported.
   if (((d->getFormat() == Describe::INVOKE_) ||
-       (d->getFormat() == Describe::LONG_)) &&
+       (d->getFormat() == Describe::SHOWDDL_)) &&
       (d->getDescribedTableName().isHive()) &&
       (!d->getDescribedTableName().isSpecialTable()))
     {
@@ -886,7 +885,7 @@
 
   // check if this is an hbase/seabase table. If so, describe using info from NATable.
   if (((d->getFormat() == Describe::INVOKE_) ||
-       (d->getFormat() == Describe::LONG_)) &&
+       (d->getFormat() == Describe::SHOWDDL_)) &&
       ((d->getDescribedTableName().isHbase()) ||
        (d->getDescribedTableName().isSeabase())))
     {
diff --git a/core/sql/sqlcomp/CmpSeabaseDDL.h b/core/sql/sqlcomp/CmpSeabaseDDL.h
index 1f6295d..f1f9429 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDL.h
+++ b/core/sql/sqlcomp/CmpSeabaseDDL.h
@@ -129,6 +129,11 @@
 
 #include "CmpSeabaseDDLmd.h"
 
+// The value HBase uses for checking key length is HConstants.MAX_ROW_LENGTH.
+// The rowID length limit in HBase is enforced in HBase modules Put.java and
+// Mutation.java. (The above was true as of HBase 1.0.0.)
+#define MAX_HBASE_ROWKEY_LEN 32767
+
 #define SEABASEDDL_INTERNAL_ERROR(text)                                   \
    *CmpCommon::diags() << DgSqlCode(-CAT_INTERNAL_EXCEPTION_ERROR) 	  \
                        << DgString0(__FILE__)   		   	  \
@@ -1306,6 +1311,7 @@
   short dropSeabaseLibmgr(ExeCliInterface *inCliInterface);
   short createLibmgrProcs(ExeCliInterface * cliInterface);
   short grantLibmgrPrivs(ExeCliInterface *cliInterface);
+  short createSeabaseLibmgrCPPLib(ExeCliInterface * cliInterface);
 
   short registerNativeTable
   (
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp b/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp
index e767cec..0dc36e5 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLcommon.cpp
@@ -175,27 +175,23 @@
   return 0;
 }
 
+
 short CmpSeabaseDDL::switchBackCompiler()
 {
-  ComDiagsArea * tempDiags = NULL;
+
   if (cmpSwitched_)
-    {
-      tempDiags = ComDiagsArea::allocate(heap_);
-      tempDiags->mergeAfter(*CmpCommon::diags());
-    }
-  
+  {
+      GetCliGlobals()->currContext()->copyDiagsAreaToPrevCmpContext();
+      CmpCommon::diags()->clear();
+  }
   // do restore here even though switching may not have happened, i.e.
   // when switchToCompiler() was not called by the embedded CI, see above.
   restoreAllControlsAndFlags();
   
   if (cmpSwitched_)
     {
-      // ignore new (?) from restore call but restore old diags
+      // Clear the diagnostics area of the current CmpContext
       CmpCommon::diags()->clear();
-      CmpCommon::diags()->mergeAfter(*tempDiags);
-      tempDiags->clear();
-      tempDiags->deAllocate();
-  
       // switch back to the original compiler, ignore return error
       SQL_EXEC_SWITCH_BACK_COMPILER();
 
@@ -4634,9 +4630,6 @@
   Lng32 indOffset = -1;
   Lng32 varOffset = -1;
   
-  cliRC = cliInterface->getAttributes(1, TRUE, fsDatatype, length, 
-                                      &indOffset, &varOffset);
-
   cliRC = cliInterface->getAttributes(entry, TRUE, fsDatatype, length, 
                                       &indOffset, &varOffset);
   if (cliRC < 0)
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLindex.cpp b/core/sql/sqlcomp/CmpSeabaseDDLindex.cpp
index 2e4c5a0..030d76c 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLindex.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLindex.cpp
@@ -239,6 +239,14 @@
      keyInfoArray[i].hbaseColQual = new(CTXTHEAP) char[strlen(qualNumStr)+1];
      strcpy((char*)keyInfoArray[i].hbaseColQual, qualNumStr);
     }
+
+  if (keyLength > MAX_HBASE_ROWKEY_LEN )
+    {
+      *CmpCommon::diags() << DgSqlCode(-CAT_ROWKEY_LEN_TOO_LARGE)
+                              << DgInt0(keyLength)
+                              << DgInt1(MAX_HBASE_ROWKEY_LEN);
+      return -1;
+    }
   
   if ((syskeyOnly) &&
       (hasSyskey))
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLroutine.cpp b/core/sql/sqlcomp/CmpSeabaseDDLroutine.cpp
index eed28cb..c1fc51d 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLroutine.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLroutine.cpp
@@ -1610,8 +1610,9 @@
                 jarLocation.length() + 100];
 
   // Create the SEABASE_LIBMGR_SCHEMA schema
-  str_sprintf(queryBuf, "create schema if not exists %s.\"%s\" authorization %s ",
-              getSystemCatalog(),SEABASE_LIBMGR_SCHEMA, DB__ROOT);
+  snprintf(queryBuf, sizeof(queryBuf),
+           "create schema if not exists %s.\"%s\" authorization %s ",
+           getSystemCatalog(),SEABASE_LIBMGR_SCHEMA, DB__ROOT);
 
   cliRC = cliInterface->executeImmediate(queryBuf);
   if (cliRC < 0)
@@ -1621,9 +1622,10 @@
     }
 
   // Create the SEABASE_LIBMGR_LIBRARY library
-  str_sprintf(queryBuf, "create library %s.\"%s\".%s file '%s'",
-                         getSystemCatalog(), SEABASE_LIBMGR_SCHEMA, SEABASE_LIBMGR_LIBRARY,
-                         jarLocation.data());
+  snprintf(queryBuf, sizeof(queryBuf),
+           "create library %s.\"%s\".%s file '%s'",
+           getSystemCatalog(), SEABASE_LIBMGR_SCHEMA, SEABASE_LIBMGR_LIBRARY,
+           jarLocation.data());
 
   cliRC = cliInterface->executeImmediate(queryBuf);
   if (cliRC < 0)
@@ -1632,6 +1634,9 @@
       return -1;
     }
 
+  if (createSeabaseLibmgrCPPLib(cliInterface) < 0)
+    return -1;
+
   return (createLibmgrProcs(cliInterface));
 }
 
@@ -1647,6 +1652,7 @@
 
       const QString * qs = NULL;
       Int32 sizeOfqs = 0;
+      const char *libName = NULL;
 
       qs = prd.newDDL;
       sizeOfqs = prd.sizeOfnewDDL;
@@ -1657,25 +1663,38 @@
       glueQueryFragments(qryArraySize,  qs,
                          gluedQuery, gluedQuerySize);
 
+      switch (prd.whichLib)
+        {
+        case LibmgrRoutineInfo::JAVA_LIB:
+          libName = SEABASE_LIBMGR_LIBRARY;
+          break;
+        case LibmgrRoutineInfo::CPP_LIB:
+          libName = SEABASE_LIBMGR_LIBRARY_CPP;
+          break;
+        default:
+          CMPASSERT(0);
+        }
+
       param_[0] = getSystemCatalog();
       param_[1] = SEABASE_LIBMGR_SCHEMA;
       param_[2] = getSystemCatalog();
       param_[3] = SEABASE_LIBMGR_SCHEMA;
-      param_[4] = SEABASE_LIBMGR_LIBRARY;
+      param_[4] = libName;
 
      // Make sure the size of queryBuf is big enough to hold the
      // generated text.
       char queryBuf[strlen(getSystemCatalog())*2 + strlen(SEABASE_LIBMGR_SCHEMA)*2 +
                     strlen(SEABASE_LIBMGR_LIBRARY) + gluedQuerySize + 200]; 
 
-      str_sprintf(queryBuf, gluedQuery, param_[0], param_[1], param_[2], param_[3], param_[4]);
+      snprintf(queryBuf, sizeof(queryBuf),
+               gluedQuery, param_[0], param_[1], param_[2], param_[3], param_[4]);
       NADELETEBASICARRAY(gluedQuery, STMTHEAP);
 
       cliRC = cliInterface->executeImmediate(queryBuf);
       if (cliRC < 0)
         {
           cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
-        return -1;
+          return -1;
         }
     } // for
 
@@ -1690,18 +1709,36 @@
 
   Lng32 cliRC = 0;
   char queryBuf[strlen(getSystemCatalog()) + strlen(SEABASE_LIBMGR_SCHEMA) +
-                strlen(SEABASE_LIBMGR_LIBRARY) + strlen(DB__LIBMGRROLE) + 200];
+                strlen(SEABASE_LIBMGR_LIBRARY) +
+                MAXOF(strlen(DB__LIBMGRROLE), strlen(PUBLIC_AUTH_NAME)) + 200];
   for (Int32 i = 0; i < sizeof(allLibmgrRoutineInfo)/sizeof(LibmgrRoutineInfo); i++)
     {
       // Get the next procedure routine details
       const LibmgrRoutineInfo &prd = allLibmgrRoutineInfo[i];
+      const char *grantee = NULL;
+      const char *grantOption = "";
 
-      str_sprintf (queryBuf, "grant execute on %s %s.\"%s\".%s to %s with grant option",
-                              prd.udrType,
-                              getSystemCatalog(),
-                              SEABASE_LIBMGR_SCHEMA,
-                              prd.newName,
-                              DB__LIBMGRROLE);
+      switch (prd.whichRole)
+        {
+        case LibmgrRoutineInfo::LIBMGR_ROLE:
+          grantee = DB__LIBMGRROLE;
+          grantOption = " with grant option";
+          break;
+        case LibmgrRoutineInfo::PUBLIC:
+          grantee = PUBLIC_AUTH_NAME;
+          break;
+        default:
+          CMPASSERT(0);
+        }
+
+      snprintf(queryBuf, sizeof(queryBuf),
+               "grant execute on %s %s.\"%s\".%s to %s%s",
+               prd.udrType,
+               getSystemCatalog(),
+               SEABASE_LIBMGR_SCHEMA,
+               prd.newName,
+               grantee,
+               grantOption);
       cliRC = cliInterface->executeImmediate(queryBuf);
       if (cliRC < 0)
         {
@@ -1731,6 +1768,9 @@
 
   if (cliRC == 0) // does not exist
     {
+      // give an error if the Java library does not exist, since that is
+      // an indication that we never ran
+      // INITIALIZE TRAFODION, CREATE LIBRARY MANAGEMENT
       NAString libraryName(getSystemCatalog());
       libraryName + ".\"" + SEABASE_LIBMGR_SCHEMA + "\"" + SEABASE_LIBMGR_LIBRARY;
       *CmpCommon::diags() << DgSqlCode(-1389)
@@ -1738,6 +1778,25 @@
       return -1;
     }
 
+  // now check for the C++ library, which was added in Trafodion 2.3
+  cliRC = existsInSeabaseMDTable(cliInterface,
+                                 getSystemCatalog(), SEABASE_LIBMGR_SCHEMA,
+                                 SEABASE_LIBMGR_LIBRARY_CPP,
+                                 COM_LIBRARY_OBJECT, TRUE, FALSE);
+  if (cliRC < 0)
+    return -1;
+
+  if (cliRC == 0)
+    {
+      // The Java library exists, but the C++ library does not yet
+      // exist. This means that we last created or upgraded the
+      // library management subsystem in Trafodion 2.2 or earlier.
+      // Create the C++ library, as it is needed for Trafodion 2.3
+      // and higher.
+      if (createSeabaseLibmgrCPPLib(cliInterface) < 0)
+        return -1;
+    }
+
   return (createLibmgrProcs(cliInterface));
 }
 
@@ -1773,6 +1832,31 @@
     }
   return 0;
 }
-
-
   
+short CmpSeabaseDDL::createSeabaseLibmgrCPPLib(ExeCliInterface * cliInterface)
+{
+  Int32 cliRC = 0;
+  NAString dllLocation(getenv("TRAF_HOME"));
+  dllLocation += "/export/lib64";
+  if (strcmp(getenv("SQ_MBTYPE"), "64d") == 0)
+    dllLocation += "d";
+  // for now we use the same DLL as for the predefined UDRs
+  dllLocation += "/libudr_predef.so";
+  char queryBuf[strlen(getSystemCatalog()) + strlen(SEABASE_LIBMGR_SCHEMA) +
+                strlen(SEABASE_LIBMGR_LIBRARY_CPP) +
+                dllLocation.length() + 100];
+
+  // Create the SEABASE_LIBMGR_LIBRARY_CPP library
+  snprintf(queryBuf, sizeof(queryBuf),
+           "create library %s.\"%s\".%s file '%s'",
+           getSystemCatalog(), SEABASE_LIBMGR_SCHEMA, SEABASE_LIBMGR_LIBRARY_CPP,
+           dllLocation.data());
+
+  cliRC = cliInterface->executeImmediate(queryBuf);
+  if (cliRC < 0)
+    {
+      cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
+      return -1;
+    }
+  return 0;
+}
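
The str_sprintf() calls in this file were replaced with snprintf(), which bounds writes to the destination buffer. A self-contained sketch of the truncation check such call sites could add (illustrative only, not part of this change):

```cpp
#include <cstdio>

// Hypothetical helper: returns true only if the formatted query fit.
// snprintf() returns the length the full string would have had, so a
// return value >= bufLen signals truncation.
static bool formatCreateLibrary(char *buf, size_t bufLen, const char *catalog,
                                const char *schema, const char *lib,
                                const char *file)
{
  int n = snprintf(buf, bufLen, "create library %s.\"%s\".%s file '%s'",
                   catalog, schema, lib, file);
  return n >= 0 && (size_t)n < bufLen;
}
```
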
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLroutine.h b/core/sql/sqlcomp/CmpSeabaseDDLroutine.h
index 76f4b65..deadac4 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLroutine.h
+++ b/core/sql/sqlcomp/CmpSeabaseDDLroutine.h
@@ -50,6 +50,8 @@
 #define SYSTEM_PROC_RM           "RM"
 #define SYSTEM_PROC_RMREX        "RMREX"
 #define SYSTEM_TMUDF_SYNCLIBUDF  "SYNCLIBUDF"
+#define SYSTEM_TMUDF_EVENT_LOG_READER "EVENT_LOG_READER"
+#define SYSTEM_TMUDF_JDBC        "JDBC"
 
 // Create procedure text for system procedures
 static const QString seabaseProcAddlibDDL[] =
@@ -244,6 +246,24 @@
   {" ; "}
 };
 
+  static const QString seabaseTMUDFEventLogReaderDDL[] = 
+{
+  {"  CREATE TABLE_MAPPING FUNCTION IF NOT EXISTS %s.\"%s\".EVENT_LOG_READER() "},
+  {"  EXTERNAL NAME 'TRAF_CPP_EVENT_LOG_READER' "},
+  {"  LIBRARY %s.\"%s\".%s "},
+  {"  LANGUAGE CPP "},
+  {" ; "}
+};
+
+  static const QString seabaseTMUDFJDBCDDL[] = 
+{
+  {"  CREATE TABLE_MAPPING FUNCTION IF NOT EXISTS %s.\"%s\".JDBC() "},
+  {"  EXTERNAL NAME 'org.trafodion.libmgmt.JDBCUDR' "},
+  {"  LIBRARY %s.\"%s\".%s "},
+  {"  LANGUAGE JAVA "},
+  {" ; "}
+};
+
 struct LibmgrRoutineInfo
 {
   // type of the UDR (used in grant)
@@ -255,79 +275,131 @@
   // ddl stmt corresponding to the current ddl.
   const QString *newDDL;
   Lng32 sizeOfnewDDL;
+
+  enum LibmgrLibEnum
+    {
+      JAVA_LIB,
+      CPP_LIB
+    } whichLib;
+
+  enum LibmgrRoleEnum
+    {
+      LIBMGR_ROLE,
+      PUBLIC
+    } whichRole;
 };
 
 static const LibmgrRoutineInfo allLibmgrRoutineInfo[] = {
   {"PROCEDURE",
    SYSTEM_PROC_ADDLIB, 
    seabaseProcAddlibDDL, 
-   sizeof(seabaseProcAddlibDDL)
+   sizeof(seabaseProcAddlibDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_ALTERLIB, 
    seabaseProcAlterlibDDL, 
-   sizeof(seabaseProcAlterlibDDL)
+   sizeof(seabaseProcAlterlibDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_DROPLIB, 
    seabaseProcDroplibDDL, 
-   sizeof(seabaseProcDroplibDDL)
+   sizeof(seabaseProcDroplibDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_GETFILE, 
    seabaseProcGetfileDDL, 
-   sizeof(seabaseProcGetfileDDL)
+   sizeof(seabaseProcGetfileDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_HELP, 
    seabaseProcHelpDDL, 
-   sizeof(seabaseProcHelpDDL)
+   sizeof(seabaseProcHelpDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_LS, 
    seabaseProcLsDDL, 
-   sizeof(seabaseProcLsDDL)
+   sizeof(seabaseProcLsDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_LSALL, 
    seabaseProcLsallDDL, 
-   sizeof(seabaseProcLsallDDL)
+   sizeof(seabaseProcLsallDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_PUT, 
    seabaseProcPutDDL, 
-   sizeof(seabaseProcPutDDL)
+   sizeof(seabaseProcPutDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_PUTFILE, 
    seabaseProcPutFileDDL, 
-   sizeof(seabaseProcPutFileDDL)
+   sizeof(seabaseProcPutFileDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_RM, 
    seabaseProcRmDDL, 
-   sizeof(seabaseProcRmDDL)
+   sizeof(seabaseProcRmDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"PROCEDURE",
    SYSTEM_PROC_RMREX, 
    seabaseProcRmrexDDL, 
-   sizeof(seabaseProcRmrexDDL)
+   sizeof(seabaseProcRmrexDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
   },
 
   {"FUNCTION",
    SYSTEM_TMUDF_SYNCLIBUDF,
    seabaseTMUDFSyncLibDDL,
-   sizeof(seabaseTMUDFSyncLibDDL)
+   sizeof(seabaseTMUDFSyncLibDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::LIBMGR_ROLE
+  },
+
+  {"FUNCTION",
+   SYSTEM_TMUDF_EVENT_LOG_READER,
+   seabaseTMUDFEventLogReaderDDL,
+   sizeof(seabaseTMUDFEventLogReaderDDL),
+   LibmgrRoutineInfo::CPP_LIB,
+   LibmgrRoutineInfo::PUBLIC
+  },
+
+  {"FUNCTION",
+   SYSTEM_TMUDF_JDBC,
+   seabaseTMUDFJDBCDDL,
+   sizeof(seabaseTMUDFJDBCDDL),
+   LibmgrRoutineInfo::JAVA_LIB,
+   LibmgrRoutineInfo::PUBLIC
   }
 
 };
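
The new whichLib/whichRole fields turn allLibmgrRoutineInfo into a registration table that fully drives both routine creation and grants. A compressed sketch of the consuming loop, mirroring (not quoting) the logic in CmpSeabaseDDLroutine.cpp:

```cpp
// Sketch of the table-driven pattern; the real loop also glues the DDL
// fragments together and executes them through the CLI.
for (size_t i = 0; i < sizeof(allLibmgrRoutineInfo)/sizeof(LibmgrRoutineInfo); i++)
  {
    const LibmgrRoutineInfo &prd = allLibmgrRoutineInfo[i];
    const char *libName = (prd.whichLib == LibmgrRoutineInfo::CPP_LIB)
                            ? SEABASE_LIBMGR_LIBRARY_CPP
                            : SEABASE_LIBMGR_LIBRARY;
    const char *grantee = (prd.whichRole == LibmgrRoutineInfo::PUBLIC)
                            ? PUBLIC_AUTH_NAME
                            : DB__LIBMGRROLE;
    // create prd.newName in libName, then grant execute on it to grantee
  }
```
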
diff --git a/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp b/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp
index 248deef..b16f777 100644
--- a/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp
+++ b/core/sql/sqlcomp/CmpSeabaseDDLtable.cpp
@@ -57,7 +57,6 @@
 
 #include "TrafDDLdesc.h"
 
-#define MAX_HBASE_ROWKEY_LEN 32768
 
 // defined in CmpDescribe.cpp
 extern short CmpDescribeSeabaseTable ( 
diff --git a/core/sql/sqlcomp/DefaultConstants.h b/core/sql/sqlcomp/DefaultConstants.h
index 2671482..fd110de 100644
--- a/core/sql/sqlcomp/DefaultConstants.h
+++ b/core/sql/sqlcomp/DefaultConstants.h
@@ -3310,6 +3310,7 @@
 
   // Use the earlier implementation of HdfsScan via libhdfs
   USE_LIBHDFS_SCAN,
+  
   // This enum constant must be the LAST one in the list; it's a count,
   // not an Attribute (it's not IN DefaultDefaults; it's the SIZE of it)!
   __NUM_DEFAULT_ATTRIBUTES
diff --git a/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java b/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
index fe116d7..ff78d3d 100644
--- a/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
+++ b/core/sql/src/main/java/org/trafodion/sql/HDFSClient.java
@@ -21,25 +21,31 @@
 
 package org.trafodion.sql;
 
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.io.EOFException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.log4j.PropertyConfigurator;
 import org.apache.log4j.Logger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.conf.Configuration;
-import java.nio.ByteBuffer;
-import java.io.IOException;
-import java.io.EOFException;
-import java.io.OutputStream;
 import java.util.concurrent.Callable;
 import java.util.concurrent.Future;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.CompressionCodec;
@@ -48,6 +54,17 @@
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.util.ReflectionUtils;
 
+//
+//  To read a range of an HDFS file, use the constructor
+//   public HDFSClient(int bufNo, int rangeNo, String filename, ByteBuffer buffer, long position, int length) throws IOException
+//
+//  For instance methods like hdfsListDirectory, use the constructor
+//     public HDFSClient()
+//
+//  For all static methods, use
+//     HdfsClient::<static_method_name>
+//
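+//  A sketch of range-read usage (local variable names here are
+//  illustrative assumptions, not code from this change):
+//     ByteBuffer buf = ByteBuffer.allocateDirect(len);
+//     HDFSClient reader = new HDFSClient(bufNo, rangeNo, fileName, buf, pos, len);
+//     int bytesRead = reader.trafHdfsReadBuffer(); // waits for the async read
+//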
+
 public class HDFSClient 
 {
    static Logger logger_ = Logger.getLogger(HDFSClient.class.getName());
@@ -70,6 +87,9 @@
    private int bytesRead_;
    private Future future_ = null;
    private int isEOF_ = 0; 
+   private int totalBytesWritten_ = 0;
+   private Path filepath_ = null;
+   private boolean compression_;
    static {
       String confFile = System.getProperty("trafodion.log4j.configFile");
       System.setProperty("trafodion.root", System.getenv("TRAF_HOME"));
@@ -88,6 +108,13 @@
       System.loadLibrary("executor");
    }
 
+   // The object instance that runs in the thread pool to read
+   // the requested chunk of the range.
+
+   // The FSDataInputStream.read method may not read the requested length
+   // in one shot, so loop until the requested length is read or EOF is
+   // reached. The requested length can never be larger than the buffer size.
+
    class HDFSRead implements Callable 
    {
       HDFSRead() 
@@ -135,15 +162,19 @@
 
    // This constructor enables the hdfs data to be read in another thread while the previously 
    // read buffer is being processed by the SQL engine 
+   // Opens the file and hands over the needed info to an HDFSRead instance to read.
+   // The passed-in length can never be more than the size of the buffer.
+   // If the range is longer than the buffer, the range is chunked in HdfsScan.
    public HDFSClient(int bufNo, int rangeNo, String filename, ByteBuffer buffer, long position, int length) throws IOException
    {
       bufNo_ = bufNo; 
       rangeNo_ = rangeNo;
       filename_ = filename;
-      Path filepath = new Path(filename_);
-      fs_ = FileSystem.get(filepath.toUri(),config_);
-      fsdis_ = fs_.open(filepath);
-      blockSize_ = (int)fs_.getDefaultBlockSize(filepath);
+      filepath_ = new Path(filename_);
+      fs_ = FileSystem.get(filepath_.toUri(),config_);
+      fsdis_ = fs_.open(filepath_);
+      blockSize_ = (int)fs_.getDefaultBlockSize(filepath_);
       buf_  = buffer;
       bufOffset_ = 0;
       pos_ = position;
@@ -160,6 +191,10 @@
       }
    }
 
+  //  This method waits for the read to complete. A read completes when one of the following occurs:
+  //  a) the buffer is full
+  //  b) EOF is reached
+  //  c) an exception is encountered while reading the file
    public int trafHdfsReadBuffer() throws IOException, InterruptedException, ExecutionException
    {
       Integer retObject = 0;
@@ -167,8 +202,9 @@
       retObject = (Integer)future_.get();
       bytesRead = retObject.intValue();
       fsdis_.close();
+      fsdis_ = null;
       return bytesRead;
-   }
+   }  
 
    public int getRangeNo()
    {
@@ -180,78 +216,127 @@
       return isEOF_;
    }
 
-   boolean hdfsCreate(String fname , boolean compress) throws IOException
+   boolean hdfsCreate(String fname , boolean overwrite, boolean compress) throws IOException
    {
-     if (logger_.isDebugEnabled()) 
+      if (logger_.isDebugEnabled()) 
         logger_.debug("HDFSClient.hdfsCreate() - started" );
-      Path filePath = null;
       if (!compress || (compress && fname.endsWith(".gz")))
-        filePath = new Path(fname);
+        filepath_ = new Path(fname);
       else
-        filePath = new Path(fname + ".gz");
+        filepath_ = new Path(fname + ".gz");
         
-      FileSystem fs = FileSystem.get(filePath.toUri(),config_);
-      FSDataOutputStream fsOut = fs.create(filePath, true);
-      
-      if (compress) {
-        GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, config_);
-        Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
-        outStream_= gzipCodec.createOutputStream(fsOut, gzipCompressor);
+      fs_ = FileSystem.get(filepath_.toUri(),config_);
+      compression_ = compress;
+      fsdis_ = null;      
+      FSDataOutputStream fsOut;
+      if (overwrite)
+         fsOut = fs_.create(filepath_);
+      else if (fs_.exists(filepath_))
+         fsOut = fs_.append(filepath_);
+      else
+         fsOut = fs_.create(filepath_);
+
+      if (compression_) {
+          GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, config_);
+          Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
+          outStream_= gzipCodec.createOutputStream(fsOut, gzipCompressor);
       }
       else
-        outStream_ = fsOut;      
-      if (logger_.isDebugEnabled()) 
-         logger_.debug("HDFSClient.hdfsCreate() - compressed output stream created" );
+         outStream_ = fsOut;
       return true;
-    }
+   }
 
-    boolean hdfsOpen(String fname , boolean compress) throws IOException
-    {
+   boolean hdfsOpen(String fname , boolean compress) throws IOException
+   {
       if (logger_.isDebugEnabled()) 
          logger_.debug("HDFSClient.hdfsOpen() - started" );
-      Path filePath = null;
       if (!compress || (compress && fname.endsWith(".gz")))
-        filePath = new Path(fname);
+        filepath_ = new Path(fname);
       else
-        filePath = new Path(fname + ".gz");
-        
-      FileSystem fs = FileSystem.get(filePath.toUri(),config_);
-      FSDataOutputStream fsOut;
-      if (fs.exists(filePath))
-         fsOut = fs.append(filePath);
-      else
-         fsOut = fs.create(filePath);
-      
-      if (compress) {
-        GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, config_);
-        Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
-        outStream_= gzipCodec.createOutputStream(fsOut, gzipCompressor);
-      }
-      else
-        outStream_ = fsOut;      
-      if (logger_.isDebugEnabled()) 
-         logger_.debug("HDFSClient.hdfsCreate() - compressed output stream created" );
+        filepath_ = new Path(fname + ".gz");
+      fs_ = FileSystem.get(filepath_.toUri(),config_);
+      compression_ = compress;  
+      outStream_ = null;
+      fsdis_ = null;      
       return true;
     }
     
-    boolean hdfsWrite(byte[] buff, long len) throws IOException
+    int hdfsWrite(byte[] buff) throws IOException
     {
-
       if (logger_.isDebugEnabled()) 
          logger_.debug("HDFSClient.hdfsWrite() - started" );
+
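+      // Lazily create the output stream on first write; hdfsOpen resets it
+      // to null and hdfsClose closes and clears it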
+      FSDataOutputStream fsOut;
+      if (outStream_ == null) {
+         if (fs_.exists(filepath_))
+            fsOut = fs_.append(filepath_);
+         else
+            fsOut = fs_.create(filepath_);
+      
+         if (compression_) {
+            GzipCodec gzipCodec = (GzipCodec) ReflectionUtils.newInstance( GzipCodec.class, config_);
+            Compressor gzipCompressor = CodecPool.getCompressor(gzipCodec);
+            outStream_= gzipCodec.createOutputStream(fsOut, gzipCompressor);
+         }
+         else
+            outStream_ = fsOut;      
+         if (logger_.isDebugEnabled()) 
+            logger_.debug("HDFSClient.hdfsWrite() - output stream created" );
+      }
       outStream_.write(buff);
-      outStream_.flush();
-      if (logger_.isDebugEnabled()) logger_.debug("HDFSClient.hdfsWrite() - bytes written and flushed:" + len  );
-      return true;
+      if (outStream_ instanceof FSDataOutputStream)
+         totalBytesWritten_ = ((FSDataOutputStream)outStream_).size();
+      else
+         totalBytesWritten_ += buff.length; 
+      if (logger_.isDebugEnabled()) 
+         logger_.debug("HDFSClient.hdfsWrite() - bytes written " + totalBytesWritten_ );
+      return totalBytesWritten_;
+    }
+
+    int hdfsRead(ByteBuffer buffer) throws IOException
+    {
+      if (logger_.isDebugEnabled()) 
+         logger_.debug("HDFSClient.hdfsRead() - started" );
+      if (fsdis_ == null) {
+         fsdis_ = fs_.open(filepath_);
+         pos_ = 0;
+      }
+      int lenRemain;   
+      int bytesRead;
+      int totalBytesRead = 0;
+      int bufLen;
+      int bufOffset = 0;
+      if (buffer.hasArray())
+         bufLen = buffer.array().length;
+      else
+         bufLen = buffer.capacity();
+      lenRemain = bufLen;
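+      // a single read may return fewer bytes than requested; loop until the
+      // buffer is full or EOF/zero-length read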
+      do
+      {
+         if (buffer.hasArray())
+            bytesRead = fsdis_.read(pos_, buffer.array(), bufOffset, lenRemain);
+         else
+            bytesRead = fsdis_.read(buffer);    
+         if (bytesRead == -1 || bytesRead == 0)
+            break;    
+         totalBytesRead += bytesRead;
+         pos_ += bytesRead;
+         lenRemain -= bytesRead;
+      } while (lenRemain > 0);
+      return totalBytesRead;
     }
     
     boolean hdfsClose() throws IOException
     {
       if (logger_.isDebugEnabled()) logger_.debug("HDFSClient.hdfsClose() - started" );
       if (outStream_ != null) {
+          outStream_.flush();
           outStream_.close();
           outStream_ = null;
       }
+      if (fsdis_ != null) {
+         fsdis_.close();
+         fsdis_ = null;
+      }
       return true;
     }
 
@@ -379,6 +464,25 @@
       else  
          return 0;
    }
+
+
+   public void stop() throws IOException
+   {
+      if (future_ != null) {
+         try {
+           future_.get(200, TimeUnit.MILLISECONDS);
+         } catch(TimeoutException e) {
+            logger_.error("Asynchronous Thread of HdfsScan is Cancelled (timeout), ", e);
+            future_.cancel(true);
+        } catch(InterruptedException e) {
+            logger_.error("Asynchronous Thread of HdfsScan is Cancelled (interrupt), ", e);
+            future_.cancel(true); // Interrupt the thread
+        } catch (ExecutionException ee)
+        {
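+           // the read already completed, possibly with an error; nothing to cancel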
+        }
+        future_ = null;
+      }
+   }
  
    public static void shutdown() throws InterruptedException
    {
@@ -386,9 +490,108 @@
       executorService_.shutdown();
    }
    
+   private static FileSystem getFileSystem() throws IOException
+   {
+       return defaultFs_;
+   }
+
+   // if levelDeep = 0, return the max modification timestamp of the passed-in HDFS URIs
+   // (a tab-separated list of 0 or more paths)
+   // if levelDeep > 0, also check all directories "levelDeep" levels below. Exclude
+   // directories that start with a dot (hidden directories)
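+   //
+   // For example (paths are illustrative):
+   //   getHiveTableMaxModificationTs("/hive/warehouse/t1\t/hive/warehouse/t2", 1)
+   //   returns the newest modification time among the two table roots and their
+   //   immediate non-hidden subdirectories.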
+   public static long getHiveTableMaxModificationTs( String stableDirPaths, int levelDeep) throws FileNotFoundException, IOException
+   {
+       long result = 0;
+       if (logger_.isDebugEnabled())
+          logger_.debug("HDFSClient:getHiveTableMaxModificationTs enter");
+
+       String[] tableDirPaths = stableDirPaths.split("\t");
+       // account for root dir
+       for (int i=0; i<tableDirPaths.length; i++) {
+           FileStatus r = getFileSystem().getFileStatus(new Path(tableDirPaths[i])); // fast metadata call, returns in ~0.2 ms
+           if (r != null && r.getModificationTime() > result)
+               result = r.getModificationTime();
+       }
+
+       if (levelDeep>0)
+       {
+           Path[] paths = new Path[tableDirPaths.length];
+           for (int i=0; i<tableDirPaths.length; i++)
+               paths[i] = new Path(tableDirPaths[i]);
+           long l = getHiveTableMaxModificationTs2(paths,levelDeep);
+           if (l > result)
+              result = l;
+       }
+       if (logger_.isDebugEnabled())
+           logger_.debug("HDFSClient:getHiveTableMaxModificationTs "+stableDirPaths+" levelDeep"+levelDeep+":"+result);
+       return result;
+   }
+
+   private static long getHiveTableMaxModificationTs2(Path[] paths, int levelDeep) throws FileNotFoundException, IOException
+   {
+       long result = 0;
+       PathFilter filter = new PathFilter() {
+           public boolean accept(Path file) {
+               return !file.getName().startsWith("."); // filter out hidden files and directories
+           }
+       };
+       FileStatus[] fileStatuss = null;
+       if (levelDeep == 1) { // stop condition of the recursive function
+           // check the parent level (important for deletes):
+           for (Path path : paths) {
+               FileStatus r = getFileSystem().getFileStatus(path); // fast metadata call, returns in ~0.2 ms
+               if (r != null && r.getModificationTime() > result)
+                   result = r.getModificationTime();
+           }
+           if (paths.length == 1)
+               fileStatuss = getFileSystem().listStatus(paths[0], filter); // minor optimization: avoid the list-based API when not needed
+           else
+               fileStatuss = getFileSystem().listStatus(paths, filter);
+           for (int i = 0; i < fileStatuss.length; i++)
+               if (fileStatuss[i].isDirectory() && fileStatuss[i].getModificationTime() > result)
+                   result = fileStatuss[i].getModificationTime();
+       } else { // here levelDeep > 1
+           List<Path> pathList = new ArrayList<Path>();
+           if (paths.length == 1)
+               fileStatuss = getFileSystem().listStatus(paths[0], filter); // minor optimization: avoid the list-based API when not needed
+           else
+               fileStatuss = getFileSystem().listStatus(paths, filter);
+           for (int i = 0; i < fileStatuss.length; i++)
+               if (fileStatuss[i].isDirectory())
+               {
+                   pathList.add(fileStatuss[i].getPath());
+                   if (fileStatuss[i].getModificationTime() > result)
+                       result = fileStatuss[i].getModificationTime(); // make sure level n-1 is accounted for, for the delete-partition case
+               }
+           long l = getHiveTableMaxModificationTs2(pathList.toArray(new Path[pathList.size()]), levelDeep - 1);
+           if (l > result) result = l;
+       }
+       return result;
+   }
+
+   public static String getFsDefaultName()
+   {
+      String uri = config_.get("fs.defaultFS");
+      return uri;
+   }
+
+
+   public static boolean hdfsCreateDirectory(String pathStr) throws IOException
+   {
+      if (logger_.isDebugEnabled()) 
+         logger_.debug("HDFSClient.hdfsCreateDirectory()" + pathStr);
+      Path dirPath = new Path(pathStr );
+      FileSystem fs = FileSystem.get(dirPath.toUri(), config_);
+      fs.mkdirs(dirPath);
+      return true;
+   }
+
    private native int sendFileStatus(long jniObj, int numFiles, int fileNo, boolean isDir, 
                         String filename, long modTime, long len,
                         short numReplicas, long blockSize, String owner, String group,
                         short permissions, long accessTime);
 
 }
+
+
diff --git a/core/sql/src/main/java/org/trafodion/sql/HdfsScan.java b/core/sql/src/main/java/org/trafodion/sql/HdfsScan.java
index e216555..99f021d 100644
--- a/core/sql/src/main/java/org/trafodion/sql/HdfsScan.java
+++ b/core/sql/src/main/java/org/trafodion/sql/HdfsScan.java
@@ -21,17 +21,6 @@
 
 package org.trafodion.sql;
 
-// This class implements an efficient mechanism to read hdfs files
-// Trafodion ExHdfsScan operator provides a range of scans to be performed.
-// The range consists of a hdfs filename, offset and length to be read
-// This class takes in two ByteBuffers. These ByteBuffer can be either direct buffers
-// backed up native buffers or indirect buffer backed by java arrays.
-// All the ranges are read alternating between the two buffers using ExecutorService
-// using CachedThreadPool mechanism. 
-// For a given HdfsScan instance, only one thread(IO thread) is scheduled to read
-// the next full or partial buffer while the main thread processes the previously
-// read information from the other buffer
-
 import org.apache.log4j.PropertyConfigurator;
 import org.apache.log4j.Logger;
 import org.apache.hadoop.fs.FileSystem;
@@ -54,6 +43,24 @@
 import org.apache.hadoop.fs.FileStatus;
 import java.net.URI;
 
+// This class implements an efficient mechanism to read HDFS files.
+// The Trafodion ExHdfsScan operator provides a set of ranges to be scanned.
+// Each range consists of an HDFS filename, and an offset and length to be read.
+// This class takes in two ByteBuffers. These ByteBuffers can be either direct buffers
+// backed by native buffers or indirect buffers backed by Java arrays.
+// All the ranges are read alternating between the two buffers, via an ExecutorService
+// using the CachedThreadPool mechanism.
+
+// For a given HdfsScan instance, only one thread (the IO thread) is scheduled to read
+// the next full or partial buffer while the main thread processes the previously
+// read information from the other buffer.
+// HdfsScan picks up a range and schedules a read into the next available buffer.
+// If the range is larger than the buffer size, the range is read in multiple
+// chunks, scheduling one chunk at a time and alternating between the buffers.
+// Once a range is completed, the next HdfsScanRange is picked up
+// for scheduling, until all the ranges assigned to the HdfsScan instance are read.
+
+
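+// A usage sketch (buffers, file name, offset and length below are illustrative):
+//
+//   HdfsScan scan = new HdfsScan();
+//   scan.setScanRanges(buf1, buf2, new String[] {"hdfs://nn:8020/hive/t1/f1"},
+//                      new long[] {0}, new long[] {134217728}, new int[] {0});
+//   int[] retArray = scan.trafHdfsRead(); // {bytesRead, bufNo, rangeNo, isEOF}
+//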
 public class HdfsScan 
 {
    static Logger logger_ = Logger.getLogger(HdfsScan.class.getName());
@@ -61,10 +68,13 @@
    private int bufLen_[];
    private HDFSClient hdfsClient_[];
    private int currRange_;
-   private long currPos_;
-   private long lenRemain_;
+   private long currRangePos_;
+   private long currRangeLenRemain_;
    private int lastBufCompleted_ = -1;
    private boolean scanCompleted_;
+ 
+   // Structure to hold the scan ranges for this HdfsScan instance
    
    class HdfsScanRange 
    {
@@ -95,6 +105,7 @@
 
    public void setScanRanges(ByteBuffer buf1, ByteBuffer buf2, String filename[], long pos[], long len[], int rangeNum[]) throws IOException
    {
+      // Two buffers to hold the data read
       buf_ = new ByteBuffer[2];
       bufLen_ = new int[2];
 
@@ -114,39 +125,47 @@
       }
       if (hdfsScanRanges_.length > 0) {
          currRange_ = 0;
-         currPos_ = hdfsScanRanges_[currRange_].pos_;
-         lenRemain_ = hdfsScanRanges_[currRange_].len_; 
-         hdfsScanRange(0, 0);
+         currRangePos_ = hdfsScanRanges_[currRange_].pos_;
+         currRangeLenRemain_ = hdfsScanRanges_[currRange_].len_; 
+         scheduleHdfsScanRange(0, 0);
       }
       scanCompleted_ = false;
    }
 
-   public void hdfsScanRange(int bufNo, int bytesCompleted) throws IOException
+   public void scheduleHdfsScanRange(int bufNo, int bytesCompleted) throws IOException
    {
-      lenRemain_ -= bytesCompleted;
-      currPos_ += bytesCompleted; 
+      currRangeLenRemain_ -= bytesCompleted;
+      currRangePos_ += bytesCompleted; 
       int readLength;
-      if (lenRemain_ <= 0) {
+      if (currRangeLenRemain_ <= 0) {
          if (currRange_  == (hdfsScanRanges_.length-1)) {
             scanCompleted_ = true;
             return;
          }
          else {
             currRange_++;
-            currPos_ = hdfsScanRanges_[currRange_].pos_;
-            lenRemain_ = hdfsScanRanges_[currRange_].len_; 
+            currRangePos_ = hdfsScanRanges_[currRange_].pos_;
+            currRangeLenRemain_ = hdfsScanRanges_[currRange_].len_; 
          }
       } 
-      if (lenRemain_ > bufLen_[bufNo])
+      if (currRangeLenRemain_ > bufLen_[bufNo])
          readLength = bufLen_[bufNo];
       else
-         readLength = (int)lenRemain_;
+         readLength = (int)currRangeLenRemain_;
       if (! scanCompleted_) {
          if (logger_.isDebugEnabled())
-            logger_.debug(" CurrentRange " + hdfsScanRanges_[currRange_].tdbRangeNum_ + " LenRemain " + lenRemain_ + " BufNo " + bufNo); 
-         hdfsClient_[bufNo] = new HDFSClient(bufNo, hdfsScanRanges_[currRange_].tdbRangeNum_, hdfsScanRanges_[currRange_].filename_, buf_[bufNo], currPos_, readLength);
+            logger_.debug(" CurrentRange " + hdfsScanRanges_[currRange_].tdbRangeNum_ + " LenRemain " + currRangeLenRemain_ + " BufNo " + bufNo); 
+         hdfsClient_[bufNo] = new HDFSClient(bufNo, hdfsScanRanges_[currRange_].tdbRangeNum_, hdfsScanRanges_[currRange_].filename_, buf_[bufNo], currRangePos_, readLength);
       }
    } 
+  
+/*
+   Method to wait for completion of the scheduled read of a chunk in a range.
+   Returns 4 items: bytes read, buffer number, range number, and an EOF flag.
+   If there are more chunks to be read in the range, it schedules a read into the other buffer.
+   If EOF is reached or the full range has been read, the next range is picked up for
+   scheduling.
+*/
    
    public int[] trafHdfsRead() throws IOException, InterruptedException, ExecutionException
    {
@@ -164,12 +183,14 @@
       switch (lastBufCompleted_) {
          case -1:
          case 1:
+            // Wait for the read to complete in buffer 0
             bytesRead = hdfsClient_[0].trafHdfsReadBuffer(); 
             bufNo = 0;
             rangeNo = hdfsClient_[0].getRangeNo();
             isEOF = hdfsClient_[0].isEOF();
             break;
          case 0:
+            // Wait for the read to complete in buffer 1
             bytesRead = hdfsClient_[1].trafHdfsReadBuffer(); 
             bufNo = 1;
             rangeNo = hdfsClient_[1].getRangeNo();
@@ -194,18 +215,20 @@
             return retArray;
          } else {
             currRange_++;
-            currPos_ = hdfsScanRanges_[currRange_].pos_;
-            lenRemain_ = hdfsScanRanges_[currRange_].len_;
+            currRangePos_ = hdfsScanRanges_[currRange_].pos_;
+            currRangeLenRemain_ = hdfsScanRanges_[currRange_].len_;
             bytesRead = 0;
          }
       }
       switch (lastBufCompleted_)
       {
          case 0:
-            hdfsScanRange(1, bytesRead);
+            // schedule the next chunk or next range to be read in buffer 1
+            scheduleHdfsScanRange(1, bytesRead);
             break;
          case 1:
-            hdfsScanRange(0, bytesRead);
+            // schedule the next chunk or next range to be read in buffer 0
+            scheduleHdfsScanRange(0, bytesRead);
             break;            
          default:
             break;
@@ -213,10 +236,22 @@
       return retArray;
    } 
    
+   public void stop() throws IOException
+   {
+      if (hdfsClient_[0] != null)
+         hdfsClient_[0].stop();
+      if (hdfsClient_[1] != null)
+         hdfsClient_[1].stop();
+      hdfsClient_[0] = null;
+      hdfsClient_[1] = null;
+      return;
+   }
+
    public static void shutdown() throws InterruptedException
    {
       HDFSClient.shutdown();
    }
+
    public static void main(String[] args) throws Exception
    {
 
diff --git a/core/sql/src/main/java/org/trafodion/sql/udr/LmT2Driver.java b/core/sql/src/main/java/org/trafodion/sql/udr/LmT2Driver.java
index 4130b09..12ccb1b 100644
--- a/core/sql/src/main/java/org/trafodion/sql/udr/LmT2Driver.java
+++ b/core/sql/src/main/java/org/trafodion/sql/udr/LmT2Driver.java
@@ -195,6 +195,18 @@
     return false;
   }
 
+  // Similar to acceptsURL, but a static method that checks for substrings.
+  // acceptsURL is used by the DriverManager; this static method is used
+  // internally by Trafodion (JDBC TMUDF).
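+  //
+  // For example (the URL below is illustrative):
+  //   checkURL("jdbc:t2jdbc://...") returns true whenever any entry of
+  //   acceptedURLs occurs as a substring of the given URL.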
+  public static boolean checkURL( String url ) 
+  {
+    for (int i = 0; i < acceptedURLs.length; i++)
+      if (url.indexOf(acceptedURLs[i]) >= 0)
+        return true;
+
+    return false;
+  }
+
   public Connection connect( String url, java.util.Properties props )
       throws SQLException
   {
diff --git a/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java b/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java
index 0c6936b..f51568d 100644
--- a/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java
+++ b/core/sql/src/main/java/org/trafodion/sql/udr/predef/JDBCUDR.java
@@ -17,6 +17,22 @@
 under the License.
 **********************************************************************/
 
+//*************************************************
+//
+//   ##    ##   ######   ########  ########
+//   ###   ##  ########  ########  ########
+//   ####  ##  ##    ##     ##     ##
+//   ## ## ##  ##    ##     ##     #####
+//   ##  ####  ##    ##     ##     ##
+//   ##   ###  ########     ##     ########
+//   ##    ##   ######      ##     ########
+//
+//**************************************************
+//
+// This file is deprecated. Please update file
+// core/sql/lib_mgmt/src/main/java/org/trafodion/sql/libmgmt/JDBCUDR.java
+// instead!!
+
 /***************************************************
  * A TMUDF that executes a generic JDBC query
  * and returns the result of the one SQL statement
@@ -114,6 +130,20 @@
               driverJarPath = LmUtility.getExternalLibsDirForUser(null).resolve(
                     driverJarPath);
 
+            // for security reasons we also reject the Trafodion T2
+            // driver (check both class name and URL)
+            if (driverClassName_.equals("org.apache.trafodion.jdbc.t2.T2Driver"))
+                throw new UDRException(
+                    38012,
+                    "This UDF does not support the Trafodion T2 driver class %s",
+                    driverClassName_);
+
+            if (LmT2Driver.checkURL(connectionString_))
+                throw new UDRException(
+                    38013,
+                    "This UDF does not support the Trafodion T2 driver URL %s",
+                    connectionString_);
+ 
             // Create a class loader that can access the jar file
             // specified by the caller. Note that this is only needed
             // because the JDBC UDR is a predefined UDR and is loaded
@@ -139,8 +169,9 @@
           catch (ClassNotFoundException cnf) {
               throw new UDRException(
                 38020,
-                "JDBC driver class %s not found. Please make sure the JDBC driver jar is stored in %s. Message: %s",
+                "JDBC driver class %s not found. Please make sure the JDBC driver jar %s is stored in %s. Message: %s",
                 driverClassName_,
+                driverJar_,
                 LmUtility.getSandboxRootForUser(null).toString(),
                 cnf.getMessage());
           }
diff --git a/core/sql/ustat/hs_globals.cpp b/core/sql/ustat/hs_globals.cpp
index c183b01..7267862 100644
--- a/core/sql/ustat/hs_globals.cpp
+++ b/core/sql/ustat/hs_globals.cpp
@@ -5052,7 +5052,7 @@
           }
         else
           {
-            sprintf(sbuf, "%d", col.precision+2);
+            sprintf(sbuf, "%d,0", col.precision+2); // for seconds cast below
             typeName = getIntTypeForInterval(group, 60 * (Int64)pow(10, col.precision));
           }
         group->ISSelectExpn.append("cast(cast(")
@@ -5076,7 +5076,7 @@
           }
         else
           {
-            sprintf(sbuf, "%d", col.precision+4);
+            sprintf(sbuf, "%d,0", col.precision+4); // for seconds cast below
             typeName = getIntTypeForInterval(group, 60 * 60 * (Int64)pow(10, col.precision));
           }
         group->ISSelectExpn.append("cast(cast(")
@@ -5100,7 +5100,7 @@
           }
         else
           {
-            sprintf(sbuf, "%d", col.precision+5);
+            sprintf(sbuf, "%d,0", col.precision+5); // for seconds cast below
             typeName = getIntTypeForInterval(group, 24 * 60 * 60 * (Int64)pow(10, col.precision));
           }
         group->ISSelectExpn.append("cast(cast(")
@@ -11632,7 +11632,12 @@
          memReduceAllowance();
          break;
        }
-
+     // trafodion-2978
+     // group->mcis_memFreed may be set to TRUE in HSColGroupStruct::freeISMemory,
+     // so if the memory allocation succeeds, set group->mcis_memFreed back to FALSE.
+     if (group->mcis_memFreed)
+         group->mcis_memFreed = FALSE;
+     // trafodion-2978
      group->nextData = group->data;
      group->mcis_nextData = group->mcis_data;
      numCols++;
diff --git a/dcs/bin/dcs-config.sh b/dcs/bin/dcs-config.sh
index b823807..236cd95 100755
--- a/dcs/bin/dcs-config.sh
+++ b/dcs/bin/dcs-config.sh
@@ -48,6 +48,7 @@
   export DCS_HOME=`dirname "$this"`/..
 fi
 
+foreground="false"
 #check to see if the conf dir or dcs home are given as an optional arguments
 while [ $# -gt 1 ]
 do
@@ -63,6 +64,10 @@
     hosts=$1
     shift
     DCS_SERVERS=$hosts
+  elif [ "--foreground" = "$1" ]
+  then
+    shift
+    foreground="true"
   else
     # Presume we are at end of options and break
     break
@@ -73,10 +78,8 @@
 DCS_CONF_DIR="${DCS_CONF_DIR:-$DCS_HOME/conf}"
 # List of DCS servers.
 DCS_SERVERS="${DCS_SERVERS:-$DCS_CONF_DIR/servers}"
-# DCS primary master.
-DCS_PRIMARY_MASTER="${DCS_PRIMARY_MASTER:-$DCS_CONF_DIR/master}"
-# List of DCS secondary masters.
-DCS_BACKUP_MASTERS="${DCS_BACKUP_MASTERS:-$DCS_CONF_DIR/backup-masters}"
+# List of DCS masters.
+DCS_MASTERS="${DCS_MASTERS:-$DCS_CONF_DIR/masters}"
 
 # Source the dcs-env.sh.  Will have JAVA_HOME defined.
 if [ -f "${DCS_CONF_DIR}/dcs-env.sh" ]; then
@@ -87,6 +90,9 @@
 if [ -f "${TRAF_HOME}/sqenv.sh" ]; then
   savedir=`pwd`
   cd $TRAF_HOME
+  if [[ -f /etc/trafodion/trafodion_config ]]; then
+     . /etc/trafodion/trafodion_config
+  fi
   . ./sqenv.sh
   cd $savedir
 fi
diff --git a/dcs/bin/dcs-daemon.sh b/dcs/bin/dcs-daemon.sh
index a331c3e..36f9650 100755
--- a/dcs/bin/dcs-daemon.sh
+++ b/dcs/bin/dcs-daemon.sh
@@ -32,7 +32,7 @@
 #   DCS_NICENESS The scheduling priority for daemons. Defaults to 0.
 #
 
-usage="Usage: dcs-daemon.sh [--config <conf-dir>]\
+usage="Usage: dcs-daemon.sh [--foreground] [--config <conf-dir>]\
  (start|stop|restart) <dcs-command> \
  <args...>"
 
@@ -105,14 +105,16 @@
   DCS_PID_DIR="$DCS_HOME/tmp"
 fi
 
-#if [ "$DCS_IDENT_STRING" = "" ]; then
+# DCS_IDENT_STRING can be set in the environment to uniquely identify dcs instances
+if [ $command == "master" ] || [ $command == "master-backup" ]; then
+  export DCS_IDENT_STRING="$USER"
+else
   export DCS_IDENT_STRING="$USER-$instance"
-#fi
+fi
 
 # Some variables
 # Work out java location so can print version into log.
 if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
   JAVA_HOME=$JAVA_HOME
 fi
 if [ "$JAVA_HOME" = "" ]; then
@@ -128,6 +130,7 @@
 loggc=$DCS_LOG_DIR/$DCS_LOG_PREFIX.gc
 loglog="${DCS_LOG_DIR}/${DCS_LOGFILE}"
 pid=$DCS_PID_DIR/dcs-$DCS_IDENT_STRING-$command.pid
+stopmode=$DCS_PID_DIR/dcs-server-stop
 
 if [ "$DCS_USE_GC_LOGFILE" = "true" ]; then
   export DCS_GC_OPTS=" -Xloggc:${loggc}"
@@ -138,13 +141,25 @@
     export DCS_NICENESS=0
 fi
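+# A 'conditional-start' (issued, hypothetically, by an external watchdog that
+# restarts daemons) starts the server only if it was not stopped intentionally,
+# i.e. only if the stop-mode marker file is absent.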
 
+if [[ $startStop == 'conditional-start' ]]
+then
+  if [[ -f $stopmode ]]
+  then
+    echo "Server stopped intentionally, no restart"
+    exit 5
+  else
+    startStop=start
+  fi
+fi
+
 case $startStop in
 
   (start)
+    rm -f $stopmode # leaving stop-mode
     mkdir -p "$DCS_PID_DIR"
     if [ -f $pid ]; then
       if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo $command  `cat $pid`.  Stop it first.
+        echo $command running as process `cat $pid`.  Stop it first.
         exit -2
       fi
     fi
@@ -155,14 +170,27 @@
     # Add to the command log file vital stats on our environment.
     # echo "`date` Starting $command on `hostname`" >> $loglog
     # echo "`ulimit -a`" >> $loglog 2>&1
-    nohup nice -n $DCS_NICENESS "$DCS_HOME"/bin/dcs \
-        --config "${DCS_CONF_DIR}" \
-        $command "$@" $startStop > "$logout" 2>&1 < /dev/null &
-    echo $! > $pid
-    sleep 1; head "$logout"
+    if [[ $foreground == "true" ]]
+    then
+      renice -n $DCS_NICENESS $$
+      echo $$ > $pid
+      exec > "$logout" 2>&1 < /dev/null
+      exec "$DCS_HOME"/bin/dcs \
+             --config "${DCS_CONF_DIR}" \
+             $command "$@" $startStop
+      echo "Error: exec failed"
+      exit 1
+    else
+      nohup nice -n $DCS_NICENESS "$DCS_HOME"/bin/dcs \
+          --config "${DCS_CONF_DIR}" \
+          $command "$@" $startStop > "$logout" 2>&1 < /dev/null &
+      echo $! > $pid
+      sleep 1; head "$logout"
+    fi
     ;;
 
   (stop)
+    touch $stopmode # entering stop-mode
     if [ -f $pid ]; then
       # kill -0 == see if the PID exists 
       if kill -0 `cat $pid` > /dev/null 2>&1; then
diff --git a/dcs/bin/getActiveMaster.sh b/dcs/bin/getActiveMaster.sh
new file mode 100755
index 0000000..af969d4
--- /dev/null
+++ b/dcs/bin/getActiveMaster.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+#/**
+# @@@ START COPYRIGHT @@@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# @@@ END COPYRIGHT @@@
+# */
+
+# Get the activeMaster hostname
+
+setup_sqpdsh
+A_PDSH=$SQPDSHA
+
+function getActiveMaster {
+
+   tmpdcsconfig=`mktemp -t`
+   if [[ $? != 0 ]]; then
+     echo "Error while getting a temporary file for tmpdcsconfig. Exiting."
+     exit 3
+   fi
+
+   python $DCS_INSTALL_DIR/bin/scripts/parse_dcs_site.py > $tmpdcsconfig
+   masterport=`cat $tmpdcsconfig |grep "^dcs.master.port:"| cut -f2 -d":"`
+  
+   if [[ ! -z $CLUSTERNAME ]]; then
+    if [[ $ENABLE_HA == "true" ]]; then
+
+      floatip_interface=`cat $tmpdcsconfig |grep "^dcs.master.floating.ip.external.interface:"| cut -f2 -d":"`
+      keepalived=`cat $tmpdcsconfig |grep "^dcs.master.keepalived:"| cut -f2 -d":"`
+
+      if [[ $floatip_interface == "default" ]]; then
+           floatip_interface=`/sbin/route |grep "0.0.0.0" |awk '{print $8}'`
+      fi
+
+      if ! grep -q ^ec2 /sys/hypervisor/uuid 2>/dev/null ; then
+          # Non-AWS system
+          interface_to_use=$floatip_interface":"$masterport
+      else
+          interface_to_use=$floatip_interface
+      fi
+
+      if [[ ${keepalived} != "true" ]]; then
+           activeMaster=`$A_PDSH /sbin/ip addr show |grep $interface_to_use$ |cut -d':' -f1`
+      else
+           activeMaster=`$A_PDSH /sbin/ifconfig |grep $interface_to_use |cut -d':' -f1`
+      fi
+    else
+      tmpnetstat=`$A_PDSH /bin/netstat -antp 2>/dev/null |grep -w :$masterport`
+      tmpcurrentMaster=`echo $tmpnetstat |cut -f1 -d":" |awk '{print $1}'`
+      if [[ ${tmpcurrentMaster} == "tcp" ]]; then
+         activeMaster=`hostname -f`
+      else
+         activeMaster=$tmpcurrentMaster
+      fi
+    fi
+   else
+    activeMaster=localhost
+   fi
+
+   rm -f $tmpdcsconfig
+   echo $activeMaster
+}
+
+getActiveMaster
diff --git a/dcs/bin/master-backup.sh b/dcs/bin/master-backup.sh
index ee396d5..dc46be3 100755
--- a/dcs/bin/master-backup.sh
+++ b/dcs/bin/master-backup.sh
@@ -25,8 +25,8 @@
 #
 # Environment Variables
 #
-#   DCS_BACKUP_MASTERS File naming remote hosts.
-#     Default is ${DCS_CONF_DIR}/backup-masters
+#   DCS_MASTERS File for specifying all DcsMaster hosts
+#     Default is ${DCS_CONF_DIR}/masters
 #   DCS_CONF_DIR  Alternate Dcs conf dir. Default is ${DCS_HOME}/conf.
 #   DCS_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
 #   DCS_SSH_OPTS Options passed to ssh when running remote commands.
@@ -46,47 +46,35 @@
 
 . "$bin"/dcs-config.sh
 
-# If the master backup file is specified in the command line,
-# then it takes precedence over the definition in 
-# dcs-env.sh. Save it here.
-HOSTLIST=$DCS_BACKUP_MASTERS
-
-if [ "$HOSTLIST" = "" ]; then
-  if [ "$DCS_BACKUP_MASTERS" = "" ]; then
-    export HOSTLIST="${DCS_CONF_DIR}/backup-masters"
-  else
-    export HOSTLIST="${DCS_BACKUP_MASTERS}"
-  fi
-fi
-
+activeMaster=$($DCS_INSTALL_DIR/bin/getActiveMaster.sh)
 
 args=${@// /\\ }
 args=${args/master-backup/master}
 
 instance=2
 
-if [ -f $HOSTLIST ]; then
+if [ -f ${DCS_INSTALL_DIR}/conf/masters ]; then
   while read master
   do
-    if [ "$master" == "localhost" ] || [ "$master" == "$HOSTNAME" ] ; then
-      eval $"$args $instance" 2>&1 | sed "s/^/$master: /" &
+    if [[ ! -z $activeMaster && "$master" =~ $activeMaster ]]; then
+      echo "$activeMaster is the current active DcsMaster"
     else
+      L_PDSH="ssh -q -n $DCS_SSH_OPTS"
       if ${DCS_SLAVE_PARALLEL:-true}; then
-        ssh -q -n $DCS_SSH_OPTS $master $"$args $instance"\
+        ${L_PDSH} $master $"$args $instance"\
           2>&1 | sed "s/^/$master: /" &
       else # run each command serially
-        ssh -q -n $DCS_SSH_OPTS $master $"$args $instance" \
-          2>&1 | sed "s/^/$master: /" &
+        ${L_PDSH} $master $"$args $instance" \
+          2>&1 | sed "s/^/$master: /" 
       fi
-    fi
-  
+    fi 
     if [ "$DCS_SLAVE_SLEEP" != "" ]; then
       sleep $DCS_SLAVE_SLEEP
     fi
   
     let instance++
 
-  done < "$HOSTLIST"
+  done < "${DCS_INSTALL_DIR}/conf/masters"
 fi
 
 wait
diff --git a/dcs/bin/scripts/dcsbind.sh b/dcs/bin/scripts/dcsbind.sh
index d791034..fb0eeb4 100755
--- a/dcs/bin/scripts/dcsbind.sh
+++ b/dcs/bin/scripts/dcsbind.sh
@@ -95,10 +95,10 @@
 
 function check_node {
 	 dcsEcho "checking node $1"
-    for myinterface in `pdsh -N -w $1 /sbin/ip link show|awk -F': ' '/^[0-9]+:.*/ {print $2;}'`; do
-		  ip_output=$(pdsh -N -w $1 /sbin/ip addr show $myinterface)
+    for myinterface in `$L_PDSH -w $1 /sbin/ip link show|cut -d: -f2- | cut -c2- | awk -F': ' '/^[0-9]+:.*/ {print $2;}'`; do
+		  ip_output=$($L_PDSH -w $1 /sbin/ip addr show $myinterface | cut -d: -f2-)
 		  if [ $gv_externalip_set -eq 1 -a $external_only -eq 1 ]; then
-				myifport=`echo "$ip_output" | grep $gv_float_external_ip`
+				myifport=`echo "$ip_output" | grep $gv_float_external_ip/`
 				status=$?
 				if [ $status -eq 0 ]; then
 					 tempinterface=`echo $gv_float_external_interface:$gv_port`
@@ -109,8 +109,8 @@
 						  unbindip=`echo "$myifport"|awk '{print $2}'`
 						  unbindlb=`echo "$myifport"|awk '{print $NF}'`
 						  dcsEcho "external ip $gv_float_external_ip is already in use on node $1 bound to interface $myinterface($unbindlb) - unbind..."
-						  dcsEcho "pdsh -S -w $1 sudo /sbin/ip addr del $unbindip dev $myinterface label $unbindlb"
-						  pdsh -S -w $1 sudo /sbin/ip addr del $unbindip dev $myinterface label $unbindlb
+						  dcsEcho "$L_PDSH -w $1 sudo /sbin/ip addr del $unbindip dev $myinterface"
+						  $L_PDSH -w $1 sudo /sbin/ip addr del $unbindip dev $myinterface
 
 						  status=$?
 						  if [ $status -ne 0 ]; then
@@ -132,7 +132,7 @@
 	 
 	#check if external ip is in use
     dcsEcho "check all nodes $allMyNodes"
-    externalNodes=`pdsh $allMyNodes /sbin/ip addr show | grep $gv_float_external_ip | awk -F' ' '/^.+:[[:space:]]+.*/ {print $1;}' | cut -d':' -f1 | sed '/^$/d'`
+    externalNodes=`$L_PDSH $allMyNodes /sbin/ip addr show | grep -w $gv_float_external_ip | awk -F' ' '/^.+:[[:space:]]+.*/ {print $1;}' | cut -d':' -f1 | sed '/^$/d'`
     if [ ! -z "$externalNodes" ]; then
 		  dcsEcho "find possible node `echo $externalNodes`"
 		  external_only=1
@@ -153,7 +153,7 @@
    bcast=`/sbin/ip addr show $gv_float_external_interface | grep "inet .*$gv_float_external_interface\$" | awk '{print $4}'`
    mask=`/sbin/ip addr show $gv_float_external_interface | grep "inet .*$gv_float_external_interface\$" | awk '{print $2}' | cut -d'/' -f2`
 	
-   /sbin/ip addr show| grep 'inet [^[:space:]]\+ '| awk '{print $2}'| sed -e 's/\/.*//'|grep $gv_float_external_ip > /dev/null
+   /sbin/ip addr show| grep 'inet [^[:space:]]\+ '| awk '{print $2}'| sed -e 's/\/.*//'|grep -w $gv_float_external_ip > /dev/null
    status=$?
    if [ $status -eq 0 ]; then
       dcsEcho "external ip is already bound on node $gv_myhostname - skip bind step"
@@ -231,7 +231,7 @@
 }
 
 function configure_route_tables {
-    gv_default_interface=eth0
+    gv_default_interface=$(/sbin/route | grep default | awk '{print $(NF)}')
     bcast=`/sbin/ip addr show $gv_default_interface | grep "inet .*$gv_default_interface\$" | awk '{print $4}'`
     status=$?
     if [ $status -ne 0 ]; then
@@ -356,35 +356,65 @@
 dcsEcho "gv_float_external_ip :" $gv_float_external_ip
 dcsEcho "gv_float_internal_ip :" $gv_float_internal_ip
 
-#Check if AWS_CLOUD environment variable defined
-if [[ $AWS_CLOUD != "true" ]]; then
+if ! grep -q ^ec2 /sys/hypervisor/uuid 2>/dev/null ; then
+    # Non-AWS system
+    L_PDSH="pdsh -S"
     Check_VirtualIP_InUse_Unbind
     BindFloatIp
 else
-    awscmd="/usr/local/bin/aws ec2 --output text "
-    device_index_to_use=`echo $gv_float_external_interface | sed -e "s@eth\([0-9][0-9]*\)@\1@"`
+    # AWS system
+    awscmd="/usr/bin/aws ec2 --output text "
+    device_index_to_use=`echo $gv_float_external_interface | sed 's/[^0-9]//g'`
     dcsEcho "Using device index $device_index_to_use for $gv_float_external_interface"
 
+    # Test whether the .aws config files exist on this node
+    awstmp=`mktemp -t`
+    if [[ $? != 0 ]]; then
+       dcsEcho "Error while getting a temporary file for $awstmp. Exiting."
+       exit $gv_error
+    fi
+
+    $awscmd describe-instances --query 'Reservations[*].Instances[*].[InstanceId,PrivateDnsName]' >$awstmp 2>/dev/null
+    if [[ $? != 0 ]]; then
+       dcsEcho "Missing .aws config files on node $gv_myhostname"
+       rm -f $awstmp
+       exit $gv_error
+    fi
+     
+    rm -f $awstmp
     # Get instance Id of the instance
-    INSTANCEID=`$awscmd describe-instances |grep -i instances |grep -i $gv_myhostname |cut -f8`
+    INSTANCEID=`$awscmd describe-instances --query 'Reservations[*].Instances[*].[InstanceId,PrivateDnsName]' |grep -i -w $gv_myhostname |cut -f1`
     dcsEcho "Using Instance id $INSTANCEID"
 
     # Get the network interface configured for the vpc
-    NETWORKINTERFACE=`$awscmd describe-network-interfaces| grep -i networkinterfaces| grep -i $gv_float_internal_ip|cut -f5`
+    NETWORKINTERFACE=`$awscmd describe-network-interfaces --query 'NetworkInterfaces[*].[NetworkInterfaceId,PrivateIpAddress]' |grep -i -w $gv_float_internal_ip |cut -f1`
     dcsEcho "Using network interface $NETWORKINTERFACE"
 
     # Get the attachment id for the network interface
-    ATTACH_ID=`$awscmd describe-network-interfaces --network-interface-ids $NETWORKINTERFACE |grep -i attachment |cut -f3`
+    ATTACH_ID=`$awscmd describe-network-interfaces --network-interface-ids $NETWORKINTERFACE --filters Name=attachment.device-index,Values=$device_index_to_use --query 'NetworkInterfaces[*].[Attachment.AttachmentId]'`
     if [ ! -z "$ATTACH_ID" ]; then
         dcsEcho "Detaching attachment Id:" $ATTACH_ID
         $awscmd detach-network-interface --attachment-id $ATTACH_ID
+        network_interface_status=`$awscmd describe-network-interfaces --filters Name=attachment.attachment-id,Values=$ATTACH_ID --query NetworkInterfaces[*].[Status]`
+        while [[ "$network_interface_status" = "in-use" ]] 
+        do
+           dcsEcho "Attachment Status ... " $network_interface_status
+           sleep 10
+           network_interface_status=`$awscmd describe-network-interfaces --filters Name=attachment.attachment-id,Values=$ATTACH_ID --query NetworkInterfaces[*].[Status]`
+        done
     fi
 
     dcsEcho "Going to attach network interface $NETWORKINTERFACE to the another instance"
-    sleep 10
     NEWATTACH_ID=`$awscmd attach-network-interface --network-interface-id $NETWORKINTERFACE --instance-id $INSTANCEID --device-index $device_index_to_use`
     dcsEcho "New attachment Id " $NEWATTACH_ID
-    sleep 10
+    newattachment_status=`$awscmd describe-network-interfaces --filters Name=attachment.attachment-id,Values=$NEWATTACH_ID --query NetworkInterfaces[*].[Attachment.Status]`
+    while [[ "$newattachment_status" != "attached" ]] 
+    do
+     dcsEcho "New Attachment Status ... " $newattachment_status
+     sleep 10
+     newattachment_status=`$awscmd describe-network-interfaces --filters Name=attachment.attachment-id,Values=$NEWATTACH_ID --query NetworkInterfaces[*].[Attachment.Status]`
+    done
+
     configure_route_tables
 fi
 
diff --git a/dcs/bin/scripts/dcsunbind.sh b/dcs/bin/scripts/dcsunbind.sh
index e58f4b3..6f2111e 100755
--- a/dcs/bin/scripts/dcsunbind.sh
+++ b/dcs/bin/scripts/dcsunbind.sh
@@ -24,26 +24,24 @@
 #
 
 function check_node {
-    for myinterface in `$SQ_PDSH -N -w $1 /sbin/ip link show|awk -F': ' '/^[0-9]+:.*/ {print $2;}'`; do
-	ip_output=$($SQ_PDSH -N -w $1 /sbin/ip addr show $myinterface)
+    for myinterface in `$L_PDSH -w $1 /sbin/ip link show|cut -d: -f2- | cut -c2- | awk -F': ' '/^[0-9]+:.*/ {print $2;}'`; do
+	ip_output=$($L_PDSH -w $1 /sbin/ip addr show $myinterface | cut -d: -f2- | cut -c2-)
 	if [ $gv_externalip_set -eq 1 -a $external_only -eq 1 ]; then
-            myifport=`echo "$ip_output" | grep $gv_float_external_ip`
+            myifport=`echo "$ip_output" | grep -w $gv_float_external_ip`
 	    status=$?
 	    if [ $status -eq 0 ]; then
-	       tempinterface=`echo $gv_float_external_interface:$gv_port`
+	       tempinterface=`echo $gv_float_interface:$gv_port`
 	       # check if another interface is bound to this virtual ip address
 	       echo "$myifport" | grep "$tempinterface"  > /dev/null
 	       if [ $? -eq 1 -o "$1" != "$gv_myhostname" ]; then
                    unbindip=`echo "$myifport" | awk '{print $2}'`
 		   unbindlb=`echo "$myifport"|awk '{print $NF}'`
-		   echo "External ip $gv_float_external_ip is in use on node $1 bound to interface $myinterface($unbindlb) - unbind..."
-		   $SQ_PDSH -S -w $1 sudo /sbin/ip addr del $unbindip dev $myinterface label $unbindlb
+		   echo "Virtual ip $gv_float_external_ip is in use on node $1 bound to interface $myinterface($unbindlb) - unbinding..."
+		   $L_PDSH -w $1 sudo /sbin/ip addr del $unbindip dev $myinterface
                    status=$?
 		   if [ $status -ne 0 ]; then
 		      echo "Failed to unbind - status is $status"
 		      exit -1 
-                   else
-                      echo "Unbind successful"
                    fi
 	       fi # endif node+name match
 	    fi # endif looking for external ip
@@ -52,9 +50,8 @@
 }
 
 function Check_VirtualIP_InUse_And_Unbind {
-    echo "check all nodes to see if external virtual ip address is in use and unbind if necessary"
     mynode=""
-    externalNodes=`$SQ_PDSH $MY_NODES /sbin/ip addr show | grep $gv_float_external_ip | awk -F' ' '/^.+:[[:space:]]+.*/ {print $1;}' | cut -d':' -f1 | sed '/^$/d'`
+    externalNodes=`$L_PDSH $MY_NODES /sbin/ip addr show | grep -w $gv_float_external_ip | awk -F' ' '/^.+:[[:space:]]+.*/ {print $1;}' | cut -d':' -f1 | sed '/^$/d'`
     if [ ! -z "$externalNodes" ]; then
 	external_only=1
 	internal_only=0
@@ -69,24 +66,28 @@
 if [[ $ENABLE_HA == "false" ]]; then
  exit 0
 fi
-
-gv_float_internal_ip=`python $DCS_INSTALL_DIR/bin/scripts/parse_dcs_site.py|cut -d$'\n' -f2`
-gv_float_external_ip=`python $DCS_INSTALL_DIR/bin/scripts/parse_dcs_site.py|cut -d$'\n' -f2`
-gv_float_interface=`python $DCS_INSTALL_DIR/bin/scripts/parse_dcs_site.py|cut -d$'\n' -f1`
-gv_port=`python $DCS_INSTALL_DIR/bin/scripts/parse_dcs_site.py|cut -d$'\n' -f3`
+dcsunbindtmp=`mktemp -t`
+python $DCS_INSTALL_DIR/bin/scripts/parse_dcs_site.py > $dcsunbindtmp
+gv_float_internal_ip=`cat $dcsunbindtmp |grep "^dcs.master.floating.ip.external.ip.address:"| cut -f2 -d":"`
+gv_float_external_ip=$gv_float_internal_ip
+gv_float_interface=`cat $dcsunbindtmp |grep "^dcs.master.floating.ip.external.interface:"| cut -f2 -d":"`
+device_index_to_use=`echo $gv_float_interface | sed 's/[^0-9]//g'`
+gv_port=`cat $dcsunbindtmp |grep "^dcs.master.port:"| cut -f2 -d":"`
 if [[ -z $gv_port ]]; then
    gv_port=23400
 fi
 gv_externalip_set=1
 gv_internalip_set=1
 
-if [[ $AWS_CLOUD == "true" ]]; then
-   awscmd="/usr/local/bin/aws ec2 --output text "
+if grep -q ^ec2 /sys/hypervisor/uuid 2>/dev/null ; then
+   # AWS system
+   awscmd="/usr/bin/aws ec2 --output text "
+
    #Get the network interface
-   NETWORKINTERFACE=`$awscmd describe-network-interfaces| grep -i networkinterfaces| grep -i $gv_float_internal_ip|cut -f5`
+   NETWORKINTERFACE=`$awscmd describe-network-interfaces --query 'NetworkInterfaces[*].[NetworkInterfaceId,PrivateIpAddress]' |grep -i -w $gv_float_internal_ip |cut -f1`
 
    # Get the attachment id for the network interface
-   ATTACH_ID=`$awscmd describe-network-interfaces --network-interface-ids $NETWORKINTERFACE |grep -i attachment |cut -f3`
+   ATTACH_ID=`$awscmd describe-network-interfaces --network-interface-ids $NETWORKINTERFACE --filters Name=attachment.device-index,Values=$device_index_to_use --query 'NetworkInterfaces[*].[Attachment.AttachmentId]'`
 
    echo "Detaching attachment Id:" $ATTACH_ID
    if [ ! -z "$ATTACH_ID" ]; then
@@ -94,6 +95,10 @@
       echo "Detached interface :" $NETWORKINTERFACE
    fi
 else
+   # non-AWS
+   L_PDSH="pdsh -S"
+
    Check_VirtualIP_InUse_And_Unbind
 fi
+rm -f $dcsunbindtmp
 exit 0
diff --git a/dcs/bin/scripts/parse_dcs_site.py b/dcs/bin/scripts/parse_dcs_site.py
index c5947a4..aab5a00 100755
--- a/dcs/bin/scripts/parse_dcs_site.py
+++ b/dcs/bin/scripts/parse_dcs_site.py
@@ -31,16 +31,8 @@
 doc = minidom.parse(dcsconfig_dir+"/dcs-site.xml")
 props = doc.getElementsByTagName("property")
 for prop in props:
-        pname = prop.getElementsByTagName("name")[0]
-        if (pname.firstChild.data == "dcs.master.port"):
-           pvalue = prop.getElementsByTagName("value")[0]
-           dcsPort=pvalue.firstChild.data
-           print("%s" % (dcsPort))
-        if (pname.firstChild.data == "dcs.master.floating.ip.external.ip.address"):
-           pvalue = prop.getElementsByTagName("value")[0]
-           float_ipaddress=pvalue.firstChild.data
-           print("%s" % (float_ipaddress))
-        if (pname.firstChild.data == "dcs.master.floating.ip.external.interface"):
-           pvalue = prop.getElementsByTagName("value")[0]
-           float_interface=pvalue.firstChild.data
-           print("%s" % (float_interface))
+      tagName = prop.getElementsByTagName("name")[0]
+      pname = tagName.childNodes[0].data
+      tagValue = prop.getElementsByTagName("value")[0]
+      pvalue = tagValue.childNodes[0].data
+      print("%s:%s" % (pname, pvalue))
diff --git a/dcs/bin/start-dcs.sh b/dcs/bin/start-dcs.sh
index 8894453..fcd4677 100755
--- a/dcs/bin/start-dcs.sh
+++ b/dcs/bin/start-dcs.sh
@@ -47,8 +47,8 @@
 fi
 
 if [ -z "$master" ] ; then
-  if [ ! -z "${DCS_PRIMARY_MASTER}" ] && [ -s ${DCS_PRIMARY_MASTER} ] ; then
-    master_node=`cat ${DCS_PRIMARY_MASTER}| egrep -v '^#|^$'`
+  if [ ! -z "${DCS_MASTERS}" ] && [ -s ${DCS_MASTERS} ] ; then
+    master_node=`head -n 1 ${DCS_MASTERS}`
     if [ ! -z "$master_node" ] ; then
       master=`echo $master_node | awk '{print $1}'`
     fi
@@ -59,8 +59,9 @@
   "$bin"/dcs-daemon.sh --config "${DCS_CONF_DIR}" start master 
 else
   remote_cmd="cd ${DCS_HOME}; $bin/dcs-daemon.sh --config ${DCS_CONF_DIR} start master"
-  ssh -q -n $DCS_SSH_OPTS $master $remote_cmd 2>&1 | sed "s/^/$master: /"
+  L_PDSH="ssh -q -n $DCS_SSH_OPTS"
+  ${L_PDSH} $master $remote_cmd 2>&1 | sed "s/^/$master: /"
 fi
 
 "$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" --hosts "${DCS_SERVERS}" start server
-"$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" --hosts "${DCS_BACKUP_MASTERS}" start master-backup
+"$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" --hosts "${DCS_MASTERS}" start master-backup
diff --git a/dcs/bin/stop-dcs.sh b/dcs/bin/stop-dcs.sh
index fbb1818..cdd40ad 100755
--- a/dcs/bin/stop-dcs.sh
+++ b/dcs/bin/stop-dcs.sh
@@ -36,7 +36,7 @@
   exit $errCode
 fi
 
-"$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" --hosts "${DCS_BACKUP_MASTERS}" stop master-backup
+"$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" --hosts "${DCS_MASTERS}" stop master-backup
 
 master=`$bin/dcs --config "${DCS_CONF_DIR}" org.trafodion.dcs.zookeeper.ZkUtil /$USER/dcs/master|tail -n 1`
 errCode=$?
@@ -49,12 +49,16 @@
   exit $errCode
 fi
 
-if [ "$master" == "" ] || [ "$master" == "$(hostname -f)" ] ; then
-  "$bin"/dcs-daemon.sh --config "${DCS_CONF_DIR}" stop master 
-else
+    activeMaster=$($DCS_INSTALL_DIR/bin/getActiveMaster.sh)
+
     remote_cmd="cd ${DCS_HOME}; $bin/dcs-daemon.sh --config ${DCS_CONF_DIR} stop master"
-    ssh -q -n $DCS_SSH_OPTS $master $remote_cmd 2>&1 | sed "s/^/$master: /"
-fi
+    L_PDSH="ssh -q -n $DCS_SSH_OPTS"
+
+    if [[ ! -z $activeMaster ]]; then
+        ${L_PDSH} $activeMaster $remote_cmd 2>&1 | sed "s/^/$activeMaster: /"
+    else
+        ${L_PDSH} $master $remote_cmd 2>&1 | sed "s/^/$master: /"
+    fi
 
 "$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" --hosts "${DCS_SERVERS}" stop server 
 "$bin"/dcs-daemons.sh --config "${DCS_CONF_DIR}" stop zookeeper
diff --git a/dcs/conf/master b/dcs/conf/master
deleted file mode 100644
index e69de29..0000000
--- a/dcs/conf/master
+++ /dev/null
diff --git a/dcs/conf/backup-masters b/dcs/conf/masters
similarity index 100%
rename from dcs/conf/backup-masters
rename to dcs/conf/masters
diff --git a/dcs/src/main/asciidoc/_chapters/configuration.adoc b/dcs/src/main/asciidoc/_chapters/configuration.adoc
index 39180c6..b4a4c3d 100644
--- a/dcs/src/main/asciidoc/_chapters/configuration.adoc
+++ b/dcs/src/main/asciidoc/_chapters/configuration.adoc
@@ -28,7 +28,7 @@
 :experimental:
 
 This chapter is the Not-So-Quick start guide to DCS configuration.
-Please read this chapter carefully and ensure that all requirements have 
+Please read this chapter carefully and ensure that all requirements have
 been satisfied.  Failure to do so will cause you (and us) grief debugging strange errors.
 
 DCS uses the same configuration mechanism as Apache Hadoop.
@@ -36,10 +36,10 @@
 
 [TIP]
 ====
-Be careful editing XML.  Make sure you close all elements. Run your file through +xmllint+ or similar to 
+Be careful editing XML.  Make sure you close all elements. Run your file through +xmllint+ or similar to
 ensure well-formedness of your document after an edit session.
 ====
- 
+
 .Keep Configuration In Sync Across the Cluster
 [WARNING]
 ====
@@ -48,101 +48,71 @@
 A restart is needed for servers to pick up changes.
 ====
 
-This section lists required services and some required system configuration. 
+This section lists required services and some required system configuration.
 
-== Java    
-.Java
-[cols="1,1,1,4", options="header"]
-|===
-|DCS Version
-|JDK 6
-|JDK 7
-|JDK 8
-
-|1.1
-|Not Supported
-|yes
-|Running with JDK 8 has not been tested.
-
-|1.0
-|yes
-|Not Supported
-|Not Supported
-
-|===
+== Java
+DCS is configured to use the default JDK version as defined by the Trafodion configuration.
 
 [[os]]
-== Operating System  
- 
-=== ssh  
- 
+== Operating System
+
+=== ssh
+
 _ssh_ must be installed and _sshd_ must be running to use DCS's' scripts to manage remote DCS daemons. You must be able to ssh to all
 nodes, including your local node, using passwordless login (Google "ssh passwordless login").
 
-=== DNS  
+=== DNS
 Both forward and reverse DNS resolving should work. If your machine has multiple interfaces, DCS will use the
 interface that the primary hostname resolves to.
 
-=== Loopback IP 
+=== Loopback IP
 DCS expects the loopback IP address to be 127.0.0.1.  Ubuntu and some other distributions,
 for example, will default to 127.0.1.1 and this will cause problems for you. _/etc/hosts_ should look something like this:
 ----
             127.0.0.1 localhost
             127.0.0.1 ubuntu.ubuntu-domain ubuntu
 ----
-        
-=== NTP 
+=== NTP
 The clocks on cluster members should be in basic alignments. Some skew is tolerable but wild skew could generate odd behaviors. Run
 link:http://en.wikipedia.org/wiki/Network_Time_Protocol[NTP] on your cluster, or an equivalent.
 
-=== Windows 
-DCS is not supported on Windows.
+== Run modes
 
-== Run modes  
-      
-=== Single Node 
+=== Single Node
 This is the default mode. Single node is what is described in the <<quickstart,quickstart>> section. In
 single node, it runs all DCS daemons and a local ZooKeeper all on the same node. Zookeeper binds to a well known port.
 
-=== Multi Node 
+=== Multi Node
 Multi node is where the daemons are spread across all nodes in the cluster. Before proceeding, ensure you have a working Trafodion instance.
 
-Below we describe the different setups. Starting, verification and exploration of your install. Configuration is described in a
-section that follows, <<running.and.confirming.your.installation,Running and Confirming Your Installation>>. 
+The sections below describe the various configuration files that need to be set up before starting DCS processes. Configuration is described in a
+section that follows, <<running.and.confirming.your.installation,Running and Confirming Your Installation>>.
 
 To set up a multi-node deploy, you will need to configure DCS by editing files in the DCS _conf_ directory.
-        
+
 You may need to edit _conf/dcs-env.sh_ to tell DCS which _java_ to use. In this file you set DCS environment
 variables such as the heap size and other options for the _JVM_, the preferred location for log files,
 etc. Set `JAVA_HOME` to point at the root of your _java_ install.
 
-==== _servers_ 
+==== _servers_
 
-In addition, a multi-node deploy requires that you
-modify _conf/servers_. The  _servers_ file  lists all hosts that you would have running
-DcsServers, one host per line or the host name followed by the number of master executor servers. 
+A multi-node deploy requires that you modify _conf/servers_. The _servers_ file lists all hosts that you would have running
+DcsServers, one host per line, or the host name followed by the number of master executor servers.
 All servers listed in this file will be started and stopped when DCS start or stop is run.
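+
+For example, a hypothetical three-node _servers_ file (the trailing number, when
+present, gives the count of master executor servers for that host) might look like:
+----
+            host1.example.org 4
+            host2.example.org 4
+            host3.example.org 2
+----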
 
-==== _backup-masters_ 
+==== _masters_
 
-The  _backup-masters_ file lists all hosts that you would have running
-backup DcsMaster processes, one host per line. All servers listed in this file will be started 
-and stopped when DCS start or stop is run.
+The _masters_ file lists the hosts of the primary and backup DcsMaster processes, one host per line. All servers listed
+in this file will be started and stopped when DCS start or stop is run.
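+
+For example, a _conf/masters_ file naming a hypothetical node1 as the primary DcsMaster and node2 as a backup:
+
+----
+node1
+node2
+----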
 
-==== _master_ 
-
-The  _master_ file lists the host of the primary DcsMaster process. Only one host is allowed to 
-be the primary master. The server listed in this file will be started 
-and stopped when DCS start or stop is run.
- 
-==== ZooKeeper and DCS 
+==== ZooKeeper and DCS
 See section <<zookeeper,Zookeeper>> for ZooKeeper setup for DCS.
 
 [[running.and.confirming.your.installation]]
-=== Running and Confirming Your Installation 
+=== Running and Confirming Your Installation
 
-Make sure Trafodion is running first. Start and stop the Trafodion instance by running _sqstart.sh_ over in the
-`TRAF_HOME/sql/scripts` directory. You can ensure it started properly by testing with _sqcheck_. 
+Before you start DCS, make sure Trafodion is up and running. Start and stop the Trafodion instance by running _sqstart.sh_ in the
+`$TRAF_HOME/sql/scripts` directory. You can ensure it started properly by testing with _sqcheck_.
 If you are managing your own ZooKeeper, start it and confirm it is running; otherwise, DCS will start up ZooKeeper
 for you as part of its start process.
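+
+A typical startup check, assuming `$TRAF_HOME` is set (output omitted):
+
+[source,console]
+----
+cd $TRAF_HOME/sql/scripts
+./sqstart.sh
+./sqcheck
+----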
 
@@ -151,17 +121,17 @@
 bin/start-dcs.sh
 ----
 
-Run the above from the `DCS_HOME` directory. 
+Run the above from the `DCS_HOME` directory.
 
 You should now have a running DCS instance. DCS logs can be
 found in the _logs_ subdirectory. Check them out
 especially if DCS had trouble starting.
 
-DCS also puts up a UI listing vital attributes and metrics. By default its deployed on the DcsMaster 
-host at port 24410 (DcsServers put up an informational http server at 24430+their instance number). 
+DCS also puts up a UI listing vital attributes and metrics. By default it is deployed on the DcsMaster
+host at port 24410 (DcsServers put up an informational HTTP server at 24430 plus their instance number).
 If the DcsMaster were running on a host named `master.example.org` on the default port, to see the
 DcsMaster's homepage you'd point your browser at  _http://master.example.org:24410_.
- 
+
 To stop DCS after exiting the DCS shell, enter
 ----
 ./bin/stop-dcs.sh
@@ -170,12 +140,12 @@
 Shutdown can take a moment to complete. It can take longer if your cluster comprises many machines.
 
 [[zookeeper]]
-== ZooKeeper 
+== ZooKeeper
 
 DCS depends on a running ZooKeeper cluster. All participating nodes and clients need to be able to access the
 running ZooKeeper ensemble. DCS by default manages a ZooKeeper "cluster" for you. It will start and stop the ZooKeeper ensemble
 as part of the DCS start/stop process. You can also manage the ZooKeeper ensemble independent of DCS and just point DCS at
-the cluster it should use. To toggle DCS management of ZooKeeper, use the `DCS_MANAGES_ZK` variable in 
+the cluster it should use. To toggle DCS management of ZooKeeper, use the `DCS_MANAGES_ZK` variable in
 _conf/dcs-env.sh_. This variable, which defaults to `true`, tells DCS whether to start/stop the ZooKeeper ensemble servers as part of DCS
 start/stop.
 
@@ -185,22 +155,22 @@
 _dcs-site.xml_ XML configuration file by prefacing the ZooKeeper option name with
 `dcs.zookeeper.property`. For example, the `clientPort` setting in ZooKeeper can be changed
 by setting the `dcs.zookeeper.property.clientPort` property. For all default values used by DCS, including ZooKeeper
-configuration, see section <<dcs_default_configurations,DCS Default Configuration>>. Look for the `dcs.zookeeper.property` prefix 
+configuration, see section <<dcs_default_configurations,DCS Default Configuration>>. Look for the `dcs.zookeeper.property` prefix.
 For the full list of ZooKeeper configurations, see ZooKeeper's _zoo.cfg_. DCS does not ship with a _zoo.cfg_ so you will need to browse
 the _conf_ directory in an appropriate ZooKeeper download.
 
 You must at least list the ensemble servers in _dcs-site.xml_ using the `dcs.zookeeper.quorum` property. This property
-defaults to a single ensemble member at `localhost` which is not suitable for a fully distributed DCS. 
+defaults to a single ensemble member at `localhost`, which is not suitable for a fully distributed DCS.
 (It binds to the local machine only and remote clients will not be able to connect).
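+
+A minimal _dcs-site.xml_ sketch combining the two settings discussed above (the zknode host names are placeholders; 2181 is ZooKeeper's standard client port):
+
+[source,xml]
+----
+<configuration>
+  <property>
+    <name>dcs.zookeeper.quorum</name>
+    <value>zknode1,zknode2,zknode3</value>
+  </property>
+  <property>
+    <name>dcs.zookeeper.property.clientPort</name>
+    <value>2181</value>
+  </property>
+</configuration>
+----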
 
 How many ZooKeepers should I run?
 
 You can run a ZooKeeper ensemble that comprises 1 node only, but in production it is recommended that you run a
 ZooKeeper ensemble of 3, 5 or 7 machines; the more members an ensemble has, the more tolerant the ensemble is of host
-failures. Also, run an odd number of machines. In ZooKeeper, an even number of peers is supported, but it is normally not used 
-because an even sized ensemble requires, proportionally, more peers to form a quorum than an odd sized ensemble requires. For example, an 
-ensemble with 4 peers requires 3 to form a quorum, while an ensemble with 5 also requires 3 to form a quorum. Thus, an ensemble of 5 allows 2 peers to 
-fail, and thus is more fault tolerant than the ensemble of 4, which allows only 1 down peer.                 
+failures. Also, run an odd number of machines. In ZooKeeper, an even number of peers is supported, but it is normally not used
+because an even-sized ensemble requires, proportionally, more peers to form a quorum than an odd-sized ensemble does. For example, an
+ensemble with 4 peers requires 3 to form a quorum, while an ensemble with 5 also requires 3 to form a quorum. Thus, an ensemble of 5 allows 2 peers to
+fail, making it more fault tolerant than the ensemble of 4, which allows only 1 down peer.
 
 Give each ZooKeeper server around 1GB of RAM and, if possible, its own dedicated disk (a dedicated disk is the best thing you can do
 to ensure a performant ZooKeeper ensemble). For very heavily loaded clusters, run ZooKeeper servers on separate machines
@@ -211,8 +181,8 @@
 and then edit _conf/dcs-site.xml_ and set `dcs.zookeeper.property.clientPort` and
 `dcs.zookeeper.quorum`. You should also set `dcs.zookeeper.property.dataDir` to other than
 the default as the default has ZooKeeper persist data under _/tmp_ which is often cleared on system
-restart. In the example below we have ZooKeeper persist to _/user/local/zookeeper_. 
-            
+restart. In the example below, we have ZooKeeper persist to _/user/local/zookeeper_.
+
 [source,xml]
 ----
 <configuration>
@@ -250,16 +220,16 @@
 === Using existing ZooKeeper ensemble
 
 To point DCS at an existing ZooKeeper cluster, one that is not managed by DCS, uncomment and set `DCS_MANAGES_ZK`
-in _conf/dcs-env.sh_ to `false`  
+in _conf/dcs-env.sh_ to `false`:
 
-[source,console] 
+[source,console]
 ----
 # Tell DCS whether it should manage its own instance of ZooKeeper or not.
 export DCS_MANAGES_ZK=false
 ----
-  
+
 Next set ensemble locations and client port, if non-standard, in
-_dcs-site.xml_, or add a suitably configured _zoo.cfg_ to DCS's _CLASSPATH_. 
+_dcs-site.xml_, or add a suitably configured _zoo.cfg_ to DCS's _CLASSPATH_.
 DCS will prefer the configuration found in _zoo.cfg_ over any settings in _dcs-site.xml_.
 
 When DCS manages ZooKeeper, it will start/stop the
@@ -278,28 +248,28 @@
 DCS shuts down, it doesn't take ZooKeeper down with it.
 
 For more information about running a distinct ZooKeeper
-cluster, see the link:http://hadoop.apache.org/zookeeper/docs/current/zookeeperStarted.html[ZooKeeper Getting Started Guide].  
-Additionally, see the link:http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A7[ZooKeeper Wiki] or the 
-link:http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper documentation] 
+cluster, see the link:http://hadoop.apache.org/zookeeper/docs/current/zookeeperStarted.html[ZooKeeper Getting Started Guide].
+Additionally, see the link:http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A7[ZooKeeper Wiki] or the
+link:http://zookeeper.apache.org/doc/r3.3.3/zookeeperAdmin.html#sc_zkMulitServerSetup[ZooKeeper documentation]
 for more information on ZooKeeper sizing.
- 
+
 == Configuration Files
-         
+
 === _dcs-site.xml_ and _dcs-default.xml_
 For DCS, site-specific customizations go into the _conf/dcs-site.xml_ file. For the
 list of configurable properties, see <<dcs_default_configurations,DCS Default Configuration>>
 below or view the raw _dcs-default.xml_ source file in the DCS source code at _src/main/resources_.
 
 Not all configuration options make it out to _dcs-default.xml_. Configuration
-that it is thought rare anyone would change can exist only in code; the only way 
+that is thought unlikely ever to be changed can exist only in code; the only way
 to discover such configurations is by reading the source code itself.
-      
+
 Currently, changes here will require a cluster restart for DCS to notice the change.
-      
+
 //The file dcs-default.xml is generated as part of  the build of the dcs site.  See the dcs pom.xml.
 //The generated file is an asciidoc file.
 // dcs/src/main/asciidoc
-// 
+//
 include::../../../../target/asciidoc/dcs-default.adoc[]
 
 === _dcs-env.sh_
@@ -307,21 +277,19 @@
 a DCS daemon such as heap size and garbage collector configs. You can also set configurations for log directories,
 niceness, ssh options, where to locate process pid files, etc. Open the file at _conf/dcs-env.sh_ and peruse its content.
 Each option is fairly well documented. Add your own environment variables here if you want them read by DCS daemons on startup. Changes to this file require a restart of DCS.
- 
+
 === _log4j.properties_
 
 Edit this file to change the rate at which DCS log files are rolled over and the level at which DCS logs messages. Changes to this file require a restart of DCS.
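+
+For example, to raise the log level for the DCS classes to DEBUG (the logger name assumes the stock `org.trafodion.dcs` package; adjust it to match your build):
+
+----
+log4j.logger.org.trafodion.dcs=DEBUG
+----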
 
-=== _master_ 
-A plain-text file which lists hostname or host IP address  on which the primary master process should be started. Only one host is allowed to be the primary master
-
-=== _backup-masters_ 
-A plain-text file which lists hosts on which the backup master process should be started. Only one host per line is allowed
+=== _masters_
+A plain-text file which lists the host names or host IP addresses on which the primary and backup DcsMaster processes should be started. The first entry will be the primary DcsMaster and the remaining lines
+will be the backup DcsMaster nodes. Only one host per line is allowed.
 
 === _servers_
 A plain-text file which lists the hosts on which the DcsServer process should be started, one host per line, optionally followed by the number of master executor servers. All servers listed in this file will be started and stopped when DCS start or stop is run.
- 
-== Example Configurations 
+
+== Example Configurations
 
 === Basic Distributed DCS Install
 
@@ -357,9 +325,9 @@
 
 ==== _servers_
 
-In this file, you list the nodes that will run DcsServers. In this case, 
+In this file, you list the nodes that will run DcsServers. In this case,
 there are two DcsServers per node, each starting a single mxosrvr:
-[source,console] 
+[source,console]
 ----
   example1
   example2
@@ -380,21 +348,17 @@
   example4 2
 ----
 
-==== _master_
+==== _masters_
 
-In this file, you list the node that will run primary DcsMasters. 
-[source,console] 
+In this file, you list all the nodes that will run DcsMasters. The first entry
+will be the primary DcsMaster and the remaining nodes will be the backup DcsMasters.
+In the example below, host4 will be the primary DcsMaster node, and host5 and host6 are
+the backup DcsMaster nodes.
+[source,console]
 ----
-  example4
-----
-
-==== _backup-masters_
-
-In this file, you list the nodes that will run backup DcsMasters. In this case, 
-there is a backup master running on the second node:
-[source,console] 
-----
-  example2
+  host4
+  host5
+  host6
 ----
 
 ==== _dcs-env.sh_
@@ -403,7 +367,7 @@
 are setting the DCS heap to be 4G instead of the default 128M.
 
 [source,console]
-----    
+----
 $ git diff dcs-env.sh
 diff --git a/conf/dcs-env.sh b/conf/dcs-env.sh
 index e70ebc6..96f8c27 100644
@@ -411,58 +375,58 @@
 +++ b/conf/dcs-env.sh
 @@ -31,7 +31,7 @@ export JAVA_HOME=/usr/java/jdk1.7.0/
  # export DCS_CLASSPATH=
- 
+
  # The maximum amount of heap to use, in MB. Default is 128.
 -# export DCS_HEAPSIZE=128
 +export DCS_HEAPSIZE=4096
 
- 
+
  # Extra Java runtime options.
  # Below are what we set by default. May only work with SUN JVM.
 ----
- 
+
 Use _rsync_ to copy the content of the _conf_ directory to all nodes of the cluster.
-      
+
 [[ha.configurations]]
-== High Availability(HA) Configuration 
-The master configuration file for DcsMaster may be configured by adding the host name to the _conf/master_ file. If the master is 
-configured to start on the remote node then, during start of dcs the primary master will be started on the remote 
-node. If the _conf/master_ file is empty then the primary master will be started on the host where the dcs start script was run.
-Similarly, DcsMaster backup servers may be configured by adding host names to the _conf/backup-masters_ file. They are
-started and stopped automatically by the _bin/master-backup.sh_ script whenever DCS is started or stopped. Every backup 
+== High Availability (HA) Configuration
+The hosts for the DcsMaster may be configured by adding their names to the _conf/masters_ file. If the master is
+configured to start on a remote node, then during DCS startup the primary master will be started on that remote
+node. If the _conf/masters_ file is empty, then the primary master will be started on the host where the DCS start script was run.
+Similarly, DcsMaster backup servers may be configured by adding additional host names to the _conf/masters_ file. They are
+started and stopped automatically by the _bin/master-backup.sh_ script whenever DCS is started or stopped. Every backup
 DcsMaster follows the current leader DcsMaster, watching for it to fail. If the leader fails, the first
 backup DcsMaster in line for succession checks whether floating IP is enabled. If enabled, it executes
 the _bin/scripts/dcsbind.sh_ script to add a floating IP address to an interface on its node. It then continues
 with normal initialization and eventually starts listening for new client connections. It may take
 several seconds for the takeover to complete. When a failed node is restored, a new DcsMaster backup may
-be started manually by executing the _bin/dcs-daemon.sh_ script on the restored node.
+be started manually by executing the _dcsstart_ script from any node.
 
 ----
->bin/dcs-daemon.sh start master
-----   
+>$TRAF_HOME/sql/scripts/dcsstart
+----
 
 The newly created DcsMaster backup process will take its place at the back of the line waiting for the current DcsMaster leader to fail.
-   
+
 === `dcs.master.port`
 The default value is 23400. This is the port the DcsMaster listener binds to
 waiting for JDBC/ODBC T4 client connections. The value may need to be changed
 if this port number conflicts with other ports in use on your cluster.
-           
+
 To change this configuration, edit _conf/dcs-site.xml_, copy the changed file around the cluster and restart.
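+
+For example, to move the listener off the default port (the value shown is illustrative):
+
+[source,xml]
+----
+<property>
+  <name>dcs.master.port</name>
+  <value>23500</value>
+</property>
+----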
 
 === `dcs.master.port.range`
 The default value is 100. This is the total number of ports that MXOSRVRs will scan trying
 to find an available port to use. You must ensure the value is large enough to support the
-number of MXOSRVRs configured in _conf/servers_. 
+number of MXOSRVRs configured in _conf/servers_.
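+
+As a sizing sketch: if _conf/servers_ lists four nodes with 30 master executor servers each, 120 ports are needed, so the default of 100 is too small:
+
+[source,xml]
+----
+<property>
+  <name>dcs.master.port.range</name>
+  <value>120</value>
+</property>
+----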
 
 === `dcs.master.floating.ip`
-The default value is false. When set to true the floating IP feature in the DcsMaster is enabled via the _bin/dcsbind.sh_ script. 
+The default value is false. When set to true, the floating IP feature in the DcsMaster is enabled via the _bin/dcsbind.sh_ script.
 This allows a backup DcsMaster to take over and set the floating IP address.
 
 === `dcs.master.floating.ip.external.interface`
-There is no default value. You must ensure the value contains the correct interface for your network configuration. 
+There is no default value. You must ensure the value contains the correct interface for your network configuration.
 
 === `dcs.master.floating.ip.external.ip.address`
 There is no default value. It is important that you set this to the dotted IP address appropriate for your network.
-          
+
 To change this configuration, edit _dcs-site.xml_, copy the changed file to all nodes in the cluster and restart dcs.
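+
+Putting the three floating-IP properties together, a _dcs-site.xml_ sketch (the interface name and address are placeholders for your network):
+
+[source,xml]
+----
+<property>
+  <name>dcs.master.floating.ip</name>
+  <value>true</value>
+</property>
+<property>
+  <name>dcs.master.floating.ip.external.interface</name>
+  <value>eth0</value>
+</property>
+<property>
+  <name>dcs.master.floating.ip.external.ip.address</name>
+  <value>10.1.1.100</value>
+</property>
+----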
diff --git a/docs/client_install/src/asciidoc/_chapters/odbc_windows.adoc b/docs/client_install/src/asciidoc/_chapters/odbc_windows.adoc
index 67e2d6d..bb85eca 100644
--- a/docs/client_install/src/asciidoc/_chapters/odbc_windows.adoc
+++ b/docs/client_install/src/asciidoc/_chapters/odbc_windows.adoc
@@ -106,9 +106,9 @@
 == Set Up ODBC Data Source

 

 1.  Start the Microsoft ODBC Administrator:

-* On Windows 7: *Start>All Programs>{project-name} ODBC _version_>MS ODBC Administrator*

-* On Windows 8: Right-click the *{project-name} ODBC _version_* icon on the desktop and select MS ODBC Administrator.

-* On Windows 10: Click the Windows icon in the menu bar. Type *Set up ODBC data sources (64-bit)*. Click on the found item. 

+* On Windows 7: *Start > All Programs > {project-name} ODBC _version_ > MS ODBC Administrator*

+* On Windows 8: *Start > Control Panel > System and Security > Administrative Tools > ODBC Data Sources (64-bit)*

+* On Windows 10: Click the Windows icon on the taskbar. Select *Control Panel*. Search for *Set up ODBC data sources (64-bit)*. Click on the found item.

 +

 image:{images}/winodbc_admin_intro.jpg[Windows ODBC Admin Intro Screen]

 +

@@ -167,9 +167,9 @@
 To enable compression in the ODBC driver or to change the compression setting, follow these steps:

 

 1.  Launch the MS ODBC Administrator. 

-* On Windows 7: *Start>All Programs>{project-name} ODBC _version_>MS ODBC Administrator*

-* On Windows 8: Right-click the *{project-name} ODBC _version_* icon on the desktop and select MS ODBC Administrator.

-* On Windows 10: Right-click the Windows icon in the menu bar. Select *Settings*. Search for *Set up ODBC data sources (64-bit)*. Click on the found item. 

+* On Windows 7: *Start > All Programs > {project-name} ODBC _version_ > MS ODBC Administrator*

+* On Windows 8: *Start > Control Panel > System and Security > Administrative Tools > ODBC Data Sources (64-bit)*

+* On Windows 10: Right-click the Windows icon on the taskbar. Select *Control Panel*. Search for *Set up ODBC data sources (64-bit)*. Click on the found item.

 

 2.  In the *ODBC Data Source Administrator* dialog box, select the *User DSN* tab, select the name of your data source under 

 *User Data Sources*, and click *Configure*. If you did not create a data source, please refer to 

@@ -233,9 +233,9 @@
 [[win_odbc_uninstall]]

 == Uninstalling Windows ODBC Driver

 1.  Start to remove the ODBC driver:

-* On Windows 7: *Start>All Programs>{project-name} ODBC _version_>Remove TRAF ODBC _version_*

-* On Windows 8: Right-click the *{project-name} ODBC _version_* icon on the desktop and select *Remove TRAF ODBC _version_*.

-* On Windows 10: Right-click the Windows icon in the menu bar. Select *Control Panel*. Click on *Uninstall a program*. Locate *{project-name} ODBC64 _version_* and select it. Click on *Uninstall*.

+* On Windows 7: *Start > All Programs > {project-name} ODBC _version_ > Remove TRAF ODBC _version_*

+* On Windows 8: *Start > Control Panel > Uninstall a program*. Locate *{project-name} ODBC64 _version_* and select *Uninstall {project-name} ODBC64 _version_*.

+* On Windows 10: Right-click the Windows icon on the taskbar. Select *Control Panel*. Click on *Apps and features*. Locate *{project-name} ODBC64 _version_* and right-click it. Select *Uninstall*.

 

 2.  When the *Windows Installer* dialog box asks you if you want to uninstall this product, click *Yes*.

 3.  The *{project-name} ODBC _version_* dialog box displays the status and asks you to wait while `Windows configures {project-name} ODBC _version_` (that is, removes

diff --git a/docs/client_install/src/asciidoc/_chapters/preparation.adoc b/docs/client_install/src/asciidoc/_chapters/preparation.adoc
index 9b635b3..c55023f 100644
--- a/docs/client_install/src/asciidoc/_chapters/preparation.adoc
+++ b/docs/client_install/src/asciidoc/_chapters/preparation.adoc
@@ -104,10 +104,12 @@
 |===

 | File                               | Usage

 | `JDBCT4.zip`                       | {project-name} JDBC Type 4 Driver.

+| `DISCLAIMER`                       | Apache disclaimer.

 | `LICENCE`                          | Apache license.

 | `NOTICE`                           | Apache notice.

 | `odbc64_linux.tar.gz`              | {project-name} odb tool.

 | `TRAF_ODBC_Linux_Driver_64.tar.gz` | {project-name} ODBC driver for Linux.

+| `TRAFODB-2.2.0.exe`                | {project-name} odb tool for Windows.

 | `trafci.zip`                       | The {project-name} command interpreter `trafci`.

 | `TFODBC64-*.exe`                   | *[Not included in this release]*^1^ {project-name} ODBC Driver for Windows.

 |===

diff --git a/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc b/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc
index 22327ee..3d866e3 100644
--- a/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc
+++ b/docs/messages_guide/src/asciidoc/_chapters/binder_msgs.adoc
@@ -2889,3 +2889,16 @@
 
 *Recovery:* Set the ATTEMPT_ASYNCHRONOUS_ACCESS flag to ON and resubmit.
 
+[[SQL-4323]]
+== SQL 4323
+
+```
+Use of predefined UDF <name> is deprecated and this function will be removed in a future release. Please use the function with the same name in schema TRAFODION."_LIBMGR_" instead. You may need to issue this command first: INITIALIZE TRAFODION, UPGRADE LIBRARY MANAGEMENT.
+```
+
+*Cause:* See message.
+
+*Effect:* The operation succeeds; this is only a warning.
+
+*Recovery:* See message.
+
diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
index c72feeb..83f8313 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_functions_and_expressions.adoc
@@ -7953,6 +7953,211 @@
 ```
 
 <<<
+[[sysdate_function]]
+== SYSDATE Function
+
+The SYSDATE function, which is equivalent to the `CURRENT_DATE` function, retrieves the current date of the server rather than the session.
+
+The returned value is `DATE` and the default format is `YYYY-MM-DD`.
+
+For example, if you execute a query on your local machine located in Shanghai on 2018-03-14 06:00:00 (UTC+8) against a database server located in Berlin on 2018-03-13 23:00:00 (UTC+1), the result of `SELECT SYSDATE FROM DUAL;` is 2018-03-13 rather than 2018-03-14.
+
+```
+SYSDATE
+```
+
+[[examples_of_sysdate]]
+=== Examples of SYSDATE
+
+* This example returns the current date.
+
++
+```
+SQL>SELECT SYSDATE FROM DUAL;
+
+(EXPR)
+----------
+2018-03-15
+
+--- 1 row(s) selected.
+```
+
+* This example returns the dates of yesterday, today, and tomorrow.
+
++
+```
+SQL>SELECT SYSDATE -1 AS yesterday, 
+SYSDATE AS today,
+SYSDATE +1 AS tomorrow FROM DUAL;
+ 
+YESTERDAY  TODAY      TOMORROW  
+---------- ---------- ----------
+2018-03-14 2018-03-15 2018-03-16
+ 
+--- 1 row(s) selected.
+```
+
+* The following examples show that the values of `SYSDATE` can be converted to character values.
+
++
+```
+SQL>SELECT TO_CHAR (SYSDATE, 'DD-MON-YYYY') FROM DUAL;
+ 
+(EXPR)
+-----------
+15-MAR-2018
+ 
+--- 1 row(s) selected.
+```
+
++
+```
+SQL>SELECT TO_CHAR (SYSDATE,'HH:MI:SS') FROM DUAL;
+ 
+*** ERROR[4072] The operand of function TO_CHAR must be a datetime containing a time. [2018-03-15 11:49:22]
+```
+
++
+``` 
+SQL>SELECT TO_CHAR (SYSDATE, 'MM/DD/YYYY HH24:MI:SS') FROM DUAL;
+ 
+(EXPR)
+-------------------
+03/15/2018 00:00:00
+ 
+--- 1 row(s) selected.
+```
+
+* This example converts days to minutes using `SYSDATE`.
++
+```
+SQL>SELECT (SYSDATE-(SYSDATE-7))*1440 FROM DUAL;
+ 
+(EXPR)
+-----------------
+           10080 
+ 
+--- 1 row(s) selected.
+```
+
+* This example demonstrates how the SYSDATE function works in a SQL statement.
++
+Suppose that we have the following table:
+
++
+```
+SQL>SELECT * FROM orders;
+
+ORDERNUM ORDER_DATE DELIV_DATE SALESREP CUSTNUM
+-------- ---------- ---------- -------- -------
+  100210 2018-03-02 2018-04-10      220     127
+  100250 2018-01-23 2018-06-16      220     123
+  101220 2018-02-21 2018-12-15      221     156
+  200300 2018-02-06 2018-07-15      222     126
+  200320 2018-03-08 2018-07-20      223     121
+  200490 2018-02-19 2018-11-01      226     123
+  300350 2018-03-03 2018-08-10      231     123
+  300380 2018-01-19 2018-08-15      226     156
+  400410 2018-01-27 2018-09-14      227     154
+  500450 2018-03-12 2018-09-16      220     124
+  600480 2018-02-12 2018-10-14      226     123
+  700510 2018-02-01 2018-10-16      220     143
+  800660 2018-01-09 2018-11-01      229     100
+
+--- 13 row(s) selected.
+```
+
++
+The SYSDATE value is 2018-03-15.
++
+```
+SQL>SELECT SYSDATE FROM DUAL;
+ 
+(EXPR)
+----------
+2018-03-15
+ 
+--- 1 row(s) selected.
+```
+
++
+This statement returns qualified rows using the `SYSDATE` function.
++
+```
+SQL>SELECT * FROM orders 
+WHERE DAY(deliv_date) = DAY(sysdate+1) 
+AND ordernum <>100210 
+AND salesrep=220 
+ORDER BY order_date DESC;
+ 
+ORDERNUM ORDER_DATE DELIV_DATE SALESREP CUSTNUM
+-------- ---------- ---------- -------- -------
+  500450 2018-03-12 2018-09-16      220     124
+  700510 2018-02-01 2018-10-16      220     143
+  100250 2018-01-23 2018-06-16      220     123
+ 
+--- 3 row(s) selected.
+```
+
+<<<
+[[systimestamp_function]]
+== SYSTIMESTAMP Function
+
+The SYSTIMESTAMP function, which is equivalent to the `CURRENT_TIMESTAMP` function, provides much higher granularity than the `SYSDATE` function and retrieves the current date and time (including fractional seconds with six-digit precision) of the server rather than the session.
+
+The returned value is `TIMESTAMP` and the default format is `YYYY-MM-DD HH:MM:SS.FFFFFF`.
+
+For example, if you execute a query on your local machine located in Shanghai on 2018-03-14 06:00:00 (UTC+8) against a database server located in Berlin on 2018-03-13 23:00:00 (UTC+1), the result of `SELECT SYSTIMESTAMP FROM DUAL;` is 2018-03-13 23:00:00 rather than 2018-03-14 06:00:00.
+
+```
+SYSTIMESTAMP
+```
+
+[[examples_of_systimestamp]]
+=== Examples of SYSTIMESTAMP
+
+* This example calculates the date and time of an anniversary using the SYSTIMESTAMP function.
+
++
+```
+SQL>SELECT SYSTIMESTAMP AS today, SYSTIMESTAMP + INTERVAL '12' MONTH AS anniversary FROM DUAL;
+
+TODAY                      ANNIVERSARY
+-------------------------- --------------------------
+2018-03-15 11:19:42.400382 2019-03-15 11:19:42.400382
+ 
+--- 1 row(s) selected.
+```
+
+* This example demonstrates how to insert the value of SYSTIMESTAMP into a column.
+
++
+```
+SQL>CREATE TABLE test1 (C1 TIMESTAMP, C2 VARCHAR(40));
+
+--- SQL operation complete.
+```
+
++
+```
+SQL>INSERT INTO test1 VALUES (SYSTIMESTAMP, 'This is the time that I insert values');
+
+--- 1 row(s) inserted.
+```
+
++
+```
+SQL>SELECT * FROM test1;
+
+C1                         C2                                      
+-------------------------- ----------------------------------------
+2018-03-15 11:33:32.091057 This is the time that I insert values        
+
+--- 1 row(s) selected.
+```
+
+
+<<<
 [[tan_function]]
 == TAN Function
 
diff --git a/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc b/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc
index 48c2f4f..72bfac6 100644
--- a/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc
+++ b/docs/sql_reference/src/asciidoc/_chapters/sql_statements.adoc
@@ -2982,14 +2982,6 @@
 When you perform a CREATE TABLE LIKE, whether or not you include the WITH CONSTRAINTS clause, the target table will have all

 the NOT NULL column constraints that exist for the source table with different constraint names.

 

-*** `WITH PARTITIONS`

-+

-directs {project-name} SQL to use partition definitions from _source-table_. Each new table partition resides on the same volume

-as its original _source-table_ counterpart. The new table partitions do not inherit partition names from the original table.

-Instead, {project-name} SQL generates new names based on the physical file location.

-+

-If you specify the LIKE clause and the SALT USING _num_ PARTITIONS clause, you cannot specify WITH PARTITIONS.

-

 *** `WITHOUT DIVISION`

 +

 directs {project-name} SQL to not use divisioning from _source-table_. If this clause is omitted, then 

@@ -3500,6 +3492,148 @@
 ```

 

 <<<

+[[create_table_examples_create_external_table]]

+==== Examples of CREATE EXTERNAL TABLE 

+

+This example compares the execution time of reading Hive tables with and without a Trafodion external table.

+

+The former takes less time than the latter, since the Trafodion external table supplies upper bounds for varchar lengths, which may lead to better plans and/or run-time behavior.

+

+TIP: Either running UPDATE STATISTICS or using a Trafodion external table may improve performance. To get the full performance benefit, run UPDATE STATISTICS and use the Trafodion external table together, as sketched below.

+
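+Taking the TIP above, a sketch of the statistics step for the Hive table (the ON EVERY COLUMN clause is one common choice; see the UPDATE STATISTICS documentation for the full syntax):
+
+```
+SQL>UPDATE STATISTICS FOR TABLE hive.hive.test_mix ON EVERY COLUMN;
+
+--- SQL operation complete.
+```
+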

+This is the definition of the *Hive table* _test_mix_, which has a Trafodion external table; its size is 137.6 GB.

+

+```

+SQL>SHOWDDL test_mix;

+

+CREATE EXTERNAL TABLE test_mix(

+	mix_id int not null not droppable primary key,

+	mix_age int,

+	mix_name string,

+	mix_timestamp01 string,

+	mix_timestamp02 string,

+	mix_other01 string,

+	mix_other02 string,

+	mix_other03 string,

+	mix_other04 string,

+	mix_other05 string,

+	mix_other06 string,

+	mix_other07 string,

+	mix_other08 string,

+	mix_other09 string,

+	mix_other10 string,

+	mix_other11 string,

+	mix_other12 string,

+	mix_other13 string,

+	mix_other14 string,

+	mix_other15 string

+  )

+row format delimited fields terminated by '|'

+  location '/user/trafodion/data/ExternalTable_data';

+--  01-06 short, 07-11 medium, 12-15 long

+```

+

+This is the definition of the *Trafodion external table* _test_mix_, which has the same structure and size as the Hive table _test_mix_.

+

+```

+SQL>SHOWDDL test_mix;

+

+CREATE EXTERNAL TABLE test_mix(

+	mix_id int,

+	mix_age int,

+	mix_name varchar(20),

+	mix_timestamp01 timestamp,

+	mix_timestamp02 varchar(20),

+	mix_other01 varchar(12),

+	mix_other02 varchar(12),

+	mix_other03 varchar(12),

+	mix_other04 varchar(12),

+	mix_other05 varchar(12),

+	mix_other06 varchar(12),

+	mix_other07 varchar(64),

+	mix_other08 varchar(64),

+	mix_other09 varchar(64),

+	mix_other10 varchar(64),

+	mix_other11 varchar(128),

+	mix_other12 varchar(128),

+	mix_other13 varchar(128),

+	mix_other14 varchar(1024),

+	mix_other15 varchar(1024)

+  ) for hive.hive.test_mix;

+--  01-06 short, 07-11 medium, 12-15 long

+```

+

+* When executing the following query:

+

++

+```

+SELECT [LAST 1] * FROM hive.hive.test_mix WHERE mix_other02 = 'Ot';

+```

+

++

+it takes approximately *6 minutes* (average value) to get the result using the Trafodion external table.

+

++

+[cols="20%,20%,20%,20%,20%",options="header"]

+|=====

+|                | First Result | Second Result | Third Result | Average Value

+| Start Time     | 2018/03/07 18:40:31.655159 | 2018/03/07 09:37:50.801345 | 2018/03/07 09:45:05.921706 |

+| End Time       | 2018/03/07 18:49:08.879780 | 2018/03/07 09:43:16.695492 | 2018/03/07 09:48:58.251764 |

+| Elapsed Time   | 2018/03/07 00:08:37.224621 | 2018/03/07 00:05:25.894147 | 2018/03/07 00:03:52.330058 | 00:06:12.23

+| Compile Time   | 2018/03/07 00:00:03.497624 | 2018/03/07 00:00:11.595054 | 2018/03/07 00:00:00.551781 | 00:00:04.8

+| Execution Time | 2018/03/07 00:08:33.715742 | 2018/03/07 00:05:14.295840 | 2018/03/07 00:03:51.708673 | *00:06:12*

+|=====

+

++

+while it takes approximately *14 minutes* (average value) to get the result without using the Trafodion external table.

+

++

+[cols="20%,20%,20%,20%,20%",options="header"]

+|=====

+|                | First Result | Second Result | Third Result | Average Value

+| Start Time     | 2018/03/07 13:33:46.722646 | 2018/03/07 14:39:30.323730 | 2018/03/07 14:54:58.177258 |

+| End Time       | 2018/03/07 13:48:35.028916 | 2018/03/07 14:53:53.887911 | 2018/03/07 15:09:11.517646 |

+| Elapsed Time   | 2018/03/07 00:14:48.306270 | 2018/03/07 00:14:23.564181 | 2018/03/07 00:14:13.340388 | 00:14:28.40

+| Compile Time   | 2018/03/07 00:00:00.773770 | 2018/03/07 00:00:00.388777 | 2018/03/07 00:00:14.856643 | 00:00:04

+| Execution Time | 2018/03/07 00:14:47.530017 | 2018/03/07 00:14:23.146420 | 2018/03/07 00:13:58.463850 | *00:13:58*

+|=====

+

+* When executing the following query:

+

++

+```

+SELECT [LAST 1] mix_other02, substring(mix_other12 from 1 for 10) FROM hive.hive.test_mix WHERE substring(mix_other02 from 1 for 1) = 'O';

+```

+

++

+it takes approximately *6 minutes* (average value) to get the result using the Trafodion external table.

+

++

+[cols="20%,20%,20%,20%,20%",options="header"]

+|=====

+|                | First Result | Second Result | Third Result | Average Value

+| Start Time     | 2018/03/09 14:07:59.353015 | 2018/03/09 14:16:27.725035 | 2018/03/09 14:41:01.454408 |

+| End Time       | 2018/03/09 14:15:05.979546 | 2018/03/09 14:20:44.939776 | 2018/03/09 14:46:58.238246 |

+| Elapsed Time   | 2018/03/09 00:07:06.626531 | 2018/03/09 00:04:17.214741 | 2018/03/09 00:05:56.783838 | 00:05:59

+| Compile Time   | 2018/03/09 00:00:00.197789 | 2018/03/09 00:00:00.296705 | 2018/03/09 00:00:00.227511 | 00:00:00.23 

+| Execution Time | 2018/03/09 00:07:06.411065 | 2018/03/09 00:04:16.873090 | 2018/03/09 00:05:56.554411 | *00:05:59*

+|=====

+

++

+while it takes approximately *35 minutes* (average value) to get the result without using the Trafodion external table.

+

++

+[cols="20%,20%,20%,20%,20%",options="header"]

+|=====

+|                | First Result | Second Result | Third Result | Average Value

+| Start Time     | 2018/03/09 11:01:12.676307 | 2018/03/09 11:35:54.514479 | 2018/03/09 13:15:07.006658 |

+| End Time       | 2018/03/09 11:35:16.264756 | 2018/03/09 12:11:09.587147 | 2018/03/09 13:49:23.740406 |

+| Elapsed Time   | 2018/03/09 00:34:03.588449 | 2018/03/09 00:35:15.072668 | 2018/03/09 00:34:16.733748 | 00:34:44

+| Compile Time   | 2018/03/09 00:00:00.703053 | 2018/03/09 00:00:00.280146 | 2018/03/09 00:00:00.536929 | 00:00:00.5 

+| Execution Time | 2018/03/09 00:34:02.828529 | 2018/03/09 00:35:14.743914 | 2018/03/09 00:34:16.155336 | *00:34:44*

+|=====

+

+<<<

 [[create_view_statement]]

 == CREATE VIEW Statement

 

@@ -6844,7 +6978,7 @@
 

 query-specification is:

 [with-clause]

-SELECT [ "[" ANY N "]" | "[" FIRST N "]" ] [ALL | DISTINCT] select-list

+SELECT [ "[" ANY N "]" | "[" FIRST N "]" | "[" LAST N "]" ] [ALL | DISTINCT] select-list

    FROM table-ref [,table-ref]...

    [WHERE search-condition]

    [SAMPLE sampling-method]

@@ -6989,13 +7123,13 @@
 +

 specifies the unique name of the CTE to be created, which is a valid SQL identifier with a maximum of 128 characters. Duplicate names are not allowed in a single with-clause.

 

-* `"[" ANY _N_ "]" | "[" FIRST _N_ "]"`

+* `"[" ANY _N_ "]" | "[" FIRST _N_ "]" | "[" LAST _N_ "]" `

 +

 specifies that _N_ rows are to be returned (assuming the table has at least _N_ rows and that the qualification

 criteria specified in the WHERE clause, if any, would select at least _N_ rows) and you do not care which _N_ rows

 are chosen (out of the qualified rows) to actually be returned.

 +

-_You must enclose ANY N or FIRST N in square brackets ([])._ The quotation marks ("") around each square bracket in

+You must enclose `ANY _N_`, `FIRST _N_` or `LAST _N_` in square brackets ([]). The quotation marks ("") around each square bracket in

 the syntax diagram indicate that the bracket is a required character that you must type as shown (for example, [ANY 10]

 or [FIRST 5]). Do not include quotation marks in ANY, FIRST or LAST clauses.

 +

@@ -7003,6 +7137,8 @@
 result table of the SELECT statement. _N_ is an unsigned numeric literal with no scale. If _N_ is greater than the number

 of rows in the table, all rows are returned. [ANY _N_] and [FIRST _N_] are disallowed in nested SELECT statements and on

 either side of a UNION operation.

++

+`[LAST _N_]` executes the entire query but returns at most one row, which makes it useful for measuring elapsed time. _N_ must be 0 or 1: `[LAST 0]` returns no rows, and `[LAST 1]` returns only the last qualified row.

 

 * `ALL | DISTINCT`

 +

@@ -8097,6 +8233,31 @@
 SELECT a+1 FROM t GROUP BY 1+a;

 ```

 

+* Examples of using `[LAST _N_]` option:

++

+```

+SQL>SELECT [LAST 0] * FROM employee;

+

+--- 0 row(s) selected.

+```

++

+```

+SQL>SELECT [LAST 1] * FROM employee WHERE jobcode <> 100 AND salary = 30000;

+

+EMPNUM FIRST_NAME      LAST_NAME            DEPTNUM JOBCODE SALARY    

+------ --------------- -------------------- ------- ------- ----------

+   227 XAVIER          SEDLEMEYER              3300     300   30000.00

+

+--- 1 row(s) selected.

+```

++

+```

+SQL>SELECT [LAST 2] * FROM employee;

+

+*** ERROR[15002] Internal parser error: Number of rows must be 0 or 1 with LAST option. 

+. [2018-02-28 18:05:12]

+```

+

 <<<

 [[set_schema_statement]]

 == SET SCHEMA Statement

diff --git a/docs/src/site/markdown/documentation.md b/docs/src/site/markdown/documentation.md
index da13f12..699ab35 100644
--- a/docs/src/site/markdown/documentation.md
+++ b/docs/src/site/markdown/documentation.md
@@ -36,6 +36,27 @@
 Trafodion Stored Procedures in Java (SPJ) Guide       | [Web Book](docs/spj_guide/index.html),          [PDF](docs/spj_guide/Trafodion_SPJ_Guide.pdf)
 UDF Tutorial                                          | [wiki](https://cwiki.apache.org/confluence/display/TRAFODION/Tutorial%3A+The+object-oriented+UDF+interface)
 
+## 2.2.0 Release
+
+Document                                              | Formats
+------------------------------------------------------|-----------------------------------
+Scalar UDFs in C                                      | [wiki](https://cwiki.apache.org/confluence/display/TRAFODION/Scalar+UDFs+-+In+C)
+Trafodion Client Installation Guide                   | [Web Book](docs/2.2.0/client_install/index.html),     [PDF](docs/2.2.0/client_install/Trafodion_Client_Installation_Guide.pdf)
+Trafodion Code Examples                               | [wiki](https://cwiki.apache.org/confluence/display/TRAFODION/Trafodion+Code+Examples)
+Trafodion Command Interface Guide                     | [Web Book](docs/2.2.0/command_interface/index.html),  [PDF](docs/2.2.0/command_interface/Trafodion_Command_Interface_Guide.pdf)
+Trafodion Control Query Default (CQD) Reference Guide | [Web Book](docs/2.2.0/cqd_reference/index.html),      [PDF](docs/2.2.0/cqd_interface/Trafodion_CQD_Reference_Guide.pdf)
+Trafodion Database Connectivity Services Guide        | [Web Book](docs/2.2.0/dcs_reference/index.html),      [API](docs/2.2.0/dcs_reference/apidocs/2.1.0/index.html)
+Trafodion JDBC Type 4 Programmer Reference Guide      | [Web Book](docs/2.2.0/jdbct4ref_guide/index.html),    [PDF](docs/2.2.0/jdbct4ref_guide/Trafodion_JDBCT4_Reference_Guide.pdf)
+Trafodion Load and Transform Guide                    | [Web Book](docs/2.2.0/load_transform/index.html),     [PDF](docs/2.2.0/load_transform/Trafodion_Load_Transform_Guide.pdf)
+Trafodion Messages Guide                              | [Web Book](docs/2.2.0/messages_guide/index.html),     [PDF](docs/2.2.0/messages_guide/Trafodion_Messages_Guide.pdf)
+Trafodion Manageability                               | [wiki](https://cwiki.apache.org/confluence/display/TRAFODION/Trafodion+Manageability)
+Trafodion odb User Guide                              | [Web Book](docs/2.2.0/odb/index.html),                [PDF](docs/2.2.0/odb/Trafodion_odb_User_Guide.pdf)
+Trafodion Provisioning Guide                          | [Web Book](docs/2.2.0/provisioning_guide/index.html), [PDF](docs/2.2.0/provisioning_guide/Trafodion_Provisioning_Guide.pdf)
+Trafodion REST Server Reference Guide                 | [Web Book](docs/2.2.0/rest_reference/index.html),     [API](docs/2.2.0/rest_reference/apidocs/2.1.0/index.html)
+Trafodion SQL Reference Manual                        | [Web Book](docs/2.2.0/sql_reference/index.html),      [PDF](docs/2.2.0/sql_reference/Trafodion_SQL_Reference_Manual.pdf)
+Trafodion Stored Procedures in Java (SPJ) Guide       | [Web Book](docs/2.2.0/spj_guide/index.html),          [PDF](docs/2.2.0/spj_guide/Trafodion_SPJ_Guide.pdf)
+UDF Tutorial                                          | [wiki](https://cwiki.apache.org/confluence/display/TRAFODION/Tutorial%3A+The+object-oriented+UDF+interface)
+
 ## 2.1.0 Release
 
 Document                                              | Formats
diff --git a/docs/src/site/markdown/download.md b/docs/src/site/markdown/download.md
index 8ebac31..d95b20d 100644
--- a/docs/src/site/markdown/download.md
+++ b/docs/src/site/markdown/download.md
@@ -24,6 +24,38 @@
 
 # Download
 
+## 2.2.0 (March 2018)
+
+* [Release Notes](release-notes-2-2-0.html)
+* [Source Code Release][src220]  -  [PGP][pgp220] [SHA1][sha220]
+* Convenience Binaries
+    * [Python Installer][pins220]  -  [PGP][pinpgp220] [SHA1][pinsha220]
+    * [Server][ser220]  -  [PGP][sepgp220] [SHA1][sesha220]
+    * [Clients][cl220]  -  [PGP][clpgp220] [SHA1][clsha220]
+    * [Ambari RPMs][ar220]  -  [PGP][arpgp220] [SHA1][arsha220]
+    * [Ambari Plugin][ap220]  -  [PGP][appgp220] [SHA1][apsha220]
+* [Documentation](documentation.html#220_Release)
+
+[src220]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.2.0/src/apache-trafodion-2.2.0-src.tar.gz
+[pgp220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/src/apache-trafodion-2.2.0-src.tar.gz.asc
+[sha220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/src/apache-trafodion-2.2.0-src.tar.gz.sha
+[pins220]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_pyinstaller-2.2.0.tar.gz
+[pinpgp220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_pyinstaller-2.2.0.tar.gz.asc
+[pinsha220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_pyinstaller-2.2.0.tar.gz.sha
+[ser220]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_server-2.2.0-RH6-x86_64.tar.gz
+[sepgp220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_server-2.2.0-RH6-x86_64.tar.gz.asc
+[sesha220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_server-2.2.0-RH6-x86_64.tar.gz.sha
+[cl220]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_clients-2.2.0-RH6-x86_64.tar.gz
+[clpgp220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_clients-2.2.0-RH6-x86_64.tar.gz.asc
+[clsha220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/apache-trafodion_clients-2.2.0-RH6-x86_64.tar.gz.sha
+[ar220]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.2.0/bin/traf_ambari_rpms/apache-trafodion_server-2.2.0-1.x86_64.rpm
+[arpgp220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/traf_ambari_rpms/apache-trafodion_server-2.2.0-1.x86_64.rpm.asc
+[arsha220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/traf_ambari_rpms/apache-trafodion_server-2.2.0-1.x86_64.rpm.sha
+[ap220]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.2.0/bin/traf_ambari_rpms/traf_ambari-2.2.0-1.noarch.rpm
+[appgp220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/traf_ambari_rpms/traf_ambari-2.2.0-1.noarch.rpm.asc
+[apsha220]: http://www.apache.org/dist/trafodion/apache-trafodion-2.2.0/bin/traf_ambari_rpms/traf_ambari-2.2.0-1.noarch.rpm.sha
+
+
 ## 2.1.0 (May 2017)
 
 * [Release Notes](release-notes-2-1-0.html)
@@ -37,34 +69,34 @@
     * [Ambari Plugin][ap210]  -  [PGP][appgp210] [MD5][apmd5210] [SHA1][apsha210]
 * [Documentation](documentation.html#210_Release)
 
-[src210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz
-[pgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz.asc
-[md5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz.md5
-[sha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz.sha
-[ins210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz
-[inpgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz.asc
-[inmd5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz.md5
-[insha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz.sha
-[pins210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz
-[pinpgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz.asc
-[pinmd5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz.md5
-[pinsha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz.sha
-[ser210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz
-[sepgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz.asc
-[semd5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz.md5
-[sesha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz.sha
-[cl210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz
-[clpgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz.asc
-[clmd5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz.md5
-[clsha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz.sha
-[ar210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm
-[arpgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm.asc
-[armd5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm.md5
-[arsha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm.sha
-[ap210]: http://www.apache.org/dyn/closer.lua/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm
-[appgp210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm.asc
-[apmd5210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm.md5
-[apsha210]: http://www.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm.sha
+[src210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz
+[pgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz.asc
+[md5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz.md5
+[sha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/src/apache-trafodion-2.1.0-incubating-src.tar.gz.sha
+[ins210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz
+[inpgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz.asc
+[inmd5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz.md5
+[insha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_installer-2.1.0-incubating.tar.gz.sha
+[pins210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz
+[pinpgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz.asc
+[pinmd5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz.md5
+[pinsha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_pyinstaller-2.1.0-incubating.tar.gz.sha
+[ser210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz
+[sepgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz.asc
+[semd5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz.md5
+[sesha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_server-2.1.0-RH6-x86_64-incubating.tar.gz.sha
+[cl210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz
+[clpgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz.asc
+[clmd5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz.md5
+[clsha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/apache-trafodion_clients-2.1.0-RH6-x86_64-incubating.tar.gz.sha
+[ar210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm
+[arpgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm.asc
+[armd5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm.md5
+[arsha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/apache-trafodion_server-2.1.0-1.x86_64.rpm.sha
+[ap210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm
+[appgp210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm.asc
+[apmd5210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm.md5
+[apsha210]: http://archive.apache.org/dist/trafodion/apache-trafodion-2.1.0-incubating/bin/traf_ambari_rpms/traf_ambari-2.1.0-1.noarch.rpm.sha
 
 ## 2.0.1 (June 2016)
 
diff --git a/docs/src/site/markdown/release-notes-2-2-0.md b/docs/src/site/markdown/release-notes-2-2-0.md
new file mode 100644
index 0000000..a086b68
--- /dev/null
+++ b/docs/src/site/markdown/release-notes-2-2-0.md
@@ -0,0 +1,539 @@
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+# 2.2.0-Release Notes
+
+This is the first release of Apache Trafodion as a Top-Level Project (TLP) of the Apache Software Foundation.
+
+Build instructions are available in the [Trafodion Contributor Guide](https://cwiki.apache.org/confluence/display/TRAFODION/Create+Build+Environment).
+
+##  New Feature
+
+<span>
+  <table>
+    <tr>
+      <th>Feature</th>
+      <th>Jira ID</th>
+    </tr>
+    <tr>
+      <td>Trafodion Elasticity enhancements</td>
+      <td>[TRAFODION-2001](https://issues.apache.org/jira/browse/TRAFODION-2001)</td>
+    </tr>
+    <tr>
+      <td>LOB Support in JDBC</td>
+      <td>[TRAFODION-2287](https://issues.apache.org/jira/browse/TRAFODION-2287)</td>
+    </tr>
+    <tr>
+      <td>Improve UPDATE STATISTICS performance for varchar columns</td>
+      <td>[TRAFODION-2376](https://issues.apache.org/jira/browse/TRAFODION-2376)</td>
+    </tr>
+    <tr>
+      <td>RMS enhancements</td>
+      <td>[TRAFODION-2420](https://issues.apache.org/jira/browse/TRAFODION-2420)</td>
+    </tr>
+    <tr>
+      <td>jdbcT4 profile configuration for publish to maven central</td>
+      <td>[TRAFODION-2513](https://issues.apache.org/jira/browse/TRAFODION-2513)</td>
+    </tr>
+    <tr>
+      <td>Improve the log4j and log4cxx infrastructure in Trafodion</td>
+      <td>[TRAFODION-2596](https://issues.apache.org/jira/browse/TRAFODION-2596)</td>
+    </tr>
+    <tr>
+      <td>Port Esgyn DTM changes to Trafodion</td>
+      <td>[TRAFODION-2623](https://issues.apache.org/jira/browse/TRAFODION-2623)</td>
+    </tr>
+  </table>
+</span>
+
+##  Improvement
+
+<span>
+  <table>
+    <tr>
+      <th>Feature</th>
+      <th>Jira ID</th>
+    </tr>
+    <tr>
+      <td>hdfs directories owned by trafodion id  should be under /user/trafodion</td>
+      <td>[TRAFODION-2098](https://issues.apache.org/jira/browse/TRAFODION-2098)</td>
+    </tr>
+    <tr>
+      <td>populateSortCols was flagged as major perf offender during profiling</td>
+      <td>[TRAFODION-2422](https://issues.apache.org/jira/browse/TRAFODION-2422)</td>
+    </tr>
+    <tr>
+      <td>Enhance stringtolob builtin function to take varchar/char columns as parameter</td>
+      <td>[TRAFODION-2516](https://issues.apache.org/jira/browse/TRAFODION-2516)</td>
+    </tr>
+    <tr>
+      <td>Allow scalar UDFs with delimited identifiers</td>
+      <td>[TRAFODION-2517](https://issues.apache.org/jira/browse/TRAFODION-2517)</td>
+    </tr>
+    <tr>
+      <td>Improve handling of index hints</td>
+      <td>[TRAFODION-2569](https://issues.apache.org/jira/browse/TRAFODION-2569)</td>
+    </tr>
+    <tr>
+      <td>Remove deprecated CQD HIVE_MAX_STRING_LENGTH</td>
+      <td>[TRAFODION-2583](https://issues.apache.org/jira/browse/TRAFODION-2583)</td>
+    </tr>
+    <tr>
+      <td>Remove obsolete utility commands, turn off obsolete privileges</td>
+      <td>[TRAFODION-2603](https://issues.apache.org/jira/browse/TRAFODION-2603)</td>
+    </tr>
+    <tr>
+      <td>set rowcount option in UPDATE STATISTICS does not suppress rowcount logic</td>
+      <td>[TRAFODION-2618](https://issues.apache.org/jira/browse/TRAFODION-2618)</td>
+    </tr>
+    <tr>
+      <td>Simplify installation setting of HBase config parameters</td>
+      <td>[TRAFODION-2663](https://issues.apache.org/jira/browse/TRAFODION-2663)</td>
+    </tr>
+    <tr>
+      <td>Ensure RMS can be disabled properly</td>
+      <td>[TRAFODION-2698](https://issues.apache.org/jira/browse/TRAFODION-2698)</td>
+    </tr>
+  </table>
+</span>
+
+## Sub-task
+
+<span>
+  <table>
+    <tr>
+      <th>Sub-task</th>
+      <th>Jira ID</th>
+    </tr>
+    <tr>
+      <td>Implement relational operator for common subexpressions and transformation</td>
+      <td>[TRAFODION-2317](https://issues.apache.org/jira/browse/TRAFODION-2317)</td>
+    </tr>
+    <tr>
+      <td>Add optimizer/opt_error.h to analyzeMessageGuide.py</td>
+      <td>[TRAFODION-2474](https://issues.apache.org/jira/browse/TRAFODION-2474)</td>
+    </tr>
+    <tr>
+      <td>function support: Reverse()</td>
+      <td>[TRAFODION-2485](https://issues.apache.org/jira/browse/TRAFODION-2485)</td>
+    </tr>
+    <tr>
+      <td>Update wiki for scalar UDFs to explain trusted flavor</td>
+      <td>[TRAFODION-2558](https://issues.apache.org/jira/browse/TRAFODION-2558)</td>
+    </tr>
+    <tr>
+      <td>Support Index hints in a DML statement</td>
+      <td>[TRAFODION-2573](https://issues.apache.org/jira/browse/TRAFODION-2573)</td>
+    </tr>
+    <tr>
+      <td>SQL engine work for Elasticity</td>
+      <td>[TRAFODION-2628](https://issues.apache.org/jira/browse/TRAFODION-2628)</td>
+    </tr>
+    <tr>
+      <td>Add check to NATable cache for snapshot info</td>
+      <td>[TRAFODION-2723](https://issues.apache.org/jira/browse/TRAFODION-2723)</td>
+    </tr>
+  </table>
+</span>
+
+## Bug Fixes
+
+<span>
+  <table>
+    <tr>
+      <th>Fix</th>
+      <th>Jira ID</th>
+    </tr>
+    <tr>
+      <td>LP Bug: 1442483 - SQL queries hang when Region Server goes down</td>
+      <td>[TRAFODION-1151](https://issues.apache.org/jira/browse/TRAFODION-1151)</td>
+    </tr>
+    <tr>
+      <td>LP Bug: 1443482 - Accessing hive table with ucs2 encoded field returns 0 rows</td>
+      <td>[TRAFODION-1165](https://issues.apache.org/jira/browse/TRAFODION-1165)</td>
+    </tr>
+    <tr>
+      <td>After uninstalling and reinstalling, dcscheck reports the DCS master is not up</td>
+      <td>[TRAFODION-1989](https://issues.apache.org/jira/browse/TRAFODION-1989)</td>
+    </tr>
+    <tr>
+      <td>Need better errors when DIVISION BY expression is incorrect</td>
+      <td>[TRAFODION-2240](https://issues.apache.org/jira/browse/TRAFODION-2240)</td>
+    </tr>
+    <tr>
+      <td>WITH clause with CTE used in subquery gives error 3288</td>
+      <td>[TRAFODION-2248](https://issues.apache.org/jira/browse/TRAFODION-2248)</td>
+    </tr>
+    <tr>
+      <td>TLOG repeatedly reporting exceptions in deleteEntriesOlderThanASN</td>
+      <td>[TRAFODION-2253](https://issues.apache.org/jira/browse/TRAFODION-2253)</td>
+    </tr>
+    <tr>
+      <td>Need to add privilege checking for the EXPLAIN statement</td>
+      <td>[TRAFODION-2294](https://issues.apache.org/jira/browse/TRAFODION-2294)</td>
+    </tr>
+    <tr>
+      <td>Sequence operators are not parallel since the introduction of WITH clause support</td>
+      <td>[TRAFODION-2324](https://issues.apache.org/jira/browse/TRAFODION-2324)</td>
+    </tr>
+    <tr>
+      <td>bash installer: always copy bashrc template file to trafodion&#39;s $HOME</td>
+      <td>[TRAFODION-2428](https://issues.apache.org/jira/browse/TRAFODION-2428)</td>
+    </tr>
+    <tr>
+      <td>Invalid characters in UCS2 to UTF8 translation are not handled correctly</td>
+      <td>[TRAFODION-2477](https://issues.apache.org/jira/browse/TRAFODION-2477)</td>
+    </tr>
+    <tr>
+      <td>HDFS connection issue during LOB creation</td>
+      <td>[TRAFODION-2495](https://issues.apache.org/jira/browse/TRAFODION-2495)</td>
+    </tr>
+    <tr>
+      <td>TMUDF sometimes does not pass errors from its input table up to the caller</td>
+      <td>[TRAFODION-2499](https://issues.apache.org/jira/browse/TRAFODION-2499)</td>
+    </tr>
+    <tr>
+      <td>Obscure cores seen in Trafodion while running jenkins tests with RH7</td>
+      <td>[TRAFODION-2514](https://issues.apache.org/jira/browse/TRAFODION-2514)</td>
+    </tr>
+    <tr>
+      <td>process abend when updating primary key with TRAF_RELOAD_NATABLE_CACHE ON</td>
+      <td>[TRAFODION-2527](https://issues.apache.org/jira/browse/TRAFODION-2527)</td>
+    </tr>
+    <tr>
+      <td>Update stats on cell or row access to a Trafodion table raises error 9252</td>
+      <td>[TRAFODION-2529](https://issues.apache.org/jira/browse/TRAFODION-2529)</td>
+    </tr>
+    <tr>
+      <td>UPDATE STATISTICS is sensitive to tdm_arkcmp autocommit setting</td>
+      <td>[TRAFODION-2530](https://issues.apache.org/jira/browse/TRAFODION-2530)</td>
+    </tr>
+    <tr>
+      <td>Create index succeeds but resulting index is empty if DOP is greater than 32</td>
+      <td>[TRAFODION-2535](https://issues.apache.org/jira/browse/TRAFODION-2535)</td>
+    </tr>
+    <tr>
+      <td>Salted indexes do not result in parallel index scan plans</td>
+      <td>[TRAFODION-2537](https://issues.apache.org/jira/browse/TRAFODION-2537)</td>
+    </tr>
+    <tr>
+      <td>Skew buster plan not chosen when join predicate involves SUBSTRs</td>
+      <td>[TRAFODION-2552](https://issues.apache.org/jira/browse/TRAFODION-2552)</td>
+    </tr>
+    <tr>
+      <td>RIGHT function gives incorrect answer on UTF-8 varchars sometimes</td>
+      <td>[TRAFODION-2559](https://issues.apache.org/jira/browse/TRAFODION-2559)</td>
+    </tr>
+    <tr>
+      <td>Index plan not chosen for UPDATE when WHERE clause and set clause are on the same index column</td>
+      <td>[TRAFODION-2574](https://issues.apache.org/jira/browse/TRAFODION-2574)</td>
+    </tr>
+    <tr>
+      <td>UPDATE STATS sometimes fails on very long varchars</td>
+      <td>[TRAFODION-2575](https://issues.apache.org/jira/browse/TRAFODION-2575)</td>
+    </tr>
+    <tr>
+      <td>Incremental UPDATE STATS fails on long varchar values</td>
+      <td>[TRAFODION-2576](https://issues.apache.org/jira/browse/TRAFODION-2576)</td>
+    </tr>
+    <tr>
+      <td>Installers should allow multiple LDAP hosts and LDAP UID lines</td>
+      <td>[TRAFODION-2579](https://issues.apache.org/jira/browse/TRAFODION-2579)</td>
+    </tr>
+    <tr>
+      <td>UPSERT USING LOAD running slower than UPSERT with transactions disabled</td>
+      <td>[TRAFODION-2586](https://issues.apache.org/jira/browse/TRAFODION-2586)</td>
+    </tr>
+    <tr>
+      <td>Give better diagnostics when HBase is not available while Trafodion starts</td>
+      <td>[TRAFODION-2592](https://issues.apache.org/jira/browse/TRAFODION-2592)</td>
+    </tr>
+    <tr>
+      <td>Insert Select to/from Trafodion tables containing LOB columns</td>
+      <td>[TRAFODION-2598](https://issues.apache.org/jira/browse/TRAFODION-2598)</td>
+    </tr>
+    <tr>
+      <td>Sort operator merge phase memory pool improvement</td>
+      <td>[TRAFODION-2604](https://issues.apache.org/jira/browse/TRAFODION-2604)</td>
+    </tr>
+    <tr>
+      <td>Rework fix for TRAFODION-2294</td>
+      <td>[TRAFODION-2605](https://issues.apache.org/jira/browse/TRAFODION-2605)</td>
+    </tr>
+    <tr>
+      <td>Input parameters and current functions in input tables of TMUDFs</td>
+      <td>[TRAFODION-2611](https://issues.apache.org/jira/browse/TRAFODION-2611)</td>
+    </tr>
+    <tr>
+      <td>Internal assert in CLEANUP command in some unusual cases</td>
+      <td>[TRAFODION-2612](https://issues.apache.org/jira/browse/TRAFODION-2612)</td>
+    </tr>
+    <tr>
+      <td>HBaseTxClient throws TableNotFoundException for TRAFODION._DTM_.TDDL</td>
+      <td>[TRAFODION-2614](https://issues.apache.org/jira/browse/TRAFODION-2614)</td>
+    </tr>
+    <tr>
+      <td>TMUDF returns wrong results with small numeric inputs</td>
+      <td>[TRAFODION-2615](https://issues.apache.org/jira/browse/TRAFODION-2615)</td>
+    </tr>
+    <tr>
+      <td>Nested join regression after fix for TRAFODION-2569</td>
+      <td>[TRAFODION-2616](https://issues.apache.org/jira/browse/TRAFODION-2616)</td>
+    </tr>
+    <tr>
+      <td>Memory leak in emitRow() in table-mapping UDFs</td>
+      <td>[TRAFODION-2625](https://issues.apache.org/jira/browse/TRAFODION-2625)</td>
+    </tr>
+    <tr>
+      <td>Disk IO counter is not populated for hdfs/hive IOs</td>
+      <td>[TRAFODION-2631](https://issues.apache.org/jira/browse/TRAFODION-2631)</td>
+    </tr>
+    <tr>
+      <td>FLOOR and CEIL return a float data type instead of the argument data type</td>
+      <td>[TRAFODION-2634](https://issues.apache.org/jira/browse/TRAFODION-2634)</td>
+    </tr>
+    <tr>
+      <td>Core on SELECT COUNT(*) using HBase cell access on a salted Trafodion table</td>
+      <td>[TRAFODION-2635](https://issues.apache.org/jira/browse/TRAFODION-2635)</td>
+    </tr>
+    <tr>
+      <td>Modest memory leak in metadata context and with CQS</td>
+      <td>[TRAFODION-2636](https://issues.apache.org/jira/browse/TRAFODION-2636)</td>
+    </tr>
+    <tr>
+      <td>Library management in the absence of password-less ssh</td>
+      <td>[TRAFODION-2637](https://issues.apache.org/jira/browse/TRAFODION-2637)</td>
+    </tr>
+    <tr>
+      <td>Ambari integration - dcs install with HA enabled</td>
+      <td>[TRAFODION-2642](https://issues.apache.org/jira/browse/TRAFODION-2642)</td>
+    </tr>
+    <tr>
+      <td>Obsolete the bash installer</td>
+      <td>[TRAFODION-2644](https://issues.apache.org/jira/browse/TRAFODION-2644)</td>
+    </tr>
+    <tr>
+      <td>mxosrvr connection state doesn&#39;t change to AVAILABLE after the timeout if no client connects to it</td>
+      <td>[TRAFODION-2646](https://issues.apache.org/jira/browse/TRAFODION-2646)</td>
+    </tr>
+    <tr>
+      <td>sqgen no longer provides &quot;overflow&quot; directive for scratch disks</td>
+      <td>[TRAFODION-2647](https://issues.apache.org/jira/browse/TRAFODION-2647)</td>
+    </tr>
+    <tr>
+      <td>Newly added persist configuration section is missing program run-time options</td>
+      <td>[TRAFODION-2648](https://issues.apache.org/jira/browse/TRAFODION-2648)</td>
+    </tr>
+    <tr>
+      <td>Method used in rmscheck script for obtaining status is incompatible with elasticity</td>
+      <td>[TRAFODION-2649](https://issues.apache.org/jira/browse/TRAFODION-2649)</td>
+    </tr>
+    <tr>
+      <td>Sort operator loops at times</td>
+      <td>[TRAFODION-2653](https://issues.apache.org/jira/browse/TRAFODION-2653)</td>
+    </tr>
+    <tr>
+      <td>Change the location of trafodion-site.xml from $TRAF_HOME/etc to config</td>
+      <td>[TRAFODION-2654](https://issues.apache.org/jira/browse/TRAFODION-2654)</td>
+    </tr>
+    <tr>
+      <td>MDAM plans on prefixes sometimes not chosen when they should be</td>
+      <td>[TRAFODION-2655](https://issues.apache.org/jira/browse/TRAFODION-2655)</td>
+    </tr>
+    <tr>
+      <td>Incremental UPDATE STATS fails on very large sample tables</td>
+      <td>[TRAFODION-2662](https://issues.apache.org/jira/browse/TRAFODION-2662)</td>
+    </tr>
+    <tr>
+      <td>privileges regression tests privs1/TEST040-44 output non-printable characters</td>
+      <td>[TRAFODION-2678](https://issues.apache.org/jira/browse/TRAFODION-2678)</td>
+    </tr>
+    <tr>
+      <td>Repeated execution of prepared SQL select statement causes memory leak</td>
+      <td>[TRAFODION-2681](https://issues.apache.org/jira/browse/TRAFODION-2681)</td>
+    </tr>
+    <tr>
+      <td>JVM startup options like heap are not passed correctly</td>
+      <td>[TRAFODION-2682](https://issues.apache.org/jira/browse/TRAFODION-2682)</td>
+    </tr>
+    <tr>
+      <td>The value of the Numeric struct does not need to be converted to big-endian</td>
+      <td>[TRAFODION-2687](https://issues.apache.org/jira/browse/TRAFODION-2687)</td>
+    </tr>
+    <tr>
+      <td>Log files are not created by monitor child processes</td>
+      <td>[TRAFODION-2689](https://issues.apache.org/jira/browse/TRAFODION-2689)</td>
+    </tr>
+    <tr>
+      <td>Monitor fails to start when node names are not of the right form</td>
+      <td>[TRAFODION-2692](https://issues.apache.org/jira/browse/TRAFODION-2692)</td>
+    </tr>
+    <tr>
+      <td>control query cancel qid fails with error 8031 sometimes</td>
+      <td>[TRAFODION-2696](https://issues.apache.org/jira/browse/TRAFODION-2696)</td>
+    </tr>
+    <tr>
+      <td>[ODBC] The maxlength for LargeInt was fixed at 8</td>
+      <td>[TRAFODION-2701](https://issues.apache.org/jira/browse/TRAFODION-2701)</td>
+    </tr>
+    <tr>
+      <td>[ODBC] With the SQL type CHARACTER(n) CHARACTER SET UCS2, calling SQLGetData multiple times returns the wrong length value</td>
+      <td>[TRAFODION-2702](https://issues.apache.org/jira/browse/TRAFODION-2702)</td>
+    </tr>
+    <tr>
+      <td>Using a multi-threaded app with the Linux ODBC driver to connect to Trafodion brings DCS down</td>
+      <td>[TRAFODION-2709](https://issues.apache.org/jira/browse/TRAFODION-2709)</td>
+    </tr>
+    <tr>
+      <td>JDBC LOB tests show symptoms of a leaked statement handle</td>
+      <td>[TRAFODION-2724](https://issues.apache.org/jira/browse/TRAFODION-2724)</td>
+    </tr>
+    <tr>
+      <td>For SQL types REAL, FLOAT, and DOUBLE, a stack overflow occurs when SQLGetData is executed after certain values are inserted</td>
+      <td>[TRAFODION-2725](https://issues.apache.org/jira/browse/TRAFODION-2725)</td>
+    </tr>
+    <tr>
+      <td>Using function strtod is not enough to convert C_CHAR to DOUBLE</td>
+      <td>[TRAFODION-2750](https://issues.apache.org/jira/browse/TRAFODION-2750)</td>
+    </tr>
+    <tr>
+      <td>JDBC executeQuery() throws an exception on WITH ... SELECT statements</td>
+      <td>[TRAFODION-2757](https://issues.apache.org/jira/browse/TRAFODION-2757)</td>
+    </tr>
+    <tr>
+      <td>LOAD and UNLOAD statements with LOB columns cause runtime errors</td>
+      <td>[TRAFODION-2764](https://issues.apache.org/jira/browse/TRAFODION-2764)</td>
+    </tr>
+    <tr>
+      <td>SELECT COUNT(*) from a renamed table should return error 4082 instead of error 8448</td>
+      <td>[TRAFODION-2767](https://issues.apache.org/jira/browse/TRAFODION-2767)</td>
+    </tr>
+    <tr>
+      <td>When converting NULL to SQL_VARCHAR, the length is not sent</td>
+      <td>[TRAFODION-2811](https://issues.apache.org/jira/browse/TRAFODION-2811)</td>
+    </tr>
+    <tr>
+      <td>On Server 2008, the pow() function in the ODBC driver throws STATUS_ILLEGAL_INSTRUCTION</td>
+      <td>[TRAFODION-2818](https://issues.apache.org/jira/browse/TRAFODION-2818)</td>
+    </tr>
+    <tr>
+      <td>Using a failed connection handle to allocate a statement handle causes a crash</td>
+      <td>[TRAFODION-2890](https://issues.apache.org/jira/browse/TRAFODION-2890)</td>
+    </tr>
+    <tr>
+      <td>datalen is wrong when converting a varchar in a table to a local datetime struct</td>
+      <td>[TRAFODION-2902](https://issues.apache.org/jira/browse/TRAFODION-2902)</td>
+    </tr>
+    <tr>
+      <td>Catalog API gives wrong values for the NON_UNIQUE column</td>
+      <td>[TRAFODION-2911](https://issues.apache.org/jira/browse/TRAFODION-2911)</td>
+    </tr>
+    <tr>
+      <td>Python installer fails when Kerberos is enabled</td>
+      <td>[TRAFODION-2935](https://issues.apache.org/jira/browse/TRAFODION-2935)</td>
+    </tr>
+    <tr>
+      <td>INITIALIZE TRAFODION fails on CentOS 6.9</td>
+      <td>[TRAFODION-2941](https://issues.apache.org/jira/browse/TRAFODION-2941)</td>
+    </tr>
+    <tr>
+      <td>License year should be updated</td>
+      <td>[TRAFODION-2942](https://issues.apache.org/jira/browse/TRAFODION-2942)</td>
+    </tr>
+  </table>
+</span>
+
+
+## Task
+
+<span>
+  <table>
+    <tr>
+      <th>Task</th>
+      <th>Jira ID</th>
+    </tr>
+    <tr>
+      <td>JDBCT4 group id needs to be changed to org.apache.trafodion in JAR</td>
+      <td>[TRAFODION-2544](https://issues.apache.org/jira/browse/TRAFODION-2544)</td>
+    </tr>
+    <tr>
+      <td>JDBC T2 build changes needed for Trafodion release</td>
+      <td>[TRAFODION-2554](https://issues.apache.org/jira/browse/TRAFODION-2554)</td>
+    </tr>
+  </table>
+</span>
+
+## Documentation
+
+<span>
+  <table>
+    <tr>
+      <th>Description</th>
+      <th>Jira ID</th>
+    </tr>
+    <tr>
+      <td>Update SQL Reference Manual for WITH clause syntax</td>
+      <td>[TRAFODION-2405](https://issues.apache.org/jira/browse/TRAFODION-2405)</td>
+    </tr>
+    <tr>
+      <td>Add SCRATCH_DISKS CQD</td>
+      <td>[TRAFODION-2521](https://issues.apache.org/jira/browse/TRAFODION-2521)</td>
+    </tr>
+    <tr>
+      <td>Add WITH Clause</td>
+      <td>[TRAFODION-2522](https://issues.apache.org/jira/browse/TRAFODION-2522)</td>
+    </tr>
+    <tr>
+      <td>Add TINYINT data type to SQL Reference Manual</td>
+      <td>[TRAFODION-2548](https://issues.apache.org/jira/browse/TRAFODION-2548)</td>
+    </tr>
+    <tr>
+      <td>Update SQL Reference Manual for the new Hive data type</td>
+      <td>[TRAFODION-2549](https://issues.apache.org/jira/browse/TRAFODION-2549)</td>
+    </tr>
+    <tr>
+      <td>Update SQL Reference Manual for the metadata cleanup command</td>
+      <td>[TRAFODION-2550](https://issues.apache.org/jira/browse/TRAFODION-2550)</td>
+    </tr>
+    <tr>
+      <td>Remove Automating Update Statistics from SQL Reference Manual</td>
+      <td>[TRAFODION-2657](https://issues.apache.org/jira/browse/TRAFODION-2657)</td>
+    </tr>
+    <tr>
+      <td>Update Character String Data Types</td>
+      <td>[TRAFODION-2665](https://issues.apache.org/jira/browse/TRAFODION-2665)</td>
+    </tr>
+  </table>
+</span>
+
+## Supported Platforms
+
+<span>
+  <table>
+    <tr>
+      <td>**Operating Systems**</td>
+      <td>RedHat / CentOS 6.5 -- 6.8</td>
+    </tr>
+    <tr>
+      <td>**Hadoop Distributions**</td>
+      <td>Cloudera distributions CDH 5.4 -- 5.6<br/>
+          Hortonworks distributions HDP 2.3 -- 2.4<br/>
+          Apache Hadoop with Apache HBase 1.0 -- 1.1</td>
+     </tr>
+    <tr>
+      <td>**Java Version**</td>
+      <td>JDK 1.7.0_67 or newer</td>
+    </tr>
+  </table>
+</span>
diff --git a/docs/src/site/markdown/release-notes.md b/docs/src/site/markdown/release-notes.md
index 29f2794..d42d93c 100644
--- a/docs/src/site/markdown/release-notes.md
+++ b/docs/src/site/markdown/release-notes.md
@@ -16,6 +16,7 @@
 
 Release                               | Description                                                           | Date
 --------------------------------------|-----------------------------------------------------------------------|--------------
+**[2.2.0](release-notes-2-2-0.html)** | First Apache Trafodion top level project release.                     | March 2018
 **[2.1.0](release-notes-2-1-0.html)** | Major feature enhancements.                                           | May 2017
 **[2.0.1](release-notes-2-0-1.html)** | Patch release. Client package added to convenience binaries.          | June 2016
 **[2.0.0](release-notes-2-0-0.html)** | Major feature enhancements.                                           | June 2016
diff --git a/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/configuration/dcs-env.xml b/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/configuration/dcs-env.xml
index d44366d..b168db1 100644
--- a/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/configuration/dcs-env.xml
+++ b/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/configuration/dcs-env.xml
@@ -101,11 +101,10 @@
 # export DCS_REST_OPTS="$DCS_REST_OPTS $DCS_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
 # export DCS_ZOOKEEPER_OPTS="$DCS_ZOOKEEPER_OPTS $DCS_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
 
-# File naming host on which DCS Primary Master is configured to run. $DCS_HOME/conf/master by default.
-# export DCS_PRIMARY_MASTER=${DCS_HOME}/conf/master
-
-# File naming hosts on which DCS Backup Masters is configured to run. $DCS_HOME/conf/backup-masters by default.
-# export DCS_BACKUP_MASTERS=${DCS_HOME}/conf/backup-masters
+# File naming hosts on which DCS Masters are configured to run. $DCS_HOME/conf/masters by default.
+# export DCS_MASTERS=${DCS_HOME}/conf/masters
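+# The masters file is plain text listing one DCS Master host per line; the
+# installers generate it from the configured master nodes.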
 
 # File naming hosts on which DCS Servers will run. $DCS_HOME/conf/servers by default.
 # export DCS_SERVERS=${DCS_HOME}/conf/servers
diff --git a/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/package/scripts/trafodionnode.py b/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/package/scripts/trafodionnode.py
index 584d25d..642851e 100755
--- a/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/package/scripts/trafodionnode.py
+++ b/install/ambari-installer/traf-mpack/common-services/TRAFODION/2.1/package/scripts/trafodionnode.py
@@ -138,15 +138,10 @@
          content = InlineTemplate(params.dcs_log4j_template),
          mode=0644)
 
-    serverlist = params.dcs_mast_node_list[0] + '\n'
-    File(os.path.join(trafhome,"master"),
-         owner = params.traf_user, 
-         group = params.traf_group, 
-         content = serverlist,
-         mode=0644)
-
-    serverlist = '\n'.join(params.dcs_back_node_list) + '\n'
-    File(os.path.join(trafhome,"backup-masters"),
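+    # Write the remaining configured master nodes (all but the first) to the
+    # masters conf file, one host per line.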
+    serverlist = '\n'.join(params.dcs_mast_node_list[1:]) + '\n'
+    File(os.path.join(trafhome,"masters"),
          owner = params.traf_user, 
          group = params.traf_group, 
          content = serverlist,
@@ -172,7 +165,7 @@
               owner=params.traf_user,
               mode=0644)
     # install DCS conf files
-    cmd = "source ~/.bashrc ; mv -f ~/dcs-env.sh ~/log4j.properties ~/dcs-site.xml ~/master ~/backup-masters ~/servers $DCS_INSTALL_DIR/conf/"
+    cmd = "source ~/.bashrc ; mv -f ~/dcs-env.sh ~/log4j.properties ~/dcs-site.xml ~/masters ~/servers $DCS_INSTALL_DIR/conf/"
     Execute(cmd,user=params.traf_user)
 
     XmlConfig("rest-site.xml",
diff --git a/install/python-installer/scripts/dcs_setup.py b/install/python-installer/scripts/dcs_setup.py
index 0d94da0..239d953 100755
--- a/install/python-installer/scripts/dcs_setup.py
+++ b/install/python-installer/scripts/dcs_setup.py
@@ -39,8 +39,7 @@
 
     dcs_conf_dir = '%s/dcs-%s/conf' % (traf_home, traf_ver)
     dcs_srv_file = dcs_conf_dir + '/servers'
-    dcs_master_file = dcs_conf_dir + '/master'
-    dcs_bkmaster_file = dcs_conf_dir + '/backup-masters'
+    dcs_master_file = dcs_conf_dir + '/masters'
     dcs_site_file = dcs_conf_dir + '/dcs-site.xml'
     rest_site_file = '%s/rest-%s/conf/rest-site.xml' % (traf_home, traf_ver)
 
@@ -57,7 +56,9 @@
     ### modify dcs config files ###
     # modify master
     dcs_master = nodes[0]
-    append_file(dcs_master_file, dcs_master)
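+    # Append the primary master host with an explicit trailing newline so
+    # that any backup master hosts appended below land on their own lines.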
+    append_file(dcs_master_file, dcs_master+'\n')
 
     # modify dcs-site.xml
     net_interface = run_cmd('ip route |grep default|awk \'{print $5}\'')
@@ -82,9 +81,9 @@
         dcs_floating_ip_cfg = 'export DCS_MASTER_FLOATING_IP=%s' % dcs_floating_ip
         append_file(TRAF_CFG_FILE, dcs_floating_ip_cfg)
 
-        # modify backup_master
+        # append backup master hosts to the masters file
         for dcs_backup_node in dcs_backup_nodes.split(','):
-            append_file(dcs_bkmaster_file, dcs_backup_node)
+            append_file(dcs_master_file, dcs_backup_node)
 
     p.write_xml()