[CARBONDATA-3589]: Adding NULL segments check and empty segments check before prepriming

Insert-into-select from a Hive table into a partitioned Carbon table
fails when the index server is running, because empty segment IDs were
being sent for pre-priming. Added a check for the same.

This closes #3468
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index fa7b9f5..6bb7e76 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -28,6 +28,7 @@
 import scala.util.Random
 import scala.util.control.Breaks._
 
+import org.apache.commons.lang3.StringUtils
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.io.NullWritable
@@ -594,7 +595,7 @@
 
       // code to handle Pre-Priming cache for loading
 
-      if (carbonLoadModel.getSegmentId != null) {
+      if (!StringUtils.isEmpty(carbonLoadModel.getSegmentId)) {
         DistributedRDDUtils.triggerPrepriming(sqlContext.sparkSession, carbonTable, Seq(),
           operationContext, hadoopConf, List(carbonLoadModel.getSegmentId))
       }
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index 72340f4..2227094 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -869,7 +869,7 @@
       }
 
       // Prepriming for Partition table here
-      if (carbonLoadModel.getSegmentId != null) {
+      if (!StringUtils.isEmpty(carbonLoadModel.getSegmentId)) {
         DistributedRDDUtils.triggerPrepriming(sparkSession,
           table,
           Seq(),