[CARBONDATA-3626] Improve performance when loading data into a carbon table with many columns

This closes #3525
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
index 99a9981..d1379aa 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
@@ -353,7 +353,8 @@
   def writeCarbon(row: InternalRow): Unit = {
     val data = new Array[AnyRef](fieldTypes.length + partitionData.length)
     var i = 0
-    while (i < fieldTypes.length) {
+    val fieldTypesLen = fieldTypes.length
+    while (i < fieldTypesLen) {
       if (!row.isNullAt(i)) {
         fieldTypes(i) match {
           case StringType =>
@@ -367,7 +368,7 @@
       i += 1
     }
     if (partitionData.length > 0) {
-      System.arraycopy(partitionData, 0, data, fieldTypes.length, partitionData.length)
+      System.arraycopy(partitionData, 0, data, fieldTypesLen, partitionData.length)
     }
     writable.set(data)
     recordWriter.write(NullWritable.get(), writable)