[MINOR] Fix wrong javadoc and refactor some naming issues (#2156)

diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/AbstractHoodieWriteClient.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/AbstractHoodieWriteClient.java
index 0f35e27..222e1ab 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/AbstractHoodieWriteClient.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/AbstractHoodieWriteClient.java
@@ -268,7 +268,7 @@
    *
    * @param preppedRecords Prepared HoodieRecords to upsert
    * @param instantTime Instant time of the commit
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O upsertPreppedRecords(I preppedRecords, final String instantTime);
 
@@ -280,7 +280,7 @@
    *
    * @param records HoodieRecords to insert
    * @param instantTime Instant time of the commit
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O insert(I records, final String instantTime);
 
@@ -293,7 +293,7 @@
    *
    * @param preppedRecords HoodieRecords to insert
    * @param instantTime Instant time of the commit
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O insertPreppedRecords(I preppedRecords, final String instantTime);
 
@@ -306,7 +306,7 @@
    *
    * @param records HoodieRecords to insert
    * @param instantTime Instant time of the commit
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O bulkInsert(I records, final String instantTime);
 
@@ -323,7 +323,7 @@
    * @param instantTime Instant time of the commit
    * @param userDefinedBulkInsertPartitioner If specified then it will be used to partition input records before they are inserted
    * into hoodie.
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O bulkInsert(I records, final String instantTime,
                                Option<BulkInsertPartitioner<I>> userDefinedBulkInsertPartitioner);
@@ -343,7 +343,7 @@
    * @param instantTime Instant time of the commit
    * @param bulkInsertPartitioner If specified then it will be used to partition input records before they are inserted
    * into hoodie.
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O bulkInsertPreppedRecords(I preppedRecords, final String instantTime,
                                              Option<BulkInsertPartitioner<I>> bulkInsertPartitioner);
@@ -354,7 +354,7 @@
    *
    * @param keys {@link List} of {@link HoodieKey}s to be deleted
    * @param instantTime Commit time handle
-   * @return JavaRDD[WriteStatus] - RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public abstract O delete(K keys, final String instantTime);
 
@@ -653,7 +653,7 @@
    * Performs Compaction for the workload stored in instant-time.
    *
    * @param compactionInstantTime Compaction Instant Time
-   * @return RDD of WriteStatus to inspect errors and counts
+   * @return Collection of WriteStatus to inspect errors and counts
    */
   public O compact(String compactionInstantTime) {
     return compact(compactionInstantTime, config.shouldAutoCommit());
@@ -663,7 +663,7 @@
    * Commit a compaction operation. Allow passing additional meta-data to be stored in commit instant file.
    *
    * @param compactionInstantTime Compaction Instant Time
-   * @param writeStatuses RDD of WriteStatus to inspect errors and counts
+   * @param writeStatuses Collection of WriteStatus to inspect errors and counts
    * @param extraMetadata Extra Metadata to be stored
    */
   public abstract void commitCompaction(String compactionInstantTime, O writeStatuses,
@@ -710,7 +710,7 @@
    * Ensures compaction instant is in expected state and performs Compaction for the workload stored in instant-time.
    *
    * @param compactionInstantTime Compaction Instant Time
-   * @return RDD of Write Status
+   * @return Collection of WriteStatus
    */
   protected abstract O compact(String compactionInstantTime, boolean shouldComplete);
 
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/bootstrap/FullRecordBootstrapDataProvider.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/bootstrap/FullRecordBootstrapDataProvider.java
index 542dad9..0a07ee5 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/bootstrap/FullRecordBootstrapDataProvider.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/bootstrap/FullRecordBootstrapDataProvider.java
@@ -45,7 +45,7 @@
   }
 
   /**
-   * Generates a list of input partition and files and returns a RDD representing source.
+   * Generates a list of input partitions and files and returns a collection representing the source.
    * @param tableName Hudi Table Name
    * @param sourceBasePath Source Base Path
    * @param partitionPaths Partition Paths
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndex.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndex.java
index c71b34e..6d04594 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndex.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndex.java
@@ -62,7 +62,7 @@
    * TODO(vc): We may need to propagate the record as well in a WriteStatus class
    */
   @PublicAPIMethod(maturity = ApiMaturityLevel.STABLE)
-  public abstract O updateLocation(O writeStatusRDD, HoodieEngineContext context,
+  public abstract O updateLocation(O writeStatuses, HoodieEngineContext context,
                                    HoodieTable<T, I, K, O> hoodieTable) throws HoodieIndexException;
 
   /**
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java
index 8cd0cb2..ad7807b 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/index/HoodieIndexUtils.java
@@ -73,7 +73,7 @@
   public static HoodieRecord getTaggedRecord(HoodieRecord inputRecord, Option<HoodieRecordLocation> location) {
     HoodieRecord record = inputRecord;
     if (location.isPresent()) {
-      // When you have a record in multiple files in the same partition, then rowKeyRecordPairRDD
+      // When you have a record in multiple files in the same partition, then <row key, record> collection
       // will have 2 entries with the same exact in memory copy of the HoodieRecord and the 2
       // separate filenames that the record is found in. This will result in setting
       // currentLocation 2 times and it will fail the second time. So creating a new in memory
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/BulkInsertPartitioner.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/BulkInsertPartitioner.java
index b571fd9..fd1558a 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/BulkInsertPartitioner.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/BulkInsertPartitioner.java
@@ -35,7 +35,7 @@
   I repartitionRecords(I records, int outputSparkPartitions);
 
   /**
-   * @return {@code true} if the records within a RDD partition are sorted; {@code false} otherwise.
+   * @return {@code true} if the records within a partition are sorted; {@code false} otherwise.
    */
   boolean arePartitionRecordsSorted();
 }
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/AbstractWriteHelper.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/AbstractWriteHelper.java
index f5e5e35..caa6ecd 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/AbstractWriteHelper.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/commit/AbstractWriteHelper.java
@@ -32,7 +32,7 @@
 public abstract class AbstractWriteHelper<T extends HoodieRecordPayload, I, K, O, R> {
 
   public HoodieWriteMetadata<O> write(String instantTime,
-                                      I inputRecordsRDD,
+                                      I inputRecords,
                                       HoodieEngineContext context,
                                       HoodieTable<T, I, K, O> table,
                                       boolean shouldCombine,
@@ -42,7 +42,7 @@
     try {
       // De-dupe/merge if needed
       I dedupedRecords =
-          combineOnCondition(shouldCombine, inputRecordsRDD, shuffleParallelism, table);
+          combineOnCondition(shouldCombine, inputRecords, shuffleParallelism, table);
 
       Instant lookupBegin = Instant.now();
       I taggedRecords = dedupedRecords;
@@ -79,7 +79,7 @@
    *
    * @param records     hoodieRecords to deduplicate
    * @param parallelism parallelism or partitions to be used while reducing/deduplicating
-   * @return RDD of HoodieRecord already be deduplicated
+   * @return Collection of HoodieRecords already deduplicated
    */
   public I deduplicateRecords(
       I records, HoodieTable<T, I, K, O> table, int parallelism) {