[MINOR] Remove meaningless string concatenation, use slf4j (#1647)

diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
index 3bcfddb..d81ab33 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssBypassWriter.java
@@ -19,8 +19,6 @@
 
 import java.lang.reflect.Field;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.compress.CodecPool;
 import org.apache.hadoop.io.compress.Decompressor;
 
@@ -29,7 +27,6 @@
 // In MR shuffle, MapOutput encapsulates the logic to fetch map task's output data via http.
 // So, in RSS, we should bypass this logic, and directly write data to MapOutput.
 public class RssBypassWriter {
-  private static final Log LOG = LogFactory.getLog(RssBypassWriter.class);
 
   public static void write(MapOutput mapOutput, byte[] buffer) {
     // Write and commit uncompressed data to MapOutput.
diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
index 397d45f..d2fdcdb 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssEventFetcher.java
@@ -21,8 +21,6 @@
 import java.util.LinkedList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapTaskCompletionEventsUpdate;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
@@ -30,11 +28,13 @@
 import org.apache.hadoop.mapreduce.RssMRUtils;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.roaringbitmap.longlong.Roaring64NavigableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.uniffle.common.exception.RssException;
 
 public class RssEventFetcher<K, V> {
-  private static final Log LOG = LogFactory.getLog(RssEventFetcher.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RssEventFetcher.class);
 
   private final TaskAttemptID reduce;
   private final TaskUmbilicalProtocol umbilical;
@@ -104,7 +104,7 @@
     if (taskIdBitmap.getLongCardinality() + tipFailedCount != totalMapsCount) {
       for (int index = 0; index < totalMapsCount; index++) {
         if (!mapIndexBitmap.contains(index)) {
-          LOG.error("Fail to fetch " + " map task on index: " + index);
+          LOG.error("Fail to fetch map task on index: {}", index);
         }
       }
       throw new IllegalStateException(errMsg);
diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
index 27d382c..b07581a 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssFetcher.java
@@ -22,8 +22,6 @@
 import java.text.DecimalFormat;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.Counters;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Reporter;
@@ -32,6 +30,8 @@
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.util.Progress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.uniffle.client.api.ShuffleReadClient;
 import org.apache.uniffle.client.response.CompressedShuffleBlock;
@@ -42,7 +42,7 @@
 
 public class RssFetcher<K, V> {
 
-  private static final Log LOG = LogFactory.getLog(RssFetcher.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RssFetcher.class);
 
   private final Reporter reporter;
 
@@ -235,7 +235,7 @@
     }
     // Check if we can shuffle *now* ...
     if (mapOutput == null) {
-      LOG.info("RssMRFetcher" + " - MergeManager returned status WAIT ...");
+      LOG.info("RssMRFetcher - MergeManager returned status WAIT ...");
       // Not an error but wait to process data.
       // Use a retry flag to avoid re-fetch and re-uncompress.
       hasPendingData = true;
diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
index e29601e..bb8309c 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssInMemoryRemoteMerger.java
@@ -21,8 +21,6 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -40,9 +38,11 @@
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class RssInMemoryRemoteMerger<K, V> extends MergeThread<InMemoryMapOutput<K, V>, K, V> {
-  private static final Log LOG = LogFactory.getLog(RssInMemoryRemoteMerger.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RssInMemoryRemoteMerger.class);
 
   private static final String SPILL_OUTPUT_PREFIX = "spill";
   private final RssRemoteMergeManagerImpl<K, V> manager;
diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
index c9f0d45..16e3a3a 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssRemoteMergeManagerImpl.java
@@ -22,8 +22,6 @@
 import java.util.TreeSet;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
@@ -40,13 +38,15 @@
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.Progress;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.uniffle.common.exception.RssException;
 import org.apache.uniffle.common.filesystem.HadoopFilesystemProvider;
 
 public class RssRemoteMergeManagerImpl<K, V> extends MergeManagerImpl<K, V> {
 
-  private static final Log LOG = LogFactory.getLog(RssRemoteMergeManagerImpl.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RssRemoteMergeManagerImpl.class);
 
   private final String appId;
   private final TaskAttemptID reduceId;
@@ -173,8 +173,7 @@
             (this.memoryLimit
                 * jobConf.getFloat(
                     MRJobConfig.SHUFFLE_MERGE_PERCENT, MRJobConfig.DEFAULT_SHUFFLE_MERGE_PERCENT));
-    LOG.info(
-        "MergerManager: memoryLimit=" + memoryLimit + ", " + "mergeThreshold=" + mergeThreshold);
+    LOG.info("MergerManager: memoryLimit={}, mergeThreshold={}", memoryLimit, mergeThreshold);
 
     this.inMemoryMerger = createRssInMemoryMerger();
     this.inMemoryMerger.start();
diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
index df3f667..704c267 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/RssShuffle.java
@@ -23,8 +23,6 @@
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RawKeyValueIterator;
@@ -38,6 +36,8 @@
 import org.apache.hadoop.mapreduce.RssMRUtils;
 import org.apache.hadoop.util.Progress;
 import org.roaringbitmap.longlong.Roaring64NavigableMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.uniffle.client.api.ShuffleReadClient;
 import org.apache.uniffle.client.api.ShuffleWriteClient;
@@ -48,7 +48,7 @@
 
 public class RssShuffle<K, V> implements ShuffleConsumerPlugin<K, V>, ExceptionReporter {
 
-  private static final Log LOG = LogFactory.getLog(RssShuffle.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RssShuffle.class);
 
   private static final int MAX_EVENTS_TO_FETCH = 10000;
 
diff --git a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
index a631696..4fdf48b 100644
--- a/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
+++ b/client-mr/core/src/main/java/org/apache/hadoop/mapreduce/v2/app/RssMRAppMaster.java
@@ -446,7 +446,7 @@
           MRJobConfig.CACHE_FILE_TIMESTAMPS,
           ts == null ? String.valueOf(currentTs) : currentTs + "," + ts);
       String vis = conf.get(MRJobConfig.CACHE_FILE_VISIBILITIES);
-      conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, vis == null ? "false" : "false" + "," + vis);
+      conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, vis == null ? "false" : "false," + vis);
       long size = status.getLen();
       String sizes = conf.get(MRJobConfig.CACHE_FILES_SIZES);
       conf.set(
diff --git a/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java b/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
index 4b78c3c..6905961 100644
--- a/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
+++ b/client-mr/hadoop2.8/src/main/java/org/apache/uniffle/hadoop/shim/HadoopShimImpl.java
@@ -20,8 +20,6 @@
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.task.reduce.ShuffleClientMetrics;
@@ -30,10 +28,12 @@
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class HadoopShimImpl {
 
-  private static final Log LOG = LogFactory.getLog(HadoopShimImpl.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopShimImpl.class);
 
   public static ShuffleClientMetrics createShuffleClientMetrics(
       TaskAttemptID taskAttemptID, JobConf jobConf) {
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
index a734b10..bcd089b 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssShuffleManager.java
@@ -545,7 +545,7 @@
         }
 
         if (LOG.isDebugEnabled()) {
-          LOG.debug(srcNameTrimmed + ": " + "NumCompletedInputs: " + numCompletedInputs);
+          LOG.debug("{}: NumCompletedInputs: {}", srcNameTrimmed, numCompletedInputs);
         }
 
         if (!isAllInputFetched() && !isShutdown.get()) {
@@ -576,7 +576,7 @@
               }
 
               if (LOG.isDebugEnabled()) {
-                LOG.debug(srcNameTrimmed + ": " + "Processing pending partition: " + partition);
+                LOG.debug("{}: Processing pending partition: {}", srcNameTrimmed, partition);
               }
 
               if (!isShutdown.get()
@@ -810,8 +810,7 @@
       }
     }
     if (LOG.isDebugEnabled()) {
-      LOG.debug(
-          srcNameTrimmed + ": " + "Adding input: " + srcAttemptIdentifier + ", to host: " + host);
+      LOG.debug("{}: Adding input: {}, to host: {}", srcNameTrimmed, srcAttemptIdentifier, host);
     }
 
     if (!validateInputAttemptForPipelinedShuffle(srcAttemptIdentifier)) {
@@ -1196,7 +1195,7 @@
     if (Thread.currentThread().isInterrupted()) {
       // need to cleanup all FetchedInput (DiskFetchedInput, LocalDisFetchedInput), lockFile
       // As of now relying on job cleanup (when all directories would be cleared)
-      LOG.info(srcNameTrimmed + ": " + "Thread interrupted. Need to cleanup the local dirs");
+      LOG.info("{}: Thread interrupted. Need to cleanup the local dirs", srcNameTrimmed);
     }
     if (!isShutdown.getAndSet(true)) {
       // Shut down any pending fetchers
@@ -1364,17 +1363,17 @@
   private class SchedulerFutureCallback implements FutureCallback<Void> {
     @Override
     public void onSuccess(Void result) {
-      LOG.info(srcNameTrimmed + ": " + "Scheduler thread completed");
+      LOG.info("{}: Scheduler thread completed", srcNameTrimmed);
     }
 
     @Override
     public void onFailure(Throwable t) {
       if (isShutdown.get()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug(srcNameTrimmed + ": " + "Already shutdown. Ignoring error: " + t);
+          LOG.debug("{}: Already shutdown. Ignoring error: ", srcNameTrimmed, t);
         }
       } else {
-        LOG.error(srcNameTrimmed + ": " + "Scheduler failed with error: ", t);
+        LOG.error("{}: Scheduler failed with error: ", srcNameTrimmed, t);
         inputContext.reportFailure(TaskFailureType.NON_FATAL, t, "Shuffle Scheduler Failed");
       }
     }
@@ -1405,7 +1404,7 @@
       fetcher.shutdown();
       if (isShutdown.get()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug(srcNameTrimmed + ": " + "Already shutdown. Ignoring event from fetcher");
+          LOG.debug("{}: Already shutdown. Ignoring event from fetcher", srcNameTrimmed);
         }
       } else {
         lock.lock();
@@ -1430,10 +1429,10 @@
       fetcher.shutdown();
       if (isShutdown.get()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug(srcNameTrimmed + ": " + "Already shutdown. Ignoring error from fetcher: " + t);
+          LOG.debug("{}: Already shutdown. Ignoring error from fetcher: ", srcNameTrimmed, t);
         }
       } else {
-        LOG.error(srcNameTrimmed + ": " + "Fetcher failed with error: ", t);
+        LOG.error("{}: Fetcher failed with error: ", srcNameTrimmed, t);
         shuffleError = t;
         inputContext.reportFailure(TaskFailureType.NON_FATAL, t, "Fetch failed");
         doBookKeepingForFetcherComplete();
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
index 584c571..7abfce6 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/impl/RssSimpleFetchedInputAllocator.java
@@ -316,7 +316,7 @@
   private synchronized void unreserve(long size) {
     this.usedMemory -= size;
     if (LOG.isDebugEnabled()) {
-      LOG.debug(srcNameTrimmed + ": " + "Used memory after freeing " + size + " : " + usedMemory);
+      LOG.debug("{}: Used memory after freeing {}: {}", srcNameTrimmed, size, usedMemory);
     }
   }
 
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
index 56e00b3..f8e873d 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffle.java
@@ -444,8 +444,7 @@
         }
       } catch (Throwable e) {
         if (ignoreErrors) {
-          LOG.info(
-              srcNameTrimmed + ": " + "Exception while trying to shutdown merger, Ignoring", e);
+          LOG.info("{}: Exception while trying to shutdown merger, Ignoring", srcNameTrimmed, e);
         } else {
           throw e;
         }
@@ -467,7 +466,7 @@
       }
       cleanupMerger(true);
     } catch (Throwable t) {
-      LOG.info(srcNameTrimmed + ": " + "Error in cleaning up.., ", t);
+      LOG.info("{}: Error in cleaning up.., ", srcNameTrimmed, t);
     }
   }
 
@@ -516,15 +515,15 @@
   private class RssShuffleRunnerFutureCallback implements FutureCallback<TezRawKeyValueIterator> {
     @Override
     public void onSuccess(TezRawKeyValueIterator result) {
-      LOG.info(srcNameTrimmed + ": " + "RSSShuffle Runner thread complete");
+      LOG.info("{}: RSSShuffle Runner thread complete", srcNameTrimmed);
     }
 
     @Override
     public void onFailure(Throwable t) {
       if (isShutDown.get()) {
-        LOG.info(srcNameTrimmed + ": " + "Already shutdown. Ignoring error");
+        LOG.info("{}: Already shutdown. Ignoring error", srcNameTrimmed);
       } else {
-        LOG.error(srcNameTrimmed + ": " + "RSSShuffleRunner failed with error", t);
+        LOG.error("{}: RSSShuffleRunner failed with error", srcNameTrimmed, t);
         // In case of an abort / Interrupt - the runtime makes sure that this is ignored.
         inputContext.reportFailure(TaskFailureType.NON_FATAL, t, "RSSShuffle Runner Failed");
         cleanupIgnoreErrors();
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
index 0528037..ea2fe45 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssShuffleScheduler.java
@@ -1056,7 +1056,7 @@
 
   @Override
   public void reportLocalError(IOException ioe) {
-    LOG.error(srcNameTrimmed + ": " + "Shuffle failed : caused by local error", ioe);
+    LOG.error("{}: Shuffle failed: caused by local error", srcNameTrimmed, ioe);
     // Shuffle knows how to deal with failures post shutdown via the onFailure hook
     exceptionReporter.reportException(ioe);
   }
@@ -1320,7 +1320,7 @@
   @Override
   public void obsoleteInput(InputAttemptIdentifier srcAttempt) {
     // The incoming srcAttempt does not contain a path component.
-    LOG.info(srcNameTrimmed + ": " + "Adding obsolete input: " + srcAttempt);
+    LOG.info("{}: Adding obsolete input: {}", srcNameTrimmed, srcAttempt);
     ShuffleEventInfo eventInfo = pipelinedShuffleInfoEventsMap.get(srcAttempt.getInputIdentifier());
 
     // Pipelined shuffle case (where pipelinedShuffleInfoEventsMap gets populated).
@@ -1684,8 +1684,7 @@
         }
 
         if (LOG.isDebugEnabled()) {
-          LOG.debug(
-              srcNameTrimmed + ": " + "NumCompletedInputs: {}" + (numInputs - remainingMaps.get()));
+          LOG.debug("{}: NumCompletedInputs: {}", srcNameTrimmed, numInputs - remainingMaps.get());
         }
         // Ensure there's memory available before scheduling the next Fetcher.
         try {
@@ -1731,13 +1730,14 @@
                 break; // Check for the exit condition.
               }
               if (LOG.isDebugEnabled()) {
-                LOG.debug(srcNameTrimmed + ": " + "Processing pending host: " + mapHost.toString());
+                LOG.debug("{}: Processing pending host: {}", srcNameTrimmed, mapHost.toString());
               }
               if (!isShutdown.get()) {
                 count++;
                 if (LOG.isDebugEnabled()) {
                   LOG.debug(
-                      srcNameTrimmed + ": " + "Scheduling fetch for inputHost: {}",
+                      "{}: Scheduling fetch for inputHost: {}",
+                      srcNameTrimmed,
                       mapHost.getHostIdentifier() + ":" + mapHost.getPartitionId());
                 }
 
@@ -1937,7 +1937,7 @@
       rssFetcherOrderedGrouped.shutDown();
 
       if (isShutdown.get()) {
-        LOG.info(srcNameTrimmed + ": " + "Already shutdown. Ignoring fetch complete");
+        LOG.info("{}: Already shutdown. Ignoring fetch complete", srcNameTrimmed);
       } else {
         successRssPartitionSet.add(partitionId);
         MapHost mapHost = runningRssPartitionMap.remove(partitionId);
@@ -1962,9 +1962,9 @@
       LOG.error("Failed to fetch.", t);
       rssFetcherOrderedGrouped.shutDown();
       if (isShutdown.get()) {
-        LOG.info(srcNameTrimmed + ": " + "Already shutdown. Ignoring fetch complete");
+        LOG.info("{}: Already shutdown. Ignoring fetch complete", srcNameTrimmed);
       } else {
-        LOG.error(srcNameTrimmed + ": " + "Fetcher failed with error", t);
+        LOG.error("{}: Fetcher failed with error", srcNameTrimmed, t);
         exceptionReporter.reportException(t);
         doBookKeepingForFetcherComplete();
       }
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
index 66859c2..992f509 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/common/shuffle/orderedgrouped/RssTezShuffleDataFetcher.java
@@ -241,7 +241,7 @@
     }
     // Check if we can shuffle *now* ...
     if (mapOutput == null || mapOutput.getType() == MapOutput.Type.WAIT) {
-      LOG.info("RssMRFetcher" + " - MergeManager returned status WAIT ...");
+      LOG.info("RssMRFetcher - MergeManager returned status WAIT ...");
       // Not an error but wait to process data.
       // Use a retry flag to avoid re-fetch and re-uncompress.
       hasPendingData = true;
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
index 8215ecc..176b434 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValueInput.java
@@ -60,7 +60,7 @@
           Reader reader = getInputs().get(currentReaderIndex).getReader();
           if (!(reader instanceof KeyValueReader)) {
             throw new TezUncheckedException(
-                "Expected KeyValueReader. " + "Got: " + reader.getClass().getName());
+                "Expected KeyValueReader. Got: " + reader.getClass().getName());
           }
           currentReader = (KeyValueReader) reader;
           currentReaderIndex++;
diff --git a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
index aafa046..f4abebc 100644
--- a/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
+++ b/client-tez/src/main/java/org/apache/tez/runtime/library/input/RssConcatenatedMergedKeyValuesInput.java
@@ -61,7 +61,7 @@
           Reader reader = getInputs().get(currentReaderIndex).getReader();
           if (!(reader instanceof KeyValuesReader)) {
             throw new TezUncheckedException(
-                "Expected KeyValuesReader. " + "Got: " + reader.getClass().getName());
+                "Expected KeyValuesReader. Got: " + reader.getClass().getName());
           }
           currentReader = (KeyValuesReader) reader;
           currentReaderIndex++;
diff --git a/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java b/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
index 181e4a5..259c81e 100644
--- a/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
+++ b/common/src/main/java/org/apache/uniffle/common/ShuffleServerInfo.java
@@ -110,7 +110,7 @@
           + nettyPort
           + "]}";
     } else {
-      return "ShuffleServerInfo{host[" + host + "]," + " grpc port[" + grpcPort + "]}";
+      return "ShuffleServerInfo{host[" + host + "], grpc port[" + grpcPort + "]}";
     }
   }
 
diff --git a/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java b/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
index d3d75ad..5830194 100644
--- a/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
+++ b/coordinator/src/main/java/org/apache/uniffle/coordinator/web/request/ApplicationRequest.java
@@ -80,6 +80,6 @@
 
   @Override
   public String toString() {
-    return "ApplicationRequest{" + "applications=" + StringUtils.join(applications, ",") + '}';
+    return "ApplicationRequest{applications=" + StringUtils.join(applications, ",") + '}';
   }
 }
diff --git a/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java b/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
index 489465c..00133e0 100644
--- a/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
+++ b/coordinator/src/test/java/org/apache/uniffle/coordinator/conf/YamlClientConfParserTest.java
@@ -43,7 +43,7 @@
 
     // rssClientConf with format of 'k : v'
 
-    String yaml = "rssClientConf:\n" + "    k1: v1\n" + "    k2: v2";
+    String yaml = "rssClientConf:\n    k1: v1\n    k2: v2";
 
     ClientConf conf = parser.tryParse(IOUtils.toInputStream(yaml));
     assertEquals(2, conf.getRssClientConf().size());
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
index bae77cf..549ae21 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/ShuffleServerWithMemLocalHadoopTest.java
@@ -162,7 +162,7 @@
     LOG.info("checkSkippedMetrics={}, isNettyMode={}", checkSkippedMetrics, isNettyMode);
     ShuffleServerGrpcClient shuffleServerClient =
         isNettyMode ? nettyShuffleServerClient : grpcShuffleServerClient;
-    String testAppId = "memoryLocalFileHDFSReadWithFilterTest_" + "ship_" + checkSkippedMetrics;
+    String testAppId = "memoryLocalFileHDFSReadWithFilterTest_ship_" + checkSkippedMetrics;
     int shuffleId = 0;
     int partitionId = 0;
     RssRegisterShuffleRequest rrsr =
diff --git a/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java b/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
index 750fffe..4add6e8 100644
--- a/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
+++ b/integration-test/tez/src/test/java/org/apache/uniffle/test/TezSimpleSessionExampleTest.java
@@ -103,7 +103,7 @@
   @Override
   public String[] getTestArgs(String uniqueOutputName) {
     return new String[] {
-      inputPath + ".0" + "," + inputPath + ".1" + "," + inputPath + ".2",
+      inputPath + ".0," + inputPath + ".1," + inputPath + ".2",
       outputPath
           + "/"
           + uniqueOutputName
diff --git a/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java b/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
index a7b1400..a7d8254 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleServerConf.java
@@ -468,7 +468,7 @@
           .intType()
           .checkValue(
               ConfigUtils.SERVER_PORT_VALIDATOR,
-              "check server port value is 0 " + "or value >= 1024 && value <= 65535")
+              "check server port value is 0 or value >= 1024 && value <= 65535")
           .defaultValue(-1)
           .withDescription("Shuffle netty server port");
 
diff --git a/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java b/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
index 47a0e80..c088242 100644
--- a/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
+++ b/server/src/main/java/org/apache/uniffle/server/ShuffleServerGrpcService.java
@@ -699,7 +699,7 @@
             .getGrpcMetrics()
             .recordProcessTime(ShuffleServerGrpcMetrics.GET_SHUFFLE_DATA_METHOD, readTime);
         LOG.info(
-            "Successfully getShuffleData cost {} ms for shuffle" + " data with {}",
+            "Successfully getShuffleData cost {} ms for shuffle data with {}",
             readTime,
             requestInfo);
         reply =
@@ -791,7 +791,7 @@
             .getGrpcMetrics()
             .recordProcessTime(ShuffleServerGrpcMetrics.GET_SHUFFLE_INDEX_METHOD, readTime);
         LOG.info(
-            "Successfully getShuffleIndex cost {} ms for {}" + " bytes with {}",
+            "Successfully getShuffleIndex cost {} ms for {} bytes with {}",
             readTime,
             data.remaining(),
             requestInfo);
@@ -894,7 +894,7 @@
             .getGrpcMetrics()
             .recordProcessTime(ShuffleServerGrpcMetrics.GET_MEMORY_SHUFFLE_DATA_METHOD, costTime);
         LOG.info(
-            "Successfully getInMemoryShuffleData cost {} ms with {} bytes shuffle" + " data for {}",
+            "Successfully getInMemoryShuffleData cost {} ms with {} bytes shuffle data for {}",
             costTime,
             data.length,
             requestInfo);