[SPARK-53496][SQL][CORE][ML][SS][K8S][YARN] Remove dangling right curly braces in log
### What changes were proposed in this pull request?
Remove dangling right curly braces from structured log messages.
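For illustration only, here is a minimal sketch using the plain Scala `s` interpolator rather than Spark's structured `log`/`MDC` interpolator (the object and variable names are made up): a `}` written after the closing brace of a `${...}` expression sits outside the interpolation, so it is rendered verbatim in the message.

```scala
// Minimal sketch with the standard `s` interpolator; `numRows` is a hypothetical value.
object DanglingBraceDemo {
  def main(args: Array[String]): Unit = {
    val numRows = 3
    // Before: the second '}' is outside `${numRows}` and is printed literally.
    println(s"Its dimensions (${numRows}}, ...)") // -> Its dimensions (3}, ...)
    // After: the dangling brace is removed.
    println(s"Its dimensions (${numRows}, ...)")  // -> Its dimensions (3, ...)
  }
}
```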
### Why are the changes needed?
Fix the structure of the affected log messages: a stray `}` left after an `${MDC(...)}` interpolation was emitted literally into the rendered log output.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Passing existing CI.
### Was this patch authored or co-authored using generative AI tooling?
No.
Closes #52226 from yaooqinn/minor.
Authored-by: Kent Yao <yao@apache.org>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
diff --git a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
index d082492..b602632 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/classification/LogisticRegression.scala
@@ -836,7 +836,7 @@
(_initialModel.getFitIntercept == $(fitIntercept))
if (!modelIsValid) {
instr.logWarning(log"Initial coefficients will be ignored! Its dimensions " +
- log"(${MDC(LogKeys.NUM_ROWS, providedCoefs.numRows)}}, " +
+ log"(${MDC(LogKeys.NUM_ROWS, providedCoefs.numRows)}, " +
log"${MDC(LogKeys.NUM_COLUMNS, providedCoefs.numCols)}) did not match the " +
log"expected size (${MDC(LogKeys.NUM_COEFFICIENTS, numCoefficientSets)}, " +
log"${MDC(LogKeys.NUM_FEATURES, numFeatures)})")
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala
index f4e205e..ededa6b 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/LoggingPodStatusWatcher.scala
@@ -98,7 +98,7 @@
}
override def watchOrStop(sId: String): Boolean = {
- logInfo(log"Waiting for application ${MDC(APP_NAME, conf.appName)}} with application ID " +
+ logInfo(log"Waiting for application ${MDC(APP_NAME, conf.appName)} with application ID " +
log"${MDC(APP_ID, appId)} and submission ID ${MDC(SUBMISSION_ID, sId)} to finish...")
val interval = conf.get(REPORT_INTERVAL)
synchronized {
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
index e84a0c9..051b7e0 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsAllocator.scala
@@ -191,7 +191,7 @@
}
if (timedOut.nonEmpty) {
- logWarning(log"Executors with ids ${MDC(LogKeys.EXECUTOR_IDS, timedOut.mkString(","))}} " +
+ logWarning(log"Executors with ids ${MDC(LogKeys.EXECUTOR_IDS, timedOut.mkString(","))} " +
log"were not detected in the Kubernetes cluster after " +
log"${MDC(LogKeys.TIMEOUT, podCreationTimeout)} ms despite the fact that a previous " +
log"allocation attempt tried to create them. The executors may have been deleted but the " +
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index b4f9c74..0965500 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -332,7 +332,7 @@
appContext.setLogAggregationContext(logAggregationContext)
} catch {
case NonFatal(e) =>
- logWarning(log"Ignoring ${MDC(LogKeys.CONFIG, ROLLED_LOG_INCLUDE_PATTERN.key)}} " +
+ logWarning(log"Ignoring ${MDC(LogKeys.CONFIG, ROLLED_LOG_INCLUDE_PATTERN.key)} " +
log"because the version of YARN does not support it", e)
}
}
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
index 48946b5..8b11765 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnAllocator.scala
@@ -587,7 +587,7 @@
if (log.isInfoEnabled()) {
val (localized, anyHost) = newLocalityRequests.partition(_.getNodes() != null)
if (anyHost.nonEmpty) {
- logInfo(log"Submitted ${MDC(LogKeys.COUNT, anyHost.size)}} unlocalized container " +
+ logInfo(log"Submitted ${MDC(LogKeys.COUNT, anyHost.size)} unlocalized container " +
log"requests.")
}
localized.foreach { request =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala
index f3a0da2..ca62735 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTablesCommand.scala
@@ -39,7 +39,7 @@
} catch {
case NonFatal(e) =>
logWarning(log"Failed to analyze table ${MDC(TABLE_NAME, tbl.table)} in the " +
- log"database ${MDC(DATABASE_NAME, db)} because of ${MDC(ERROR, e.toString)}}", e)
+ log"database ${MDC(DATABASE_NAME, db)} because of ${MDC(ERROR, e.toString)}", e)
}
}
Seq.empty[Row]
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
index 77e21bc..8cf2bd8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/GroupBasedRowLevelOperationScanPlanning.scala
@@ -69,7 +69,7 @@
|Pushing operators to ${MDC(LogKeys.RELATION_NAME, relation.name)}
|Pushed filters: ${MDC(LogKeys.PUSHED_FILTERS, pushedFiltersStr)}
|Filters evaluated on data source side: ${MDC(LogKeys.EVALUATED_FILTERS, evaluatedFilters.mkString(", "))}
- |Filters evaluated on Spark side: ${MDC(LogKeys.POST_SCAN_FILTERS, postScanFilters.mkString(", "))}}
+ |Filters evaluated on Spark side: ${MDC(LogKeys.POST_SCAN_FILTERS, postScanFilters.mkString(", "))}
|Output: ${MDC(LogKeys.RELATION_OUTPUT, output.mkString(", "))}
""".stripMargin)
// scalastyle:on line.size.limit
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala
index 68e8c1c..4904e3d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/WriteToDataSourceV2Exec.scala
@@ -436,7 +436,7 @@
logInfo(log"Start processing data source write support: " +
log"${MDC(LogKeys.BATCH_WRITE, batchWrite)}. The input RDD has " +
- log"${MDC(LogKeys.COUNT, messages.length)}} partitions.")
+ log"${MDC(LogKeys.COUNT, messages.length)} partitions.")
// Avoid object not serializable issue.
val writeMetrics: Map[String, SQLMetric] = customMetrics
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala
index 8a175e6..ccdcc6f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/runtime/TriggerExecutor.scala
@@ -101,7 +101,7 @@
/** Called when a batch falls behind */
def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
logWarning(log"Current batch is falling behind. The trigger interval is " +
- log"${MDC(TRIGGER_INTERVAL, intervalMs)}} milliseconds, but spent " +
+ log"${MDC(TRIGGER_INTERVAL, intervalMs)} milliseconds, but spent " +
log"${MDC(ELAPSED_TIME, realElapsedTimeMs)} milliseconds")
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
index f37a260..49eb725 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/HDFSBackedStateStoreProvider.scala
@@ -515,7 +515,7 @@
logInfo(log"Trying to add version=${MDC(LogKeys.STATE_STORE_VERSION, newVersion)} to state " +
log"cache map with current_size=${MDC(LogKeys.NUM_LOADED_ENTRIES, loadedEntries)} and " +
log"earliest_loaded_version=" +
- log"${MDC(LogKeys.EARLIEST_LOADED_VERSION, earliestLoadedVersion.get)}} " +
+ log"${MDC(LogKeys.EARLIEST_LOADED_VERSION, earliestLoadedVersion.get)} " +
log"and max_versions_to_retain_in_memory=" +
log"${MDC(LogKeys.NUM_VERSIONS_RETAIN, numberOfVersionsToRetainInMemory)}")
} else {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
index 9ac9d4c..b0e0cc5 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala
@@ -349,7 +349,7 @@
if (shouldInfer) {
val tableName = tableMeta.identifier.unquotedString
logInfo(log"Inferring case-sensitive schema for table ${MDC(TABLE_NAME, tableName)} " +
- log"(inference mode: ${MDC(INFERENCE_MODE, inferenceMode)})})")
+ log"(inference mode: ${MDC(INFERENCE_MODE, inferenceMode)})")
val fileIndex = fileIndexOpt.getOrElse {
val rootPath = new Path(tableMeta.location)
new InMemoryFileIndex(sparkSession, Seq(rootPath), options, None)