/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import com.typesafe.tools.mima.core.*

/**
 * Additional excludes for checking Spark's binary compatibility.
 *
 * This acts as an official audit of the cases where we have excluded classes or members from the
 * checks. Please use the narrowest possible exclude here. MiMa will usually tell you which exclude
 * to use, e.g.:
 *
 * ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.rdd.RDD.take")
 *
 * It is also possible to exclude whole Spark classes and packages, but this should be used sparingly:
 *
 * MimaBuild.excludeSparkClass("graphx.util.collection.GraphXPrimitiveKeyOpenHashMap")
 *
 * For a new Spark version, please update MimaBuild.scala to reflect the previous version.
 */
object MimaExcludes {

  // Exclude rules for 4.1.x from 4.0.0
  lazy val v41excludes = defaultExcludes ++ Seq(
    // [SPARK-51261][ML][CONNECT] Introduce model size estimation to control ml cache
    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.ml.linalg.Vector.getSizeInBytes"),
    // [SPARK-52221][SQL] Refactor SqlScriptingLocalVariableManager into more generic context manager
    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.scripting.SqlScriptingExecution.withLocalVariableManager"),
    // [SPARK-53391][CORE] Remove unused PrimitiveKeyOpenHashMap
    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.collection.PrimitiveKeyOpenHashMap*"),
    // [SPARK-54041][SQL] Enable Direct Passthrough Partitioning in the DataFrame API
    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.Dataset.repartitionById")
  )

  // Default exclude rules
  lazy val defaultExcludes = Seq(
    // Spark Internals
    ProblemFilters.exclude[Problem]("org.apache.spark.rpc.*"),
    ProblemFilters.exclude[Problem]("org.spark-project.jetty.*"),
    ProblemFilters.exclude[Problem]("org.spark_project.jetty.*"),
    ProblemFilters.exclude[Problem]("org.sparkproject.jetty.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.internal.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.kafka010.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.unused.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.unsafe.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.memory.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.util.collection.unsafe.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.catalyst.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.execution.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.internal.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.errors.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.classic.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.connect.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.scripting.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.types.variant.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.ui.flamegraph.*"),
    // DSv2 catalog and expression APIs are not yet stable; re-enable these checks once they are.
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.connector.catalog.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.connector.expressions.*"),
    // The Avro source implementation is internal.
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.v2.avro.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.avro.*"),
    // SPARK-43169: shaded and generated protobuf code
    ProblemFilters.exclude[Problem]("org.sparkproject.spark_core.protobuf.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.status.protobuf.StoreTypes*"),
    // SPARK-44104: shaded protobuf code and APIs whose parameters were relocated
    ProblemFilters.exclude[Problem]("org.sparkproject.spark_protobuf.protobuf.*"),
    ProblemFilters.exclude[Problem]("org.apache.spark.sql.protobuf.utils.SchemaConverters.*"),
    // SPARK-51267: Match local Spark Connect server logic between Python and Scala
    ProblemFilters.exclude[MissingFieldProblem]("org.apache.spark.launcher.SparkLauncher.SPARK_LOCAL_REMOTE"),
    // SPARK-53138: Split common/utils Java code into a new module common/utils-java
    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.QueryContext"),
    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.QueryContextType"),
    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.api.java.function.*"),
    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.network.util.ByteUnit"),
    ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.network.util.JavaUtils"),
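    // Hand-written filter: a MiMa filter is simply a Problem => Boolean function, so a plain
    // predicate can be mixed with ProblemFilters.exclude rules. This one drops missing-class
    // reports for the shaded JPMML/PMML classes relocated under org.sparkproject.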
    (problem: Problem) => problem match {
      case MissingClassProblem(cls) => !cls.fullName.startsWith("org.sparkproject.jpmml") &&
        !cls.fullName.startsWith("org.sparkproject.dmg.pmml")
      case _ => true
    }
  )

  def excludes(version: String): Seq[Problem => Boolean] = version match {
    case v if v.startsWith("4.1") => v41excludes
    case _ => Seq()
  }
}