diff --git a/docs/deployment/migration-guide.md b/docs/deployment/migration-guide.md
index 8e4fefbd7f8..f14d773bdfe 100644
--- a/docs/deployment/migration-guide.md
+++ b/docs/deployment/migration-guide.md
@@ -24,6 +24,7 @@
 * Since Kyuubi 1.12, session configurations in REST API responses are redacted by default using `kyuubi.server.redaction.regex`. Use `kyuubi.server.conf.retrieveMode` to control this behavior: `REDACTED` (default), `ORIGINAL` (no redaction), or `NONE` (omit configs entirely).
 * Since Kyuubi 1.12, `GET /api/v1/sessions` returns only sessions owned by the authenticated user instead of all sessions on the server. To restore the previous behavior, set `kyuubi.frontend.rest.legacy.v1.sessionsReturnAllUsers=true`.
 * Since Kyuubi 1.12, the configuration `spark.sql.kyuubi.hive.connector.dropTableAsPurgeTable` is introduced by Kyuubi Spark Hive connector(KSHC) to control whether DROP TABLE command completely remove its data by skipping HDFS trash. The default value is false. To restore the legacy behavior, set it to true.
+* Since Kyuubi 1.12, the configuration `spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled` is introduced by Kyuubi Spark Hive connector(KSHC) to control whether partition columns are exposed as runtime filter attributes, which is required for Spark Dynamic Partition Pruning (DPP). The default value is true. To restore the legacy behavior, set it to false.
 * Since Kyuubi 1.12, the `CHAT` engine is removed.
 
 ## Upgrading from Kyuubi 1.10 to 1.11
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/KyuubiHiveConnectorConf.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/KyuubiHiveConnectorConf.scala
index adeab91a1a0..8f4d2a09762 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/KyuubiHiveConnectorConf.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/KyuubiHiveConnectorConf.scala
@@ -65,4 +65,12 @@ object KyuubiHiveConnectorConf {
       .version("1.12.0")
       .booleanConf
       .createWithDefault(false)
+
+  val READ_RUNTIME_FILTER_ENABLED =
+    buildConf("spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled")
+      .doc("When enabled, partition columns will be exposed as runtime filter attributes, " +
+        "which is required for Spark Dynamic Partition Pruning (DPP).")
+      .version("1.12.0")
+      .booleanConf
+      .createWithDefault(true)
 }
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
index 0142c556194..ec9fd3aeeb1 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveFileIndex.scala
@@ -52,6 +52,18 @@ class HiveCatalogFileIndex(
 
   private val baseLocation: Option[URI] = table.storage.locationUri
 
+  // Align with Spark's built-in CatalogFileIndex by explicitly overriding equals.
+  // This keeps `BatchScanExec#equals` stable and enables BroadcastExchange reuse under DPP.
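+  // Note: without an override this plain class keeps reference equality, so the scan
+  // relation re-created by the DPP rewrite would never compare equal to the original
+  // one and BroadcastExchange reuse would silently be skipped.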
+  override def equals(other: Any): Boolean = other match {
+    case that: HiveCatalogFileIndex =>
+      this.hiveCatalog.name == that.hiveCatalog.name &&
+      this.catalogTable.identifier == that.catalogTable.identifier
+    case _ => false
+  }
+
+  override def hashCode(): Int =
+    31 * hiveCatalog.name.hashCode + catalogTable.identifier.hashCode
+
   override def partitionSchema: StructType = table.partitionSchema
 
   override def listFiles(
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveRuntimeFilterSupport.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveRuntimeFilterSupport.scala
new file mode 100644
index 00000000000..764b4697940
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveRuntimeFilterSupport.scala
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.spark.connector.hive.read
+
+import java.util.Locale
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression, In, Literal}
+import org.apache.spark.sql.connector.expressions.{Expressions, NamedReference}
+import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.StructTypeHelper
+import org.apache.spark.sql.sources.{Filter, In => FilterIn}
+import org.apache.spark.sql.types.StructType
+
+/**
+ * Helpers for a Hive-backed V2 [[org.apache.spark.sql.connector.read.Scan]] to
+ * implement [[org.apache.spark.sql.connector.read.SupportsRuntimeFiltering]]
+ * for Dynamic Partition Pruning (DPP).
+ *
+ * Spark's `DataSourceV2Strategy` currently only emits the `IN` form as a DPP
+ * runtime filter, so translation here handles `In` only. Any filter whose
+ * attribute does not match a known partition column is dropped; drops are
+ * logged at DEBUG.
+ *
+ * We deliberately implement the V1 `SupportsRuntimeFiltering` rather than
+ * `SupportsRuntimeV2Filtering` to keep this connector compilable against
+ * Spark 3.3; `SupportsRuntimeV2Filtering` was only introduced in Spark 3.4.
+ */
+object HiveRuntimeFilterSupport extends Logging {
+
+  /**
+   * Build the runtime-filterable attribute array. Only partition columns are exposed
+   * because DPP is only beneficial at the partition-directory granularity.
+   */
+  def filterAttributes(partitionColumnNames: Seq[String]): Array[NamedReference] = {
+    partitionColumnNames.map(Expressions.column).toArray
+  }
+
+  /**
+   * Translate Spark's runtime V1 `In` filters into catalyst [[In]] expressions
+   * bound to the given partition attributes.
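+   *
+   * For example, with a partition schema containing `dt STRING`, the source filter
+   * `In("dt", Array("2026-05-01"))` translates to the catalyst expression
+   * `In(dt, Seq(Literal("2026-05-01")))`, which `listHiveFiles` can then evaluate
+   * against catalog partition values.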
+   *
+   * A filter is accepted only when it is a [[FilterIn]] whose attribute resolves
+   * to a known partition column.
+   */
+  def toCatalystPartitionFilters(
+      filters: Array[Filter],
+      partitionSchema: StructType,
+      isCaseSensitive: Boolean): Seq[Expression] = {
+    val attrByName: Map[String, AttributeReference] =
+      partitionSchema.toAttributes
+        .map(a => normalize(a.name, isCaseSensitive) -> a).toMap
+
+    val accepted = filters.toSeq.flatMap {
+      case FilterIn(attribute, values) =>
+        attrByName.get(normalize(attribute, isCaseSensitive)).map { attr =>
+          In(attr, values.toSeq.map(v => Literal.create(v, attr.dataType)))
+        }
+      case _ => None
+    }
+
+    if (accepted.length < filters.length) {
+      logDebug(
+        s"Dropped ${filters.length - accepted.length} of ${filters.length} runtime " +
+          s"filter(s) not applicable to partition columns " +
+          s"[${partitionSchema.fieldNames.mkString(", ")}]")
+    }
+    accepted
+  }
+
+  private def normalize(name: String, isCaseSensitive: Boolean): String =
+    if (isCaseSensitive) {
+      name
+    } else {
+      name.toLowerCase(Locale.ROOT)
+    }
+}
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
index ab6cfd912ae..92778789235 100644
--- a/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/main/scala/org/apache/kyuubi/spark/connector/hive/read/HiveScan.scala
@@ -27,7 +27,8 @@ import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTablePartition}
 import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
 import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
-import org.apache.spark.sql.connector.read.PartitionReaderFactory
+import org.apache.spark.sql.connector.expressions.NamedReference
+import org.apache.spark.sql.connector.read.{PartitionReaderFactory, SupportsRuntimeFiltering}
 import org.apache.spark.sql.execution.datasources.{FilePartition, PartitionedFile}
 import org.apache.spark.sql.execution.datasources.v2.FileScan
 import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.HiveClientImpl
@@ -36,6 +37,7 @@ import org.apache.spark.sql.types.StructType
 import org.apache.spark.util.SerializableConfiguration
 
 import org.apache.kyuubi.spark.connector.hive.{HiveConnectorUtils, KyuubiHiveConnectorException}
+import org.apache.kyuubi.spark.connector.hive.KyuubiHiveConnectorConf.READ_RUNTIME_FILTER_ENABLED
 
 case class HiveScan(
     sparkSession: SparkSession,
@@ -46,13 +48,29 @@
     readPartitionSchema: StructType,
     pushedFilters: Array[Filter] = Array.empty,
     partitionFilters: Seq[Expression] = Seq.empty,
-    dataFilters: Seq[Expression] = Seq.empty) extends FileScan {
+    dataFilters: Seq[Expression] = Seq.empty) extends FileScan
+  with SupportsRuntimeFiltering {
 
   private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
 
   private val partFileToHivePartMap: mutable.Map[PartitionedFile, CatalogTablePartition] =
     mutable.Map()
 
+  private var runtimeFilters: Seq[Expression] = Seq.empty
+
+  // Align with Spark's built-in ParquetScan/OrcScan by explicitly overriding equals.
+  // This keeps `BatchScanExec#equals` stable and enables BroadcastExchange reuse under DPP.
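+  // As in Spark's FileScan subclasses, hashCode stays coarse (getClass-based) while
+  // equals does the fine-grained comparison of table identifier, data schema, and
+  // pushed filters, so equal scans always hash to the same bucket.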
+  override def equals(obj: Any): Boolean = obj match {
+    case other: HiveScan =>
+      super.equals(other) &&
+      catalogTable.identifier == other.catalogTable.identifier &&
+      dataSchema == other.dataSchema &&
+      equivalentFilters(pushedFilters, other.pushedFilters)
+    case _ => false
+  }
+
+  override def hashCode(): Int = getClass.hashCode()
+
   override def isSplitable(path: Path): Boolean = {
     catalogTable.provider.map(_.toUpperCase(Locale.ROOT)).exists {
       case "PARQUET" => true
@@ -83,8 +101,9 @@
   }
 
   override protected def partitions: Seq[FilePartition] = {
+    val effectivePartitionFilters = partitionFilters ++ runtimeFilters
     val (selectedPartitions, partDirToHivePartMap) =
-      fileIndex.listHiveFiles(partitionFilters, dataFilters)
+      fileIndex.listHiveFiles(effectivePartitionFilters, dataFilters)
     val maxSplitBytes = FilePartition.maxSplitBytes(sparkSession, selectedPartitions)
     val partitionAttributes = toAttributes(fileIndex.partitionSchema)
     val attributeMap = partitionAttributes.map(a => normalizeName(a.name) -> a).toMap
@@ -157,4 +176,29 @@
 
   def toAttributes(structType: StructType): Seq[AttributeReference] =
     structType.map(f => AttributeReference(f.name, f.dataType, f.nullable, f.metadata)())
+
+  // -------------------------------------------------------------------------------
+  // SupportsRuntimeFiltering implementation
+  // -------------------------------------------------------------------------------
+
+  override def filterAttributes(): Array[NamedReference] = {
+    if (!sparkSession.sessionState.conf.getConf(READ_RUNTIME_FILTER_ENABLED)) {
+      Array.empty
+    } else {
+      HiveRuntimeFilterSupport.filterAttributes(readPartitionSchema.fieldNames.toSeq)
+    }
+  }
+
+  override def filter(filters: Array[Filter]): Unit = {
+    runtimeFilters = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      filters,
+      fileIndex.partitionSchema,
+      isCaseSensitive)
+    if (runtimeFilters.nonEmpty) {
+      logInfo(s"Received ${runtimeFilters.length} runtime partition filter(s) for " +
+        s"${catalogTable.identifier}")
+      logDebug(s"Runtime partition filter(s) for ${catalogTable.identifier}: " +
+        s"${runtimeFilters.mkString(", ")}")
+    }
+  }
 }
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/test/scala/org/apache/kyuubi/spark/connector/hive/DynamicPartitionPruningSuite.scala b/extensions/spark/kyuubi-spark-connector-hive/src/test/scala/org/apache/kyuubi/spark/connector/hive/DynamicPartitionPruningSuite.scala
new file mode 100644
index 00000000000..47a3214b6d3
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/test/scala/org/apache/kyuubi/spark/connector/hive/DynamicPartitionPruningSuite.scala
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.spark.connector.hive
+
+import scala.annotation.tailrec
+
+import org.apache.spark.sql.{Row, SparkSession}
+import org.apache.spark.sql.connector.read.{Scan, SupportsRuntimeFiltering}
+import org.apache.spark.sql.execution.SparkPlan
+import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
+import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
+
+import org.apache.kyuubi.spark.connector.hive.read.HiveScan
+
+class DynamicPartitionPruningSuite extends KyuubiHiveTest {
+
+  private def findScan(spark: SparkSession, sql: String, tableNameHint: String): Scan = {
+    // Match on `HiveScan.catalogTable` rather than the node's `toString` because
+    // `BatchScanExec.toString` shape differs across Spark versions.
+    def matchesHint(b: BatchScanExec): Boolean = b.scan match {
+      case h: HiveScan => h.catalogTable.identifier.table == tableNameHint
+      case _ => false
+    }
+
+    @tailrec
+    def findBatchScan(plan: SparkPlan): Option[BatchScanExec] = plan match {
+      case aqe: AdaptiveSparkPlanExec => findBatchScan(aqe.inputPlan)
+      case _ => plan.collectFirst {
+          case b: BatchScanExec if matchesHint(b) => b
+        }
+    }
+
+    val exec = findBatchScan(spark.sql(sql).queryExecution.executedPlan)
+    assert(exec.isDefined)
+    exec.get.scan
+  }
+
+  test("HiveScan supports DPP runtime filtering on partition columns") {
+    Seq(
+      ("true", Seq("dt")),
+      ("false", Seq.empty[String])).foreach { case (enabled, expectedFilterAttrs) =>
+      withSparkSession(Map(
+        "hive.exec.dynamic.partition.mode" -> "nonstrict",
+        "spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled" -> enabled)) { spark =>
+        val suffix = if (enabled == "true") "on" else "off"
+        val fact = s"hive.default.dpp_fact_$suffix"
+        val dim = s"hive.default.dpp_dim_$suffix"
+
+        withTable(fact, dim) {
+          spark.sql(
+            s"""
+               | CREATE TABLE $fact (id INT, v STRING) PARTITIONED BY (dt STRING)
+               | STORED AS TEXTFILE
+               |""".stripMargin).collect()
+          spark.sql(s"INSERT INTO $fact PARTITION (dt='2026-01-01') VALUES (1, 'a'), (2, 'b')")
+          spark.sql(s"INSERT INTO $fact PARTITION (dt='2026-05-01') VALUES (3, 'c'), (4, 'd')")
+          spark.sql(s"INSERT INTO $fact PARTITION (dt='2026-09-01') VALUES (5, 'e'), (6, 'f')")
+
+          spark.sql(
+            s"""
+               | CREATE TABLE $dim (dt STRING, tag STRING)
+               | STORED AS TEXTFILE
+               |""".stripMargin).collect()
+          spark.sql(s"INSERT INTO $dim VALUES ('2026-05-01', 'target')")
+
+          val sql =
+            s"""
+               | SELECT f.id, f.v, f.dt
+               | FROM $fact f JOIN $dim d ON f.dt = d.dt
+               | WHERE d.tag = 'target'
+               |""".stripMargin
+
+          checkAnswer(
+            spark.sql(sql),
+            Seq(
+              Row(3, "c", "2026-05-01"),
+              Row(4, "d", "2026-05-01")))
+
+          val scan = findScan(spark, sql, fact.split('.').last)
+          assert(scan.isInstanceOf[SupportsRuntimeFiltering])
+          val filterAttrs = scan.asInstanceOf[SupportsRuntimeFiltering]
+            .filterAttributes().map(_.fieldNames().mkString("."))
+          assert(filterAttrs.toSeq == expectedFilterAttrs)
+        }
+      }
+    }
+  }
+}
diff --git a/extensions/spark/kyuubi-spark-connector-hive/src/test/scala/org/apache/kyuubi/spark/connector/hive/read/HiveRuntimeFilterSupportSuite.scala b/extensions/spark/kyuubi-spark-connector-hive/src/test/scala/org/apache/kyuubi/spark/connector/hive/read/HiveRuntimeFilterSupportSuite.scala
new file mode 100644
index 00000000000..e3ab3e55cee
--- /dev/null
+++ b/extensions/spark/kyuubi-spark-connector-hive/src/test/scala/org/apache/kyuubi/spark/connector/hive/read/HiveRuntimeFilterSupportSuite.scala
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kyuubi.spark.connector.hive.read
+
+import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.catalyst.expressions.{AttributeReference, In, Literal}
+import org.apache.spark.sql.sources.{EqualTo, Filter, In => FilterIn}
+import org.apache.spark.sql.types._
+
+class HiveRuntimeFilterSupportSuite extends SparkFunSuite {
+
+  private val partitionSchema = StructType(Seq(
+    StructField("dt", StringType),
+    StructField("id", LongType)))
+
+  test("filterAttributes returns one NamedReference per partition column") {
+    val refs = HiveRuntimeFilterSupport.filterAttributes(Seq("dt", "id"))
+    assert(refs.length == 2)
+    assert(refs.map(_.fieldNames().toSeq) === Array(Seq("dt"), Seq("id")))
+  }
+
+  test("filterAttributes returns empty array when no partition columns") {
+    assert(HiveRuntimeFilterSupport.filterAttributes(Seq.empty).isEmpty)
+  }
+
+  test("toCatalystPartitionFilters returns Nil for empty filter array") {
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array.empty[Filter],
+      partitionSchema,
+      isCaseSensitive = false)
+    assert(out.isEmpty)
+  }
+
+  test("IN against a partition column is translated to catalyst In") {
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array[Filter](FilterIn("dt", Array[Any]("2026-01-01", "2026-05-01"))),
+      partitionSchema,
+      isCaseSensitive = false)
+
+    assert(out.size == 1)
+    val in = out.head.asInstanceOf[In]
+    val attr = in.value.asInstanceOf[AttributeReference]
+    assert(attr.name == "dt")
+    assert(attr.dataType == StringType)
+    val literalValues = in.list.map(_.asInstanceOf[Literal].value.toString)
+    assert(literalValues === Seq("2026-01-01", "2026-05-01"))
+  }
+
+  test("IN against a non-partition column is dropped as a whole") {
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array[Filter](FilterIn("non_partition_col", Array[Any]("x"))),
+      partitionSchema,
+      isCaseSensitive = false)
+    assert(out.isEmpty)
+  }
+
+  test("IN with case-different column name is accepted in case-insensitive mode") {
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array[Filter](FilterIn("DT", Array[Any]("2026-01-01"))),
+      partitionSchema,
+      isCaseSensitive = false)
+    assert(out.size == 1)
+    assert(out.head.asInstanceOf[In].value.asInstanceOf[AttributeReference].name == "dt")
+  }
+
+  test("IN with case-different column name is rejected in case-sensitive mode") {
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array[Filter](FilterIn("DT", Array[Any]("2026-01-01"))),
+      partitionSchema,
+      isCaseSensitive = true)
+    assert(out.isEmpty)
+  }
+
+  test("non-IN filter is ignored") {
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array[Filter](EqualTo("dt", "x")),
+      partitionSchema,
+      isCaseSensitive = false)
+    assert(out.isEmpty)
+  }
+
+  test("mixed filter list keeps the accepted ones and drops the rest") {
+    val good = FilterIn("dt", Array[Any]("2026-05-01"))
+    val bad = FilterIn("non_partition_col", Array[Any]("x"))
+    val out = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
+      Array[Filter](good, bad),
+      partitionSchema,
+      isCaseSensitive = false)
+    assert(out.size == 1)
+    assert(out.head.asInstanceOf[In].value.asInstanceOf[AttributeReference].name == "dt")
+  }
+}