docs/deployment/migration-guide.md (1 addition, 0 deletions)
@@ -24,6 +24,7 @@
* Since Kyuubi 1.12, session configurations in REST API responses are redacted by default using `kyuubi.server.redaction.regex`. Use `kyuubi.server.conf.retrieveMode` to control this behavior: `REDACTED` (default), `ORIGINAL` (no redaction), or `NONE` (omit configs entirely).
* Since Kyuubi 1.12, `GET /api/v1/sessions` returns only sessions owned by the authenticated user instead of all sessions on the server. To restore the previous behavior, set `kyuubi.frontend.rest.legacy.v1.sessionsReturnAllUsers=true`.
* Since Kyuubi 1.12, the configuration `spark.sql.kyuubi.hive.connector.dropTableAsPurgeTable` is introduced by the Kyuubi Spark Hive Connector (KSHC) to control whether the DROP TABLE command completely removes its data by skipping the HDFS trash. The default value is false. To restore the legacy behavior, set it to true.
* Since Kyuubi 1.12, the configuration `spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled` is introduced by the Kyuubi Spark Hive Connector (KSHC) to control whether partition columns are exposed as runtime filter attributes, which is required for Spark Dynamic Partition Pruning (DPP). The default value is true. To restore the legacy behavior, set it to true.
Contributor:
I think in “To restore the legacy behavior, set it to true.” it should be false instead of true?

Contributor Author:
Oh, my mistake, thanks for the review! I've already made the changes.

* Since Kyuubi 1.12, the `CHAT` engine is removed.

## Upgrading from Kyuubi 1.10 to 1.11
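For orientation, a minimal hedged sketch of toggling the two KSHC configurations called out in the migration notes above: only the two property keys and their documented defaults come from the notes, while the session setup and app name are made up.

```scala
// Illustrative sketch only: the two keys below are the KSHC configurations described in
// the migration notes above; everything else is invented for the example.
import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder()
  .appName("kshc-migration-sketch")
  // Default is false; per the note above, set true to purge data on DROP TABLE
  // (skip the HDFS trash), i.e. the pre-1.12 behavior.
  .config("spark.sql.kyuubi.hive.connector.dropTableAsPurgeTable", "true")
  // Default is true; set false to stop exposing partition columns as runtime filter
  // attributes, which disables Dynamic Partition Pruning for KSHC scans.
  .config("spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled", "false")
  .getOrCreate()
```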
@@ -65,4 +65,12 @@ object KyuubiHiveConnectorConf {
.version("1.12.0")
.booleanConf
.createWithDefault(false)

val READ_RUNTIME_FILTER_ENABLED =
buildConf("spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled")
.doc("When enabled, partition columns will be exposed as runtime filter attributes, " +
"this is required for Spark Dynamic Partition Pruning (DPP).")
.version("1.12.0")
.booleanConf
.createWithDefault(true)
}
@@ -52,6 +52,18 @@ class HiveCatalogFileIndex(

private val baseLocation: Option[URI] = table.storage.locationUri

// Align with Spark's built-in CatalogFileIndex by explicitly overriding equals.
// This keeps `BatchScanExec#equals` stable and enables BroadcastExchange reuse under DPP.
override def equals(other: Any): Boolean = other match {
case that: HiveCatalogFileIndex =>
this.hiveCatalog.name == that.hiveCatalog.name &&
this.catalogTable.identifier == that.catalogTable.identifier
case _ => false
}

override def hashCode(): Int =
31 * hiveCatalog.name.hashCode + catalogTable.identifier.hashCode

override def partitionSchema: StructType = table.partitionSchema

override def listFiles(
@@ -0,0 +1,91 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kyuubi.spark.connector.hive.read

import java.util.Locale

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression, In, Literal}
import org.apache.spark.sql.connector.expressions.{Expressions, NamedReference}
import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.StructTypeHelper
import org.apache.spark.sql.sources.{Filter, In => FilterIn}
import org.apache.spark.sql.types.StructType

/**
* Helpers for a Hive-backed V2 [[org.apache.spark.sql.connector.read.Scan]] to
* implement [[org.apache.spark.sql.connector.read.SupportsRuntimeFiltering]]
* for Dynamic Partition Pruning (DPP).
*
* Spark's `DataSourceV2Strategy` currently only emits the `IN` form as a DPP
* runtime filter, so translation here handles `In` only. Any filter whose
* attribute does not match a known partition column is dropped; drops are
* logged at DEBUG.
*
* We deliberately use the V1 `SupportsRuntimeFiltering` instead of the newer
* `SupportsRuntimeV2Filtering` to keep this connector compilable against
* Spark 3.3; `SupportsRuntimeV2Filtering` was only introduced in Spark 3.4.
*/
object HiveRuntimeFilterSupport extends Logging {

/**
* Build the runtime-filterable attribute array. Only partition columns are exposed
* because DPP is only beneficial at the partition directory granularity.
*/
def filterAttributes(partitionColumnNames: Seq[String]): Array[NamedReference] = {
partitionColumnNames.map(Expressions.column).toArray
}

/**
* Translate Spark's runtime V1 `In` filters into catalyst [[In]] expressions
* bound to the given partition attributes.
*
* A filter is accepted only when it is a [[FilterIn]] whose attribute resolves
* to a known partition column.
*/
def toCatalystPartitionFilters(
Member:
This makes the code compatibility very fragile: Catalyst code is treated as an internal implementation detail and does not provide any compatibility guarantee.

Given that, why choose to implement SupportsRuntimeV2Filtering and translate the V2 Predicate to the Catalyst Filter, instead of just implementing SupportsRuntimeFiltering?

Contributor Author:
Good point. I’ll use SupportsRuntimeFiltering in the latest commit, and try to maintain compatibility with Spark 3.3+.

Contributor Author:
I've replaced SupportsRuntimeV2Filtering with SupportsRuntimeFiltering and updated the PR description; ready for further review.

filters: Array[Filter],
partitionSchema: StructType,
isCaseSensitive: Boolean): Seq[Expression] = {
val attrByName: Map[String, AttributeReference] =
partitionSchema.toAttributes
.map(a => normalize(a.name, isCaseSensitive) -> a).toMap

val accepted = filters.toSeq.flatMap {
case FilterIn(attribute, values) =>
attrByName.get(normalize(attribute, isCaseSensitive)).map { attr =>
In(attr, values.toSeq.map(v => Literal.create(v, attr.dataType)))
}
case _ => None
}

if (accepted.length < filters.length) {
logDebug(
s"Dropped ${filters.length - accepted.length} of ${filters.length} runtime " +
s"filter(s) not applicable to partition columns " +
s"[${partitionSchema.fieldNames.mkString(", ")}]")
}
accepted
}

private def normalize(name: String, isCaseSensitive: Boolean): String =
if (isCaseSensitive) {
name
} else {
name.toLowerCase(Locale.ROOT)
}
}
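To make the translation concrete, here is a small hedged usage sketch of the helper above; the schema, filter values, and the extra `EqualTo` filter are all made up for illustration.

```scala
// Illustrative driver for HiveRuntimeFilterSupport.toCatalystPartitionFilters.
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.sources.{EqualTo, Filter, In => FilterIn}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

import org.apache.kyuubi.spark.connector.hive.read.HiveRuntimeFilterSupport

val partitionSchema = StructType(Seq(StructField("dt", StringType)))

val runtimeFilters: Array[Filter] = Array(
  FilterIn("DT", Array("2026-05-01")), // resolves to partition column `dt` case-insensitively
  EqualTo("id", 1))                    // not an `In` filter, so it is dropped (logged at DEBUG)

// Yields a single catalyst expression equivalent to In(dt, Seq(Literal("2026-05-01"))).
val catalystFilters: Seq[Expression] =
  HiveRuntimeFilterSupport.toCatalystPartitionFilters(
    runtimeFilters,
    partitionSchema,
    isCaseSensitive = false)
```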
@@ -27,7 +27,8 @@ import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTablePartition}
import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression}
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.connector.read.PartitionReaderFactory
import org.apache.spark.sql.connector.expressions.NamedReference
import org.apache.spark.sql.connector.read.{PartitionReaderFactory, SupportsRuntimeFiltering}
import org.apache.spark.sql.execution.datasources.{FilePartition, PartitionedFile}
import org.apache.spark.sql.execution.datasources.v2.FileScan
import org.apache.spark.sql.hive.kyuubi.connector.HiveBridgeHelper.HiveClientImpl
@@ -36,6 +37,7 @@ import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableConfiguration

import org.apache.kyuubi.spark.connector.hive.{HiveConnectorUtils, KyuubiHiveConnectorException}
import org.apache.kyuubi.spark.connector.hive.KyuubiHiveConnectorConf.READ_RUNTIME_FILTER_ENABLED

case class HiveScan(
sparkSession: SparkSession,
@@ -46,13 +48,29 @@
readPartitionSchema: StructType,
pushedFilters: Array[Filter] = Array.empty,
partitionFilters: Seq[Expression] = Seq.empty,
dataFilters: Seq[Expression] = Seq.empty) extends FileScan {
dataFilters: Seq[Expression] = Seq.empty) extends FileScan
with SupportsRuntimeFiltering {

private val isCaseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis

private val partFileToHivePartMap: mutable.Map[PartitionedFile, CatalogTablePartition] =
mutable.Map()

private var runtimeFilters: Seq[Expression] = Seq.empty

// Align with Spark's built-in ParquetScan/OrcScan by explicitly overriding equals.
// This keeps `BatchScanExec#equals` stable and enables BroadcastExchange reuse under DPP.
override def equals(obj: Any): Boolean = obj match {
case other: HiveScan =>
super.equals(other) &&
catalogTable.identifier == other.catalogTable.identifier &&
dataSchema == other.dataSchema &&
equivalentFilters(pushedFilters, other.pushedFilters)
case _ => false
}

override def hashCode(): Int = getClass.hashCode()

override def isSplitable(path: Path): Boolean = {
catalogTable.provider.map(_.toUpperCase(Locale.ROOT)).exists {
case "PARQUET" => true
@@ -83,8 +101,9 @@ }
}

override protected def partitions: Seq[FilePartition] = {
val effectivePartitionFilters = partitionFilters ++ runtimeFilters
val (selectedPartitions, partDirToHivePartMap) =
fileIndex.listHiveFiles(partitionFilters, dataFilters)
fileIndex.listHiveFiles(effectivePartitionFilters, dataFilters)
val maxSplitBytes = FilePartition.maxSplitBytes(sparkSession, selectedPartitions)
val partitionAttributes = toAttributes(fileIndex.partitionSchema)
val attributeMap = partitionAttributes.map(a => normalizeName(a.name) -> a).toMap
@@ -157,4 +176,29 @@

def toAttributes(structType: StructType): Seq[AttributeReference] =
structType.map(f => AttributeReference(f.name, f.dataType, f.nullable, f.metadata)())

// -------------------------------------------------------------------------------
// SupportsRuntimeFiltering implementation
// -------------------------------------------------------------------------------

override def filterAttributes(): Array[NamedReference] = {
if (!sparkSession.sessionState.conf.getConf(READ_RUNTIME_FILTER_ENABLED)) {
Array.empty
} else {
HiveRuntimeFilterSupport.filterAttributes(readPartitionSchema.fieldNames.toSeq)
}
}

override def filter(filters: Array[Filter]): Unit = {
runtimeFilters = HiveRuntimeFilterSupport.toCatalystPartitionFilters(
filters,
fileIndex.partitionSchema,
isCaseSensitive)
if (runtimeFilters.nonEmpty) {
logInfo(s"Received ${runtimeFilters.length} runtime partition filter(s) for " +
s"${catalogTable.identifier}")
logDebug(s"Runtime partition filter(s) for ${catalogTable.identifier}: " +
s"${runtimeFilters.mkString(", ")}")
}
}
}
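For readers less familiar with the connector API, below is a rough Scala paraphrase of the contract that `HiveScan` now implements; the actual definition is the Java interface `org.apache.spark.sql.connector.read.SupportsRuntimeFiltering` in Spark itself, and the trait name and comments here are ours. Spark asks for the filterable attributes when planning the scan and pushes the DPP-derived filters via `filter(...)` at execution time, which is why `partitions` above lists files with `partitionFilters ++ runtimeFilters`.

```scala
// Rough paraphrase of Spark's SupportsRuntimeFiltering contract, for orientation only;
// it is not part of this PR.
import org.apache.spark.sql.connector.expressions.NamedReference
import org.apache.spark.sql.connector.read.Scan
import org.apache.spark.sql.sources.Filter

trait RuntimeFilteringContractSketch extends Scan {
  // Columns eligible for runtime filtering; HiveScan reports its partition columns,
  // or none when spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled is false.
  def filterAttributes(): Array[NamedReference]

  // Runtime filters (currently the `In` form) pushed by Spark during DPP; HiveScan
  // translates them to catalyst expressions and applies them when listing partitions.
  def filter(filters: Array[Filter]): Unit
}
```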
@@ -0,0 +1,103 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.kyuubi.spark.connector.hive

import scala.annotation.tailrec

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.connector.read.{Scan, SupportsRuntimeFiltering}
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec

import org.apache.kyuubi.spark.connector.hive.read.HiveScan

class DynamicPartitionPruningSuite extends KyuubiHiveTest {

private def findScan(spark: SparkSession, sql: String, tableNameHint: String): Scan = {
// Match on `HiveScan.catalogTable` rather than the node's `toString` because
// `BatchScanExec.toString` shape differs across Spark versions.
def matchesHint(b: BatchScanExec): Boolean = b.scan match {
case h: HiveScan => h.catalogTable.identifier.table == tableNameHint
case _ => false
}

@tailrec
def findBatchScan(plan: SparkPlan): Option[BatchScanExec] = plan match {
case aqe: AdaptiveSparkPlanExec => findBatchScan(aqe.inputPlan)
case _ => plan.collectFirst {
case b: BatchScanExec if matchesHint(b) => b
}
}

val exec = findBatchScan(spark.sql(sql).queryExecution.executedPlan)
assert(exec.isDefined)
exec.get.scan
}

test("HiveScan supports DPP runtime filtering on partition columns") {
Seq(
("true", Seq("dt")),
("false", Seq.empty[String])).foreach { case (enabled, expectedFilterAttrs) =>
withSparkSession(Map(
"hive.exec.dynamic.partition.mode" -> "nonstrict",
"spark.sql.kyuubi.hive.connector.read.runtimeFilter.enabled" -> enabled)) { spark =>
val suffix = if (enabled == "true") "on" else "off"
val fact = s"hive.default.dpp_fact_$suffix"
val dim = s"hive.default.dpp_dim_$suffix"

withTable(fact, dim) {
spark.sql(
s"""
| CREATE TABLE $fact (id INT, v STRING) PARTITIONED BY (dt STRING)
| STORED AS TEXTFILE
|""".stripMargin).collect()
spark.sql(s"INSERT INTO $fact PARTITION (dt='2026-01-01') VALUES (1, 'a'), (2, 'b')")
spark.sql(s"INSERT INTO $fact PARTITION (dt='2026-05-01') VALUES (3, 'c'), (4, 'd')")
spark.sql(s"INSERT INTO $fact PARTITION (dt='2026-09-01') VALUES (5, 'e'), (6, 'f')")

spark.sql(
s"""
| CREATE TABLE $dim (dt STRING, tag STRING)
| STORED AS TEXTFILE
|""".stripMargin).collect()
spark.sql(s"INSERT INTO $dim VALUES ('2026-05-01', 'target')")

val sql =
s"""
| SELECT f.id, f.v, f.dt
| FROM $fact f JOIN $dim d ON f.dt = d.dt
| WHERE d.tag = 'target'
|""".stripMargin

checkAnswer(
spark.sql(sql),
Seq(
Row(3, "c", "2026-05-01"),
Row(4, "d", "2026-05-01")))

val scan = findScan(spark, sql, fact.split('.').last)
assert(scan.isInstanceOf[SupportsRuntimeFiltering])
val filterAttrs = scan.asInstanceOf[SupportsRuntimeFiltering]
.filterAttributes().map(_.fieldNames().mkString("."))
assert(filterAttrs.toSeq == expectedFilterAttrs)
}
}
}
}
}