
Commit 08fa357

update diff
1 parent a7301d0 commit 08fa357

2 files changed

Lines changed: 82 additions & 17 deletions

File tree

dev/diffs/4.1.1.diff
spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala

dev/diffs/4.1.1.diff

Lines changed: 82 additions & 16 deletions
@@ -687,6 +687,57 @@ index 95e86fe4311..24f99384bb7 100644
     }.flatten
     assert(filters.contains(GreaterThan(scan.logicalPlan.output.head, Literal(5L))))
   }
+diff --git a/sql/core/src/test/scala/org/apache/spark/sql/IgnoreComet.scala b/sql/core/src/test/scala/org/apache/spark/sql/IgnoreComet.scala
+new file mode 100644
+index 00000000000..5691536c114
+--- /dev/null
++++ b/sql/core/src/test/scala/org/apache/spark/sql/IgnoreComet.scala
+@@ -0,0 +1,45 @@
++/*
++ * Licensed to the Apache Software Foundation (ASF) under one or more
++ * contributor license agreements. See the NOTICE file distributed with
++ * this work for additional information regarding copyright ownership.
++ * The ASF licenses this file to You under the Apache License, Version 2.0
++ * (the "License"); you may not use this file except in compliance with
++ * the License. You may obtain a copy of the License at
++ *
++ *    http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.spark.sql
++
++import org.scalactic.source.Position
++import org.scalatest.Tag
++
++import org.apache.spark.sql.test.SQLTestUtils
++
++/**
++ * Tests with this tag will be ignored when Comet is enabled (e.g., via `ENABLE_COMET`).
++ */
++case class IgnoreComet(reason: String) extends Tag("DisableComet")
++case class IgnoreCometNativeIcebergCompat(reason: String) extends Tag("DisableComet")
++case class IgnoreCometNativeDataFusion(reason: String) extends Tag("DisableComet")
++case class IgnoreCometNativeScan(reason: String) extends Tag("DisableComet")
++
++/**
++ * Helper trait that disables Comet for all tests regardless of default config values.
++ */
++trait IgnoreCometSuite extends SQLTestUtils {
++  override protected def test(testName: String, testTags: Tag*)(testFun: => Any)
++      (implicit pos: Position): Unit = {
++    if (isCometEnabled) {
++      ignore(testName + " (disabled when Comet is on)", testTags: _*)(testFun)
++    } else {
++      super.test(testName, testTags: _*)(testFun)
++    }
++  }
++}
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/InjectRuntimeFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/InjectRuntimeFilterSuite.scala
 index 7d7185ae6c1..442a5bddeb8 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/InjectRuntimeFilterSuite.scala
@@ -1246,7 +1297,7 @@ index fee375db10a..8c2c24e2c5f 100644
     val v = VariantBuilder.parseJson(s, false)
     new VariantVal(v.getValue, v.getMetadata)
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/collation/CollationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/collation/CollationSuite.scala
-index 6cdf681d65c..4ea007422dd 100644
+index 6cdf681d65c..9ccdb711763 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/collation/CollationSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/collation/CollationSuite.scala
 @@ -22,10 +22,11 @@ import scala.jdk.CollectionConverters.MapHasAsJava
@@ -1272,7 +1323,23 @@ index 6cdf681d65c..4ea007422dd 100644
         }.nonEmpty
       )
     }
-@@ -1508,7 +1511,9 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
+@@ -416,6 +419,7 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
+   }
+ 
+   test("aggregates count respects collation") {
++    IgnoreComet("TODO: Ignore for first stage of 4.1")
+     Seq(
+       ("utf8_binary_rtrim", Seq("aaa", "aaa "), Seq(Row(2, "aaa"))),
+       ("utf8_binary", Seq("AAA", "aaa"), Seq(Row(1, "AAA"), Row(1, "aaa"))),
+@@ -448,6 +452,7 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
+   }
+ 
+   test("hash agg is not used for non binary collations") {
++    IgnoreComet("TODO: Ignore for first stage of 4.1")
+     val tableNameNonBinary = "T_NON_BINARY"
+     val tableNameBinary = "T_BINARY"
+     withTable(tableNameNonBinary) {
+@@ -1508,7 +1513,9 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
   }
 }
 
@@ -1283,23 +1350,23 @@ index 6cdf681d65c..4ea007422dd 100644
     val t1 = "T_1"
     val t2 = "T_2"
 
-@@ -1614,6 +1619,7 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
+@@ -1614,6 +1621,7 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
       } else {
         assert(!collectFirst(queryPlan) {
           case b: BroadcastHashJoinExec => b.leftKeys.head
 +          case b: CometBroadcastHashJoinExec => b.leftKeys.head
         }.head.isInstanceOf[ArrayTransform])
       }
     }
-@@ -1679,6 +1685,7 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
+@@ -1679,6 +1687,7 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
       } else {
         assert(!collectFirst(queryPlan) {
           case b: BroadcastHashJoinExec => b.leftKeys.head
 +          case b: CometBroadcastHashJoinExec => b.leftKeys.head
         }.head.isInstanceOf[ArrayTransform])
       }
     }
-@@ -1818,7 +1825,9 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
+@@ -1818,7 +1827,9 @@ class CollationSuite extends DatasourceV2SQLBase with AdaptiveSparkPlanHelper {
     }
   }
 
@@ -1755,14 +1822,14 @@ index 47679ed7865..9ffbaecb98e 100644
     assert(collectWithSubqueries(plan) { case s: SortAggregateExec => s }.length == sortAggCount)
   }
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
-index aed11badb71..1a0b22ef6fd 100644
+index aed11badb71..ab7e9456e26 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala
 @@ -23,6 +23,7 @@ import org.apache.spark.sql.QueryTest
  import org.apache.spark.sql.catalyst.InternalRow
  import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
  import org.apache.spark.sql.catalyst.plans.logical.Deduplicate
-+import org.apache.spark.sql.comet.CometNativeColumnarToRowExec
++import org.apache.spark.sql.comet.CometColumnarToRowExec
  import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
  import org.apache.spark.sql.internal.SQLConf
  import org.apache.spark.sql.test.SharedSparkSession
@@ -1773,20 +1840,20 @@ index aed11badb71..1a0b22ef6fd 100644
 -      df.queryExecution.executedPlan.collectFirst { case p: ColumnarToRowExec => p }.get
 +      df.queryExecution.executedPlan.collectFirst {
 +        case p: ColumnarToRowExec => p
-+        case p: CometNativeColumnarToRowExec => p
++        case p: CometColumnarToRowExec => p
 +      }.get
       try {
         spark.range(1).foreach { _ =>
           columnarToRowExec.canonicalized
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
-index a3cfdc5a240..87039732332 100644
+index a3cfdc5a240..1b08a1f42ee 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala
 @@ -22,6 +22,7 @@ import org.apache.spark.rdd.MapPartitionsWithEvaluatorRDD
  import org.apache.spark.sql.{Dataset, QueryTest, Row, SaveMode}
  import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode
  import org.apache.spark.sql.catalyst.expressions.codegen.{ByteCodeStats, CodeAndComment, CodeGenerator}
-+import org.apache.spark.sql.comet.{CometHashJoinExec, CometNativeColumnarToRowExec, CometSortExec, CometSortMergeJoinExec}
++import org.apache.spark.sql.comet.{CometColumnarToRowExec, CometHashJoinExec, CometSortExec, CometSortMergeJoinExec}
  import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite
  import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, SortAggregateExec}
  import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
@@ -1999,13 +2066,12 @@ index a3cfdc5a240..87039732332 100644
     val projection = Seq.tabulate(columnNum)(i => s"c$i + c$i as newC$i")
     val df = spark.read.parquet(path).selectExpr(projection: _*)
 
-@@ -815,6 +852,10 @@ class WholeStageCodegenSuite extends QueryTest with SharedSparkSession
+@@ -815,6 +852,9 @@ class WholeStageCodegenSuite extends QueryTest with SharedSparkSession
     assert(distinctWithId.queryExecution.executedPlan.exists {
       case WholeStageCodegenExec(
         ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _, _: HashAggregateExec, _, _))) => true
 +      case WholeStageCodegenExec(
-+        ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _,
-+          InputAdapter(_: CometNativeColumnarToRowExec), _, _))) =>
++        ProjectExec(_, BroadcastHashJoinExec(_, _, _, _, _, _: CometColumnarToRowExec, _, _))) =>
 +        true
       case _ => false
     })
@@ -3005,15 +3071,15 @@ index e31e0e70cf3..034ad5b953e 100644
   import testImplicits._
 
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
-index 83ebd24384c..39c2f46c9b8 100644
+index 83ebd24384c..69a0328b760 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala
 @@ -26,10 +26,11 @@ import org.apache.spark.sql.catalyst.expressions
  import org.apache.spark.sql.catalyst.expressions._
  import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
  import org.apache.spark.sql.catalyst.types.DataTypeUtils
 -import org.apache.spark.sql.execution.{FileSourceScanExec, SortExec, SparkPlan}
-+import org.apache.spark.sql.comet._
++import org.apache.spark.sql.comet.{CometColumnarToRowExec, CometExec, CometScanExec, CometSortMergeJoinExec}
 +import org.apache.spark.sql.execution.{ColumnarToRowExec, FileSourceScanExec, SortExec, SparkPlan}
  import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, AdaptiveSparkPlanHelper}
  import org.apache.spark.sql.execution.datasources.BucketingUtils
@@ -3081,7 +3147,7 @@ index 83ebd24384c..39c2f46c9b8 100644
 +            case s: SortMergeJoinExec => s
 +            case o => fail(s"expected SortMergeJoinExec, but found\n$o")
 +          }
-+        case CometNativeColumnarToRowExec(child) =>
++        case CometColumnarToRowExec(child) =>
 +          child.asInstanceOf[CometSortMergeJoinExec].originalPlan match {
 +            case s: SortMergeJoinExec => s
 +            case o => fail(s"expected SortMergeJoinExec, but found\n$o")
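Note on the helper introduced above: IgnoreComet.scala defines ScalaTest tags plus an IgnoreCometSuite trait for the patched Spark test tree. A minimal usage sketch, assuming a build where these helpers and isCometEnabled are available; the suite and test names below are hypothetical and not part of this commit:

package org.apache.spark.sql

import org.apache.spark.sql.test.SharedSparkSession

// Hypothetical suite: tag a single test so the Comet-enabled run ignores it.
class ExampleCollationBehaviorSuite extends QueryTest with SharedSparkSession {
  test("aggregate respects collation", IgnoreComet("not yet supported by Comet")) {
    // Runs only when Comet is disabled.
    assert(spark.range(1).count() == 1L)
  }
}

// Hypothetical suite: mix in IgnoreCometSuite so every test in the suite is
// ignored whenever isCometEnabled is true (e.g. when ENABLE_COMET is set).
class ExampleNonCometOnlySuite extends QueryTest
  with SharedSparkSession
  with IgnoreCometSuite {

  test("only meaningful without Comet") {
    assert(spark.range(10).count() == 10L)
  }
}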

spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala

Lines changed: 0 additions & 1 deletion
@@ -39,7 +39,6 @@ import org.apache.spark.sql.internal.SQLConf.SESSION_LOCAL_TIMEZONE
 import org.apache.spark.sql.types._
 
 import org.apache.comet.CometSparkSessionExtensions.{isSpark40Plus, isSpark41Plus}
-import org.apache.comet.serde.CometConcat
 import org.apache.comet.testing.{DataGenOptions, FuzzDataGenerator}
 
 class CometExpressionSuite extends CometTestBase with AdaptiveSparkPlanHelper {
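More broadly, most of the 4.1.1.diff edits in this commit rename CometNativeColumnarToRowExec to CometColumnarToRowExec and widen plan assertions so a test matches either the vanilla Spark operator or its Comet counterpart. A rough, self-contained sketch of that assertion pattern, assuming Comet's operators are on the test classpath; the helper names are illustrative and not from this commit:

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.comet.{CometBroadcastHashJoinExec, CometColumnarToRowExec}
import org.apache.spark.sql.execution.ColumnarToRowExec
import org.apache.spark.sql.execution.joins.BroadcastHashJoinExec

// Accept the vanilla operator or its Comet replacement, so the same assertion
// passes with and without Comet enabled.
def findColumnarToRow(df: DataFrame) =
  df.queryExecution.executedPlan.collectFirst {
    case c: ColumnarToRowExec => c
    case c: CometColumnarToRowExec => c
  }

def leftJoinKey(df: DataFrame) =
  df.queryExecution.executedPlan.collectFirst {
    case b: BroadcastHashJoinExec => b.leftKeys.head
    case b: CometBroadcastHashJoinExec => b.leftKeys.head
  }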
