
Commit 942e95e
[SPARK-50762][SQL][TEST][FOLLOWUP][4.0] Regenerate sql-udf.sql.out
### What changes were proposed in this pull request?

This is a follow-up of #50898 for branch-4.0.
- #50898

### Why are the changes needed?

#50898 broke `branch-4.0` CIs:
- https://github.com/apache/spark/actions/runs/15070364465/job/42364916342
- https://github.com/apache/spark/actions/runs/15070303045/job/42364700177

```
- sql-udf.sql_analyzer_test *** FAILED ***
  Expected "...g.default.foo3_1b(c2[))#xL > cast(0 as bigint))
  +- Project [c1#x, count(1)#xL, spark_catalog.default.foo3_1b(x#x) AS spark_catalog.default.foo3_1b(sum(c2))#x, sum(spark_catalog.default.foo3_1b(c2))#xL]
  +- Project [c1#x, count(1)#xL, sum(c2)#xL, sum(spark_catalog.default.foo3_1b(c2))#xL, cast(sum(c2)#xL as int) AS x#x]
  +- Aggregate [c1#x], [c1#x, count(1) AS count(1)#xL, sum(c2#x) AS sum(c2)#xL, sum(spark_catalog.default.foo3_1b(x#x)) AS sum(spark_catalog.default.foo3_1b(c2]))#xL]
  +...",
  but got "...g.default.foo3_1b(c2[#x))#xL > cast(0 as bigint))
  +- Project [c1#x, count(1)#xL, spark_catalog.default.foo3_1b(x#x) AS spark_catalog.default.foo3_1b(sum(c2))#x, sum(spark_catalog.default.foo3_1b(c2#x))#xL]
  +- Project [c1#x, count(1)#xL, sum(c2)#xL, sum(spark_catalog.default.foo3_1b(c2#x))#xL, cast(sum(c2)#xL as int) AS x#x]
  +- Aggregate [c1#x], [c1#x, count(1) AS count(1)#xL, sum(c2#x) AS sum(c2)#xL, sum(spark_catalog.default.foo3_1b(x#x)) AS sum(spark_catalog.default.foo3_1b(c2#x]))#xL]
  +..."
  Result did not match for query #152
  SELECT c1, COUNT(*), foo3_1b(SUM(c2)) FROM t1 GROUP BY c1 HAVING SUM(foo3_1b(c2)) > 0 (SQLQueryTestSuite.scala:683)
```

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Pass the CIs.

### Was this patch authored or co-authored using generative AI tooling?

No.

Closes #50928 from dongjoon-hyun/SPARK-50762.

Authored-by: Dongjoon Hyun <[email protected]>
Signed-off-by: Dongjoon Hyun <[email protected]>
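For context, golden files such as `sql-udf.sql.out` are not edited by hand; they are regenerated by re-running the query test suite with golden-file generation enabled. A minimal sketch of that workflow, assuming the standard `SPARK_GENERATE_GOLDEN_FILES` switch documented in `SQLQueryTestSuite` (the `-z` filter narrows the run to the affected test file):

```shell
# Regenerate the analyzer golden results for sql-udf.sql only.
# SPARK_GENERATE_GOLDEN_FILES=1 tells SQLQueryTestSuite to rewrite
# the .sql.out files instead of comparing against them.
SPARK_GENERATE_GOLDEN_FILES=1 build/sbt \
  "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite -- -z sql-udf.sql"
```

Running without the `-z` filter regenerates the golden files for the entire suite, which is slower but useful when a change touches many test files at once.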
1 parent 6ffc4ed commit 942e95e

1 file changed: 8 additions, 8 deletions


sql/core/src/test/resources/sql-tests/analyzer-results/sql-udf.sql.out

Lines changed: 8 additions & 8 deletions
```diff
@@ -1832,10 +1832,10 @@ Filter (spark_catalog.default.foo3_1b(sum(c2))#x > 0)
 SELECT c1, COUNT(*), foo3_1b(SUM(c2)) FROM t1 GROUP BY c1 HAVING SUM(foo3_1b(c2)) > 0
 -- !query analysis
 Project [c1#x, count(1)#xL, spark_catalog.default.foo3_1b(sum(c2))#x]
-+- Filter (sum(spark_catalog.default.foo3_1b(c2))#xL > cast(0 as bigint))
-   +- Project [c1#x, count(1)#xL, spark_catalog.default.foo3_1b(x#x) AS spark_catalog.default.foo3_1b(sum(c2))#x, sum(spark_catalog.default.foo3_1b(c2))#xL]
-      +- Project [c1#x, count(1)#xL, sum(c2)#xL, sum(spark_catalog.default.foo3_1b(c2))#xL, cast(sum(c2)#xL as int) AS x#x]
-         +- Aggregate [c1#x], [c1#x, count(1) AS count(1)#xL, sum(c2#x) AS sum(c2)#xL, sum(spark_catalog.default.foo3_1b(x#x)) AS sum(spark_catalog.default.foo3_1b(c2))#xL]
++- Filter (sum(spark_catalog.default.foo3_1b(c2#x))#xL > cast(0 as bigint))
+   +- Project [c1#x, count(1)#xL, spark_catalog.default.foo3_1b(x#x) AS spark_catalog.default.foo3_1b(sum(c2))#x, sum(spark_catalog.default.foo3_1b(c2#x))#xL]
+      +- Project [c1#x, count(1)#xL, sum(c2)#xL, sum(spark_catalog.default.foo3_1b(c2#x))#xL, cast(sum(c2)#xL as int) AS x#x]
+         +- Aggregate [c1#x], [c1#x, count(1) AS count(1)#xL, sum(c2#x) AS sum(c2)#xL, sum(spark_catalog.default.foo3_1b(x#x)) AS sum(spark_catalog.default.foo3_1b(c2#x))#xL]
             +- Project [c1#x, c2#x, cast(c2#x as int) AS x#x]
                +- SubqueryAlias spark_catalog.default.t1
                   +- View (`spark_catalog`.`default`.`t1`, [c1#x, c2#x])
```
```diff
@@ -2141,9 +2141,9 @@ Project [spark_catalog.default.foo3_1b(c1)#x, sum(c2)#xL]
 -- !query
 SELECT foo3_1b(c1), c2, GROUPING(foo3_1b(c1)), SUM(c1) FROM t1 GROUP BY ROLLUP(foo3_1b(c1), c2)
 -- !query analysis
-Aggregate [spark_catalog.default.foo3_1b(c1)#x, c2#x, spark_grouping_id#xL], [spark_catalog.default.foo3_1b(c1)#x AS spark_catalog.default.foo3_1b(c1)#x, c2#x, cast((shiftright(spark_grouping_id#xL, 1) & 1) as tinyint) AS grouping(spark_catalog.default.foo3_1b(c1))#x, sum(c1#x) AS sum(c1)#xL]
-+- Expand [[c1#x, c2#x, spark_catalog.default.foo3_1b(c1)#x, c2#x, 0], [c1#x, c2#x, spark_catalog.default.foo3_1b(c1)#x, null, 1], [c1#x, c2#x, null, null, 3]], [c1#x, c2#x, spark_catalog.default.foo3_1b(c1)#x, c2#x, spark_grouping_id#xL]
-   +- Project [c1#x, c2#x, spark_catalog.default.foo3_1b(x#x) AS spark_catalog.default.foo3_1b(c1)#x, c2#x AS c2#x]
+Aggregate [spark_catalog.default.foo3_1b(c1#x)#x, c2#x, spark_grouping_id#xL], [spark_catalog.default.foo3_1b(c1#x)#x AS spark_catalog.default.foo3_1b(c1)#x, c2#x, cast((shiftright(spark_grouping_id#xL, 1) & 1) as tinyint) AS grouping(spark_catalog.default.foo3_1b(c1))#x, sum(c1#x) AS sum(c1)#xL]
++- Expand [[c1#x, c2#x, spark_catalog.default.foo3_1b(c1#x)#x, c2#x, 0], [c1#x, c2#x, spark_catalog.default.foo3_1b(c1#x)#x, null, 1], [c1#x, c2#x, null, null, 3]], [c1#x, c2#x, spark_catalog.default.foo3_1b(c1#x)#x, c2#x, spark_grouping_id#xL]
+   +- Project [c1#x, c2#x, spark_catalog.default.foo3_1b(x#x) AS spark_catalog.default.foo3_1b(c1#x)#x, c2#x AS c2#x]
       +- Project [c1#x, c2#x, cast(c1#x as int) AS x#x]
          +- SubqueryAlias spark_catalog.default.t1
             +- View (`spark_catalog`.`default`.`t1`, [c1#x, c2#x])
```
```diff
@@ -2932,7 +2932,7 @@ Project [spark_catalog.default.foo3_2e1(occurrences#x, instance_start_time#x) AS
 :     :  +- OneRowRelation
 :     +- Project [CASE WHEN (isnull(outer(occurrences#x)) OR (size(outer(occurrences#x), false) = 0)) THEN cast(null as string) ELSE sort_array(diffs#x, true)[0].id END AS id#x]
 :        +- SubqueryAlias t
-:           +- CTERelationRef xxxx, true, [diffs#x], false, false, 1
+:           +- CTERelationRef xxxx, true, [diffs#x], false, false
 +- Project [cast(array(struct(col1, 2022-01-01 10:11:12, col2, 1), struct(col1, 2022-01-01 10:11:15, col2, 2)) as array<struct<start_time:timestamp,occurrence_id:string>>) AS occurrences#x, cast(2022-01-01 as timestamp) AS instance_start_time#x]
    +- OneRowRelation
 
```
