
Merging upstream version 26.19.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:15:28 +02:00
parent 58527c3d26
commit a99682f526
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
98 changed files with 67345 additions and 65319 deletions


@@ -100,6 +100,117 @@ STRING;
RPAD(tbl.str_col, 1, tbl.str_col);
STRING;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.double_col, tbl.bigint_col);
DOUBLE;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.bigint_col, tbl.double_col);
DOUBLE;
# dialect: hive, spark2, spark
IF(cond, tbl.double_col, tbl.str_col);
STRING;
# dialect: hive, spark2, spark
IF(cond, tbl.str_col, tbl.double_col);
STRING;
# dialect: databricks
IF(cond, tbl.str_col, tbl.double_col);
DOUBLE;
# dialect: databricks
IF(cond, tbl.double_col, tbl.str_col);
DOUBLE;
# dialect: hive, spark2, spark
IF(cond, tbl.date_col, tbl.str_col);
STRING;
# dialect: hive, spark2, spark
IF(cond, tbl.str_col, tbl.date_col);
STRING;
# dialect: databricks
IF(cond, tbl.date_col, tbl.str_col);
DATE;
# dialect: databricks
IF(cond, tbl.str_col, tbl.date_col);
DATE;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.date_col, tbl.timestamp_col);
TIMESTAMP;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.timestamp_col, tbl.date_col);
TIMESTAMP;
# dialect: hive, spark2, spark, databricks
IF(cond, NULL, tbl.str_col);
STRING;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.str_col, NULL);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.str_col, tbl.date_col, tbl.bigint_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, tbl.str_col, tbl.bigint_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, tbl.bigint_col, tbl.str_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.str_col, tbl.date_col, tbl.bigint_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, tbl.str_col, tbl.bigint_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, NULL, tbl.bigint_col, tbl.str_col);
STRING;
# dialect: databricks
COALESCE(tbl.str_col, tbl.bigint_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.bigint_col, tbl.str_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.str_col, NULL, tbl.bigint_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.bigint_col, NULL, tbl.str_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.bool_col, tbl.str_col);
BOOLEAN;
# dialect: hive, spark2, spark
COALESCE(tbl.interval_col, tbl.str_col);
STRING;
# dialect: databricks
COALESCE(tbl.interval_col, tbl.str_col);
INTERVAL;
# dialect: databricks
COALESCE(tbl.bin_col, tbl.str_col);
BINARY;
--------------------------------------
-- BigQuery
@@ -205,6 +316,14 @@ STRING;
CONCAT(tbl.bin_col, tbl.bin_col);
BINARY;
# dialect: bigquery
CONCAT(0, tbl.str_col);
STRING;
# dialect: bigquery
CONCAT(tbl.str_col, 0);
STRING;
# dialect: bigquery
LEFT(tbl.str_col, 1);
STRING;
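
The rows added above pair each expression with the type the annotator is expected to infer under the listed dialects: Hive/Spark keep mixed string comparisons as STRING, Databricks coerces the string operand toward the other type, and BigQuery's CONCAT always yields STRING. As a rough illustration of how one such row maps onto the library, here is a minimal sketch using sqlglot's parse_one and annotate_types; the schema dict for tbl and the dialect keyword are assumptions made for illustration, not something taken from this diff.

from sqlglot import parse_one
from sqlglot.optimizer.annotate_types import annotate_types

# Hypothetical column types for the fixture table `tbl`; the real test
# suite declares its own schema.
schema = {"tbl": {"str_col": "STRING", "double_col": "DOUBLE", "bigint_col": "BIGINT"}}

# Parse with the dialect under test and annotate. Per the Databricks row
# above, the string branch is coerced, so the IF should come back DOUBLE.
expr = parse_one("IF(cond, tbl.str_col, tbl.double_col)", read="databricks")
annotated = annotate_types(expr, schema=schema, dialect="databricks")
print(annotated.type)  # expected: DOUBLE

Running the same call with read="spark" and dialect="spark" should report STRING instead, which is exactly the split these fixture rows encode.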


@@ -329,6 +329,11 @@ FROM
t1;
SELECT x.a AS a, x.b AS b, ROW_NUMBER() OVER (PARTITION BY x.a ORDER BY x.a) AS row_num FROM x AS x ORDER BY x.a, x.b, row_num;
# title: Keep ORDER BY
# execute: false
WITH t AS (SELECT t1.x AS x, t1.y AS y, t2.a AS a, t2.b AS b FROM t1 AS t1(x, y) CROSS JOIN t2 AS t2(a, b) ORDER BY t2.a) SELECT t.x AS x, t.y AS y, t.a AS a, t.b AS b FROM t AS t;
SELECT t1.x AS x, t1.y AS y, t2.a AS a, t2.b AS b FROM t1 AS t1(x, y) CROSS JOIN t2 AS t2(a, b) ORDER BY t2.a;
# title: Don't merge window functions, inner table is aliased in outer query
with t1 as (
SELECT
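
The new "Keep ORDER BY" case checks that when the CTE is merged into the outer query, its ORDER BY t2.a is not dropped. A minimal sketch of how that rule can be exercised directly, assuming the usual qualify-then-merge pipeline (no schema is passed here because the t1(x, y) / t2(a, b) alias column lists already name the columns; a real run may still need one depending on the version):

from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify
from sqlglot.optimizer.merge_subqueries import merge_subqueries

sql = """
WITH t AS (
  SELECT t1.x AS x, t1.y AS y, t2.a AS a, t2.b AS b
  FROM t1 AS t1(x, y) CROSS JOIN t2 AS t2(a, b)
  ORDER BY t2.a
)
SELECT t.x AS x, t.y AS y, t.a AS a, t.b AS b FROM t AS t
"""

# merge_subqueries operates on a qualified tree; after merging, the inner
# ORDER BY should survive on the flattened SELECT, as the fixture expects.
merged = merge_subqueries(qualify(parse_one(sql)))
print(merged.sql())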


@@ -449,3 +449,11 @@ SELECT
FROM foo
WHERE
1 = 1 AND /* first comment */ foo.a /* second comment */ = 1;
MERGE INTO t USING s ON t.id = s.id WHEN MATCHED THEN UPDATE SET status = s.status, amount = s.amount;
MERGE INTO t
USING s
ON t.id = s.id
WHEN MATCHED THEN UPDATE SET
status = s.status,
amount = s.amount;
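
The added fixture pins the pretty-printed layout for a simple MERGE statement. The same layout can be reproduced (approximately, depending on the installed version) with sqlglot's pretty generation:

import sqlglot

sql = (
    "MERGE INTO t USING s ON t.id = s.id "
    "WHEN MATCHED THEN UPDATE SET status = s.status, amount = s.amount"
)

# Round-trip through the parser and regenerate with pretty=True to get the
# multi-line MERGE layout shown above.
print(sqlglot.transpile(sql, pretty=True)[0])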