Adding upstream version 26.17.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
commit 873e685933 (parent 4362133ee5)
Author: Daniel Baumann <daniel@debian.org>
Date: 2025-05-12 06:50:57 +02:00
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)
63 changed files with 16004 additions and 15816 deletions

tests/dialects/test_bigquery.py

@@ -236,6 +236,10 @@ LANGUAGE js AS
         self.validate_identity(
             "CREATE OR REPLACE VIEW test (tenant_id OPTIONS (description='Test description on table creation')) AS SELECT 1 AS tenant_id, 1 AS customer_id",
         )
+        self.validate_identity(
+            'SELECT r"\\t"',
+            "SELECT '\\\\t'",
+        )
         self.validate_identity(
             "ARRAY(SELECT AS STRUCT e.x AS y, e.z AS bla FROM UNNEST(bob))::ARRAY<STRUCT<y STRING, bro NUMERIC>>",
             "CAST(ARRAY(SELECT AS STRUCT e.x AS y, e.z AS bla FROM UNNEST(bob)) AS ARRAY<STRUCT<y STRING, bro NUMERIC>>)",
@@ -1023,8 +1027,8 @@ LANGUAGE js AS
             r'r"""/\*.*\*/"""',
             write={
                 "bigquery": r"'/\\*.*\\*/'",
-                "duckdb": r"'/\\*.*\\*/'",
-                "presto": r"'/\\*.*\\*/'",
+                "duckdb": r"'/\*.*\*/'",
+                "presto": r"'/\*.*\*/'",
                 "hive": r"'/\\*.*\\*/'",
                 "spark": r"'/\\*.*\\*/'",
             },
@@ -1033,8 +1037,8 @@ LANGUAGE js AS
             r'R"""/\*.*\*/"""',
             write={
                 "bigquery": r"'/\\*.*\\*/'",
-                "duckdb": r"'/\\*.*\\*/'",
-                "presto": r"'/\\*.*\\*/'",
+                "duckdb": r"'/\*.*\*/'",
+                "presto": r"'/\*.*\*/'",
                 "hive": r"'/\\*.*\\*/'",
                 "spark": r"'/\\*.*\\*/'",
             },

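The two hunks above flip the expected DuckDB and Presto renderings of BigQuery raw strings so the backslashes are emitted once instead of double-escaped, and the new identity test pins the canonical BigQuery form of r"\t". A minimal way to observe this from Python (a sketch, not part of this diff; assumes sqlglot 26.17.1 and its standard transpile API):

    import sqlglot

    # BigQuery raw string: r"\t" keeps the backslash literal; the canonical
    # rendering escapes it once, per the new identity test above.
    print(sqlglot.transpile('SELECT r"\\t"', read="bigquery", write="bigquery")[0])
    # prints: SELECT '\\t'
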
tests/dialects/test_clickhouse.py

@@ -167,6 +167,7 @@ class TestClickhouse(Validator):
         self.validate_identity(
             "CREATE MATERIALIZED VIEW test_view ON CLUSTER '{cluster}' (id UInt8) ENGINE=AggregatingMergeTree() ORDER BY tuple() AS SELECT * FROM test_data"
         )
+        self.validate_identity("CREATE TABLE test (id UInt8) ENGINE=Null()")
         self.validate_identity(
             "CREATE MATERIALIZED VIEW test_view ON CLUSTER cl1 TO table1 AS SELECT * FROM test_data"
         )

tests/dialects/test_databricks.py

@@ -338,3 +338,8 @@ class TestDatabricks(Validator):
         self.validate_identity(
             "ANALYZE TABLE ctlg.db.tbl PARTITION(foo = 'foo', bar = 'bar') COMPUTE STATISTICS NOSCAN"
         )
+
+    def test_udf_environment_property(self):
+        self.validate_identity(
+            """CREATE FUNCTION a() ENVIRONMENT (dependencies = '["foo1==1", "foo2==2"]', environment_version = 'None')"""
+        )

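validate_identity asserts that a statement survives a parse/generate round trip unchanged, so the new test pins the Databricks ENVIRONMENT clause on CREATE FUNCTION. A roughly equivalent standalone check (a sketch, assuming the usual sqlglot API):

    import sqlglot

    sql = """CREATE FUNCTION a() ENVIRONMENT (dependencies = '["foo1==1", "foo2==2"]', environment_version = 'None')"""
    # The round trip through the Databricks dialect should be a no-op.
    assert sqlglot.transpile(sql, read="databricks", write="databricks")[0] == sql
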
tests/dialects/test_duckdb.py

@@ -428,6 +428,7 @@ class TestDuckDB(Validator):
             "SELECT STRFTIME(CAST('2020-01-01' AS TIMESTAMP), CONCAT('%Y', '%m'))",
             write={
                 "duckdb": "SELECT STRFTIME(CAST('2020-01-01' AS TIMESTAMP), CONCAT('%Y', '%m'))",
+                "spark": "SELECT DATE_FORMAT(CAST('2020-01-01' AS TIMESTAMP_NTZ), CONCAT(COALESCE('yyyy', ''), COALESCE('MM', '')))",
                 "tsql": "SELECT FORMAT(CAST('2020-01-01' AS DATETIME2), CONCAT('yyyy', 'MM'))",
             },
         )
@@ -1110,6 +1111,28 @@ class TestDuckDB(Validator):
             },
         )
+        self.validate_all(
+            "SELECT TIMESTAMP 'foo'",
+            write={
+                "duckdb": "SELECT CAST('foo' AS TIMESTAMP)",
+                "hive": "SELECT CAST('foo' AS TIMESTAMP)",
+                "spark2": "SELECT CAST('foo' AS TIMESTAMP)",
+                "spark": "SELECT CAST('foo' AS TIMESTAMP_NTZ)",
+                "postgres": "SELECT CAST('foo' AS TIMESTAMP)",
+                "mysql": "SELECT CAST('foo' AS DATETIME)",
+                "clickhouse": "SELECT CAST('foo' AS Nullable(DateTime))",
+                "databricks": "SELECT CAST('foo' AS TIMESTAMP_NTZ)",
+                "snowflake": "SELECT CAST('foo' AS TIMESTAMPNTZ)",
+                "redshift": "SELECT CAST('foo' AS TIMESTAMP)",
+                "tsql": "SELECT CAST('foo' AS DATETIME2)",
+                "presto": "SELECT CAST('foo' AS TIMESTAMP)",
+                "trino": "SELECT CAST('foo' AS TIMESTAMP)",
+                "oracle": "SELECT CAST('foo' AS TIMESTAMP)",
+                "bigquery": "SELECT CAST('foo' AS DATETIME)",
+                "starrocks": "SELECT CAST('foo' AS DATETIME)",
+            },
+        )

     def test_sample(self):
         self.validate_identity(
             "SELECT * FROM tbl USING SAMPLE 5",

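The new validate_all block pins how a TIMESTAMP string literal fans out per target dialect. A sketch of the same check through the public API (dialect list and expected outputs taken from the test above):

    import sqlglot

    for dialect in ("spark", "snowflake", "clickhouse", "tsql"):
        print(dialect, sqlglot.transpile("SELECT TIMESTAMP 'foo'", read="duckdb", write=dialect)[0])
    # spark      SELECT CAST('foo' AS TIMESTAMP_NTZ)
    # snowflake  SELECT CAST('foo' AS TIMESTAMPNTZ)
    # clickhouse SELECT CAST('foo' AS Nullable(DateTime))
    # tsql       SELECT CAST('foo' AS DATETIME2)
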
tests/dialects/test_mysql.py

@@ -1311,7 +1311,7 @@ COMMENT='客户账户表'"""

     def test_timestamp_trunc(self):
         hive_dialects = ("spark", "databricks")
-        for dialect in ("postgres", "snowflake", "duckdb", *hive_dialects):
+        for dialect in ("postgres", "snowflake", *hive_dialects):
             for unit in (
                 "SECOND",
                 "DAY",

tests/dialects/test_snowflake.py

@@ -1022,6 +1022,39 @@ class TestSnowflake(Validator):
             },
         )
+        self.validate_all(
+            "DAYOFWEEKISO(foo)",
+            read={
+                "presto": "DAY_OF_WEEK(foo)",
+                "trino": "DAY_OF_WEEK(foo)",
+            },
+            write={
+                "snowflake": "DAYOFWEEKISO(foo)",
+            },
+        )
+        self.validate_all(
+            "DAYOFWEEKISO(foo)",
+            read={
+                "presto": "DOW(foo)",
+                "trino": "DOW(foo)",
+            },
+            write={
+                "snowflake": "DAYOFWEEKISO(foo)",
+            },
+        )
+        self.validate_all(
+            "DAYOFYEAR(foo)",
+            read={
+                "presto": "DOY(foo)",
+                "trino": "DOY(foo)",
+            },
+            write={
+                "snowflake": "DAYOFYEAR(foo)",
+            },
+        )

     def test_null_treatment(self):
         self.validate_all(
             r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",

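The three new mappings read the Presto/Trino day-of-week and day-of-year helpers into Snowflake's ISO variants. Sketch, with outputs per the tests above:

    import sqlglot

    print(sqlglot.transpile("SELECT DAY_OF_WEEK(foo)", read="presto", write="snowflake")[0])
    # SELECT DAYOFWEEKISO(foo)
    print(sqlglot.transpile("SELECT DOY(foo)", read="trino", write="snowflake")[0])
    # SELECT DAYOFYEAR(foo)
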
tests/dialects/test_tsql.py

@@ -1062,6 +1062,16 @@ WHERE
                 "tsql": "ALTER TABLE a ADD b INTEGER, c INTEGER",
             },
         )
+        self.validate_all(
+            "ALTER TABLE a ALTER COLUMN b INTEGER",
+            read={
+                "": "ALTER TABLE a ALTER COLUMN b INT",
+            },
+            write={
+                "": "ALTER TABLE a ALTER COLUMN b SET DATA TYPE INT",
+                "tsql": "ALTER TABLE a ALTER COLUMN b INTEGER",
+            },
+        )
         self.validate_all(
             "CREATE TABLE #mytemp (a INTEGER, b CHAR(2), c TIME(4), d FLOAT(24))",
             write={

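The new case covers column type changes: T-SQL spells them as ALTER TABLE ... ALTER COLUMN b INTEGER, while the generic dialect uses SET DATA TYPE. Sketch (when write is omitted, transpile generates in the default dialect):

    import sqlglot

    print(sqlglot.transpile("ALTER TABLE a ALTER COLUMN b INTEGER", read="tsql")[0])
    # per the test: ALTER TABLE a ALTER COLUMN b SET DATA TYPE INT
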
tests/fixtures/optimizer/pushdown_predicates.sql

@@ -44,3 +44,30 @@ SELECT x.cnt AS cnt FROM (SELECT COUNT(1) AS cnt FROM x AS x HAVING COUNT(1) > 0
 -- Pushdown predicate to HAVING (DNF)
 SELECT x.cnt AS cnt FROM (SELECT COUNT(1) AS cnt, COUNT(x.a) AS cnt_a, COUNT(x.b) AS cnt_b FROM x AS x) AS x WHERE (x.cnt_a > 0 AND x.cnt_b > 0) OR x.cnt > 0;
 SELECT x.cnt AS cnt FROM (SELECT COUNT(1) AS cnt, COUNT(x.a) AS cnt_a, COUNT(x.b) AS cnt_b FROM x AS x HAVING COUNT(1) > 0 OR (COUNT(x.a) > 0 AND COUNT(x.b) > 0)) AS x WHERE x.cnt > 0 OR (x.cnt_a > 0 AND x.cnt_b > 0);
+
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+SELECT x.a, u.val FROM x AS x JOIN UNNEST(ARRAY(0, 1)) AS u("val") ON u.val < x.a WHERE TRUE;
+
+# dialect: presto
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+
+# dialect: trino
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+
+# dialect: athena
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+SELECT x.a, u.val FROM x AS x CROSS JOIN UNNEST(ARRAY[0, 1]) AS u("val") WHERE x.a > u.val;
+
+# dialect: presto
+SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") CROSS JOIN x AS x WHERE x.a > u.val;
+SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") JOIN x AS x ON u.val < x.a WHERE TRUE;
+
+# dialect: trino
+SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") CROSS JOIN x AS x WHERE x.a > u.val;
+SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") JOIN x AS x ON u.val < x.a WHERE TRUE;
+
+# dialect: athena
+SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") CROSS JOIN x AS x WHERE x.a > u.val;
+SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") JOIN x AS x ON u.val < x.a WHERE TRUE;

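Each fixture pair above is (input; expected output) for the pushdown_predicates optimizer rule; the new presto/trino/athena cases exercise dialect-aware pushdown into UNNEST joins. A sketch of running the rule directly (the dialect keyword is an assumption inferred from the fixture's dialect annotations):

    import sqlglot
    from sqlglot.optimizer.pushdown_predicates import pushdown_predicates

    tree = sqlglot.parse_one(
        'SELECT x.a, u.val FROM UNNEST(ARRAY[0, 1]) AS u("val") CROSS JOIN x AS x WHERE x.a > u.val',
        read="presto",
    )
    print(pushdown_predicates(tree, dialect="presto").sql(dialect="presto"))
    # per the fixture: ... JOIN x AS x ON u.val < x.a WHERE TRUE
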
tests/test_transforms.py

@@ -248,6 +248,13 @@ class TestTransforms(unittest.TestCase):
                 "SELECT * FROM table1 LEFT JOIN table2 ON table1.col = table2.col1 + 25",
                 dialect,
             )
+            # eliminate join mark while preserving non-participating joins
+            self.validate(
+                eliminate_join_marks,
+                "SELECT * FROM a, b, c WHERE a.id = b.id AND b.id(+) = c.id",
+                "SELECT * FROM a LEFT JOIN b ON b.id = c.id CROSS JOIN c WHERE a.id = b.id",
+                dialect,
+            )
             alias = "AS " if dialect != "oracle" else ""
             self.validate(
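
The added case checks that eliminate_join_marks rewrites only the (+)-marked predicate into an explicit LEFT JOIN and keeps the remaining comma join as a CROSS JOIN. A standalone sketch (Oracle shown; the test loops over every dialect that supports join marks):

    import sqlglot
    from sqlglot.transforms import eliminate_join_marks

    tree = sqlglot.parse_one(
        "SELECT * FROM a, b, c WHERE a.id = b.id AND b.id(+) = c.id", read="oracle"
    )
    print(eliminate_join_marks(tree).sql(dialect="oracle"))
    # per the test: SELECT * FROM a LEFT JOIN b ON b.id = c.id CROSS JOIN c WHERE a.id = b.id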