
Merging upstream version 11.4.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-13 15:46:19 +01:00
parent ecb42ec17f
commit 63746a3e92
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
89 changed files with 35352 additions and 33081 deletions
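
As orientation for the diff below (an aside, not part of the upstream commit): these tests use sqlglot's Validator helpers, which reduce to calls to the public sqlglot.transpile API. A minimal sketch, assuming sqlglot is installed; the expected strings are taken verbatim from the hunks below.

import sqlglot

# validate_identity(sql) asserts that sql round-trips unchanged through the
# test's dialect (here: one of the ClickHouse identities below).
sql = "SELECT quantile(0.5)(a)"
assert sqlglot.transpile(sql, read="clickhouse", write="clickhouse")[0] == sql

# validate_all(sql, write={...}) asserts the canonical SQL renders as the given
# string per target dialect (here: the COUNT_IF fallback from test_dialect.py).
assert (
    sqlglot.transpile("SELECT COUNT_IF(col % 2 = 0) FROM foo", write="sqlite")[0]
    == "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FROM foo"
)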

tests/dialects/test_bigquery.py

@@ -8,6 +8,9 @@ class TestBigQuery(Validator):
     def test_bigquery(self):
         self.validate_identity("SELECT STRUCT<ARRAY<STRING>>(['2023-01-17'])")
         self.validate_identity("SELECT * FROM q UNPIVOT(values FOR quarter IN (b, c))")
+        self.validate_identity(
+            "SELECT * FROM (SELECT * FROM `t`) AS a UNPIVOT((c) FOR c_name IN (v1, v2))"
+        )
         self.validate_all("LEAST(x, y)", read={"sqlite": "MIN(x, y)"})
         self.validate_all(
@@ -280,7 +283,7 @@ class TestBigQuery(Validator):
                 "duckdb": "CURRENT_DATE + INTERVAL 1 DAY",
                 "mysql": "DATE_ADD(CURRENT_DATE, INTERVAL 1 DAY)",
                 "postgres": "CURRENT_DATE + INTERVAL '1' DAY",
-                "presto": "DATE_ADD(DAY, 1, CURRENT_DATE)",
+                "presto": "DATE_ADD('DAY', 1, CURRENT_DATE)",
                 "hive": "DATE_ADD(CURRENT_DATE, 1)",
                 "spark": "DATE_ADD(CURRENT_DATE, 1)",
             },

tests/dialects/test_clickhouse.py

@@ -16,6 +16,10 @@ class TestClickhouse(Validator):
         self.validate_identity("SELECT * FROM foo ANY JOIN bla")
         self.validate_identity("SELECT quantile(0.5)(a)")
         self.validate_identity("SELECT quantiles(0.5)(a) AS x FROM t")
+        self.validate_identity("SELECT quantiles(0.1, 0.2, 0.3)(a)")
+        self.validate_identity("SELECT histogram(5)(a)")
+        self.validate_identity("SELECT groupUniqArray(2)(a)")
+        self.validate_identity("SELECT exponentialTimeDecayedAvg(60)(a, b)")
         self.validate_identity("SELECT * FROM foo WHERE x GLOBAL IN (SELECT * FROM bar)")
         self.validate_identity("position(haystack, needle)")
         self.validate_identity("position(haystack, needle, position)")

tests/dialects/test_dialect.py

@@ -519,7 +519,7 @@ class TestDialect(Validator):
                 "duckdb": "x + INTERVAL 1 day",
                 "hive": "DATE_ADD(x, 1)",
                 "mysql": "DATE_ADD(x, INTERVAL 1 DAY)",
-                "postgres": "x + INTERVAL '1' 'day'",
+                "postgres": "x + INTERVAL '1' day",
                 "presto": "DATE_ADD('day', 1, x)",
                 "snowflake": "DATEADD(day, 1, x)",
                 "spark": "DATE_ADD(x, 1)",
@@ -543,11 +543,48 @@ class TestDialect(Validator):
         )
         self.validate_all(
             "DATE_TRUNC('day', x)",
+            read={
+                "bigquery": "DATE_TRUNC(x, day)",
+                "duckdb": "DATE_TRUNC('day', x)",
+                "spark": "TRUNC(x, 'day')",
+            },
             write={
                 "bigquery": "DATE_TRUNC(x, day)",
                 "duckdb": "DATE_TRUNC('day', x)",
                 "mysql": "DATE(x)",
                 "presto": "DATE_TRUNC('day', x)",
                 "postgres": "DATE_TRUNC('day', x)",
                 "snowflake": "DATE_TRUNC('day', x)",
                 "starrocks": "DATE_TRUNC('day', x)",
                 "spark": "TRUNC(x, 'day')",
             },
         )
+        self.validate_all(
+            "TIMESTAMP_TRUNC(x, day)",
+            read={
+                "bigquery": "TIMESTAMP_TRUNC(x, day)",
+                "presto": "DATE_TRUNC('day', x)",
+                "postgres": "DATE_TRUNC('day', x)",
+                "snowflake": "DATE_TRUNC('day', x)",
+                "starrocks": "DATE_TRUNC('day', x)",
+                "spark": "DATE_TRUNC('day', x)",
+            },
+        )
+        self.validate_all(
+            "DATE_TRUNC('day', CAST(x AS DATE))",
+            read={
+                "presto": "DATE_TRUNC('day', x::DATE)",
+                "snowflake": "DATE_TRUNC('day', x::DATE)",
+            },
+        )
+        self.validate_all(
+            "TIMESTAMP_TRUNC(CAST(x AS DATE), day)",
+            read={
+                "postgres": "DATE_TRUNC('day', x::DATE)",
+                "starrocks": "DATE_TRUNC('day', x::DATE)",
+            },
+        )
         self.validate_all(
             "DATE_TRUNC('week', x)",
             write={
@@ -582,8 +619,6 @@ class TestDialect(Validator):
             "DATE_TRUNC('year', x)",
             read={
                 "bigquery": "DATE_TRUNC(x, year)",
-                "snowflake": "DATE_TRUNC(year, x)",
-                "starrocks": "DATE_TRUNC('year', x)",
                 "spark": "TRUNC(x, 'year')",
             },
             write={
@@ -599,7 +634,10 @@ class TestDialect(Validator):
             "TIMESTAMP_TRUNC(x, year)",
             read={
                 "bigquery": "TIMESTAMP_TRUNC(x, year)",
                 "postgres": "DATE_TRUNC(year, x)",
+                "spark": "DATE_TRUNC('year', x)",
+                "snowflake": "DATE_TRUNC(year, x)",
+                "starrocks": "DATE_TRUNC('year', x)",
             },
             write={
                 "bigquery": "TIMESTAMP_TRUNC(x, year)",
@@ -752,7 +790,6 @@ class TestDialect(Validator):
                 "trino": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
                 "duckdb": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
                 "hive": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
-                "presto": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
                 "spark": "AGGREGATE(x, 0, (acc, x) -> acc + x, acc -> acc)",
                 "presto": "REDUCE(x, 0, (acc, x) -> acc + x, acc -> acc)",
             },
@@ -1455,3 +1492,36 @@ SELECT
                 "postgres": "SUBSTRING('123456' FROM 2 FOR 3)",
             },
         )
+
+    def test_count_if(self):
+        self.validate_identity("COUNT_IF(DISTINCT cond)")
+        self.validate_all(
+            "SELECT COUNT_IF(cond) FILTER", write={"": "SELECT COUNT_IF(cond) AS FILTER"}
+        )
+        self.validate_all(
+            "SELECT COUNT_IF(col % 2 = 0) FROM foo",
+            write={
+                "": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
+                "databricks": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
+                "presto": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
+                "snowflake": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
+                "sqlite": "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FROM foo",
+                "tsql": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
+            },
+        )
+        self.validate_all(
+            "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+            read={
+                "": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+                "databricks": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+                "tsql": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+            },
+            write={
+                "": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+                "databricks": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+                "presto": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+                "sqlite": "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FILTER(WHERE col < 1000) FROM foo",
+                "tsql": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
+            },
+        )
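
A hedged aside on the read direction exercised by test_count_if: read={dialect: sql} asserts that the dialect-specific string parses back into the canonical form. Assuming sqlglot is importable, the databricks entry above amounts to:

import sqlglot

canonical = "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo"
# Databricks uses the same surface syntax here, so parsing it as databricks
# and re-rendering in the default dialect should be a no-op.
assert sqlglot.transpile(canonical, read="databricks")[0] == canonical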

tests/dialects/test_drill.py

@@ -4,6 +4,12 @@ from tests.dialects.test_dialect import Validator
 class TestDrill(Validator):
     dialect = "drill"
 
+    def test_drill(self):
+        self.validate_all(
+            "DATE_FORMAT(a, 'yyyy')",
+            write={"drill": "TO_CHAR(a, 'yyyy')"},
+        )
+
     def test_string_literals(self):
         self.validate_all(
             "SELECT '2021-01-01' + INTERVAL 1 MONTH",

tests/dialects/test_duckdb.py

@@ -125,6 +125,11 @@ class TestDuckDB(Validator):
         self.validate_identity(
             "SELECT a['x space'] FROM (SELECT {'x space': 1, 'y': 2, 'z': 3} AS a)"
         )
         self.validate_all("SELECT * FROM 'x.y'", write={"duckdb": 'SELECT * FROM "x.y"'})
+        self.validate_all(
+            "WITH 'x' AS (SELECT 1) SELECT * FROM x",
+            write={"duckdb": 'WITH "x" AS (SELECT 1) SELECT * FROM x'},
+        )
         self.validate_all(
             "CREATE TABLE IF NOT EXISTS table (cola INT, colb STRING) USING ICEBERG PARTITIONED BY (colb)",
             write={

tests/dialects/test_presto.py

@@ -63,8 +63,8 @@ class TestPresto(Validator):
                 "bigquery": "CAST(x AS TIMESTAMPTZ)",
                 "duckdb": "CAST(x AS TIMESTAMPTZ(9))",
                 "presto": "CAST(x AS TIMESTAMP(9) WITH TIME ZONE)",
-                "hive": "CAST(x AS TIMESTAMPTZ)",
-                "spark": "CAST(x AS TIMESTAMPTZ)",
+                "hive": "CAST(x AS TIMESTAMP)",
+                "spark": "CAST(x AS TIMESTAMP)",
             },
         )
@@ -189,34 +189,38 @@ class TestPresto(Validator):
         )
         self.validate_all(
-            "DAY_OF_WEEK(timestamp '2012-08-08 01:00')",
+            "DAY_OF_WEEK(timestamp '2012-08-08 01:00:00')",
             write={
-                "spark": "DAYOFWEEK(CAST('2012-08-08 01:00' AS TIMESTAMP))",
-                "presto": "DAY_OF_WEEK(CAST('2012-08-08 01:00' AS TIMESTAMP))",
+                "spark": "DAYOFWEEK(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "presto": "DAY_OF_WEEK(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "duckdb": "DAYOFWEEK(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
             },
         )
         self.validate_all(
-            "DAY_OF_MONTH(timestamp '2012-08-08 01:00')",
+            "DAY_OF_MONTH(timestamp '2012-08-08 01:00:00')",
             write={
-                "spark": "DAYOFMONTH(CAST('2012-08-08 01:00' AS TIMESTAMP))",
-                "presto": "DAY_OF_MONTH(CAST('2012-08-08 01:00' AS TIMESTAMP))",
+                "spark": "DAYOFMONTH(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "presto": "DAY_OF_MONTH(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "duckdb": "DAYOFMONTH(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
             },
         )
         self.validate_all(
-            "DAY_OF_YEAR(timestamp '2012-08-08 01:00')",
+            "DAY_OF_YEAR(timestamp '2012-08-08 01:00:00')",
             write={
-                "spark": "DAYOFYEAR(CAST('2012-08-08 01:00' AS TIMESTAMP))",
-                "presto": "DAY_OF_YEAR(CAST('2012-08-08 01:00' AS TIMESTAMP))",
+                "spark": "DAYOFYEAR(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "presto": "DAY_OF_YEAR(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "duckdb": "DAYOFYEAR(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
             },
         )
         self.validate_all(
-            "WEEK_OF_YEAR(timestamp '2012-08-08 01:00')",
+            "WEEK_OF_YEAR(timestamp '2012-08-08 01:00:00')",
             write={
-                "spark": "WEEKOFYEAR(CAST('2012-08-08 01:00' AS TIMESTAMP))",
-                "presto": "WEEK_OF_YEAR(CAST('2012-08-08 01:00' AS TIMESTAMP))",
+                "spark": "WEEKOFYEAR(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "presto": "WEEK_OF_YEAR(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
+                "duckdb": "WEEKOFYEAR(CAST('2012-08-08 01:00:00' AS TIMESTAMP))",
             },
         )
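
A hedged usage sketch for the duckdb entries added above (assuming sqlglot is installed): the Presto day/week extraction functions now also transpile to DuckDB.

import sqlglot

out = sqlglot.transpile(
    "DAY_OF_WEEK(timestamp '2012-08-08 01:00:00')", read="presto", write="duckdb"
)[0]
assert out == "DAYOFWEEK(CAST('2012-08-08 01:00:00' AS TIMESTAMP))"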
@@ -365,6 +369,15 @@ class TestPresto(Validator):
         self.validate_identity("START TRANSACTION ISOLATION LEVEL REPEATABLE READ")
         self.validate_identity("APPROX_PERCENTILE(a, b, c, d)")
+        self.validate_all(
+            "ARRAY_AGG(x ORDER BY y DESC)",
+            write={
+                "hive": "COLLECT_LIST(x)",
+                "presto": "ARRAY_AGG(x ORDER BY y DESC)",
+                "spark": "COLLECT_LIST(x)",
+                "trino": "ARRAY_AGG(x ORDER BY y DESC)",
+            },
+        )
         self.validate_all(
             "SELECT a FROM t GROUP BY a, ROLLUP(b), ROLLUP(c), ROLLUP(d)",
             write={

tests/dialects/test_redshift.py

@@ -6,6 +6,9 @@ class TestRedshift(Validator):
     def test_redshift(self):
         self.validate_all("CONVERT(INTEGER, x)", write={"redshift": "CAST(x AS INTEGER)"})
+        self.validate_all(
+            "DATEADD('day', ndays, caldate)", write={"redshift": "DATEADD(day, ndays, caldate)"}
+        )
         self.validate_all(
             'create table "group" ("col" char(10))',
write={
@@ -80,10 +83,10 @@ class TestRedshift(Validator):
             },
         )
         self.validate_all(
-            "DATEDIFF(d, a, b)",
+            "DATEDIFF('day', a, b)",
             write={
-                "redshift": "DATEDIFF(d, a, b)",
-                "presto": "DATE_DIFF(d, a, b)",
+                "redshift": "DATEDIFF(day, a, b)",
+                "presto": "DATE_DIFF('day', a, b)",
             },
         )
         self.validate_all(
self.validate_all(

tests/dialects/test_snowflake.py

@@ -17,6 +17,41 @@ class TestSnowflake(Validator):
         )
         self.validate_identity("COMMENT IF EXISTS ON TABLE foo IS 'bar'")
+        self.validate_all(
+            "SELECT i, p, o FROM qt QUALIFY ROW_NUMBER() OVER (PARTITION BY p ORDER BY o) = 1",
+            write={
+                "": "SELECT i, p, o FROM qt QUALIFY ROW_NUMBER() OVER (PARTITION BY p ORDER BY o NULLS LAST) = 1",
+                "databricks": "SELECT i, p, o FROM qt QUALIFY ROW_NUMBER() OVER (PARTITION BY p ORDER BY o NULLS LAST) = 1",
+                "hive": "SELECT i, p, o FROM (SELECT i, p, o, ROW_NUMBER() OVER (PARTITION BY p ORDER BY o NULLS LAST) AS _w FROM qt) AS _t WHERE _w = 1",
+                "presto": "SELECT i, p, o FROM (SELECT i, p, o, ROW_NUMBER() OVER (PARTITION BY p ORDER BY o) AS _w FROM qt) AS _t WHERE _w = 1",
+                "snowflake": "SELECT i, p, o FROM qt QUALIFY ROW_NUMBER() OVER (PARTITION BY p ORDER BY o) = 1",
+                "spark": "SELECT i, p, o FROM (SELECT i, p, o, ROW_NUMBER() OVER (PARTITION BY p ORDER BY o NULLS LAST) AS _w FROM qt) AS _t WHERE _w = 1",
+                "sqlite": "SELECT i, p, o FROM (SELECT i, p, o, ROW_NUMBER() OVER (PARTITION BY p ORDER BY o NULLS LAST) AS _w FROM qt) AS _t WHERE _w = 1",
+                "trino": "SELECT i, p, o FROM (SELECT i, p, o, ROW_NUMBER() OVER (PARTITION BY p ORDER BY o) AS _w FROM qt) AS _t WHERE _w = 1",
+            },
+        )
+        self.validate_all(
+            "SELECT BOOLOR_AGG(c1), BOOLOR_AGG(c2) FROM test",
+            write={
+                "": "SELECT LOGICAL_OR(c1), LOGICAL_OR(c2) FROM test",
+                "duckdb": "SELECT BOOL_OR(c1), BOOL_OR(c2) FROM test",
+                "postgres": "SELECT BOOL_OR(c1), BOOL_OR(c2) FROM test",
+                "snowflake": "SELECT BOOLOR_AGG(c1), BOOLOR_AGG(c2) FROM test",
+                "spark": "SELECT BOOL_OR(c1), BOOL_OR(c2) FROM test",
+                "sqlite": "SELECT MAX(c1), MAX(c2) FROM test",
+            },
+        )
+        self.validate_all(
+            "SELECT BOOLAND_AGG(c1), BOOLAND_AGG(c2) FROM test",
+            write={
+                "": "SELECT LOGICAL_AND(c1), LOGICAL_AND(c2) FROM test",
+                "duckdb": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
+                "postgres": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
+                "snowflake": "SELECT BOOLAND_AGG(c1), BOOLAND_AGG(c2) FROM test",
+                "spark": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
+                "sqlite": "SELECT MIN(c1), MIN(c2) FROM test",
+            },
+        )
         self.validate_all(
             "TO_CHAR(x, y)",
             read={

tests/dialects/test_spark.py

@@ -213,6 +213,9 @@ TBLPROPERTIES (
         self.validate_identity("TRIM(LEADING 'SL' FROM 'SSparkSQLS')")
         self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
+        self.validate_all(
+            "CAST(x AS TIMESTAMP)", read={"trino": "CAST(x AS TIMESTAMP(6) WITH TIME ZONE)"}
+        )
         self.validate_all(
             "SELECT DATE_ADD(my_date_column, 1)",
             write={

tests/dialects/test_sqlite.py

@@ -56,6 +56,33 @@ class TestSQLite(Validator):
         )
 
     def test_sqlite(self):
+        self.validate_all(
+            "CURRENT_DATE",
+            read={
+                "": "CURRENT_DATE",
+                "snowflake": "CURRENT_DATE()",
+            },
+        )
+        self.validate_all(
+            "CURRENT_TIME",
+            read={
+                "": "CURRENT_TIME",
+                "snowflake": "CURRENT_TIME()",
+            },
+        )
+        self.validate_all(
+            "CURRENT_TIMESTAMP",
+            read={
+                "": "CURRENT_TIMESTAMP",
+                "snowflake": "CURRENT_TIMESTAMP()",
+            },
+        )
+        self.validate_all(
+            "SELECT DATE('2020-01-01 16:03:05')",
+            read={
+                "snowflake": "SELECT CAST('2020-01-01 16:03:05' AS DATE)",
+            },
+        )
         self.validate_all(
             "SELECT CAST([a].[b] AS SMALLINT) FROM foo",
             write={