1
0
Fork 0

Merging upstream version 11.3.3.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-02-13 15:43:32 +01:00
parent f767789b5e
commit 4a70b88890
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
62 changed files with 28339 additions and 27272 deletions

View file

@@ -8,6 +8,8 @@ class TestBigQuery(Validator):
def test_bigquery(self):
self.validate_identity("SELECT STRUCT<ARRAY<STRING>>(['2023-01-17'])")
self.validate_identity("SELECT * FROM q UNPIVOT(values FOR quarter IN (b, c))")
self.validate_all("LEAST(x, y)", read={"sqlite": "MIN(x, y)"})
self.validate_all(
"REGEXP_CONTAINS('foo', '.*')",
read={"bigquery": "REGEXP_CONTAINS('foo', '.*')"},
@@ -390,3 +392,18 @@ class TestBigQuery(Validator):
"bigquery": "INSERT INTO test (cola, colb) VALUES (CAST(7 AS STRING), CAST(14 AS STRING))",
},
)
def test_merge(self):
    """Transpile a BigQuery MERGE with BY TARGET / BY SOURCE branches."""
    merge_sql = """
MERGE dataset.Inventory T
USING dataset.NewArrivals S ON FALSE
WHEN NOT MATCHED BY TARGET AND product LIKE '%a%'
THEN DELETE
WHEN NOT MATCHED BY SOURCE AND product LIKE '%b%'
THEN DELETE"""
    # Expected canonical output per dialect: BigQuery keeps the
    # BY SOURCE qualifier, while Snowflake renders both branches as
    # plain WHEN NOT MATCHED.
    expected = {
        "bigquery": "MERGE INTO dataset.Inventory AS T USING dataset.NewArrivals AS S ON FALSE WHEN NOT MATCHED AND product LIKE '%a%' THEN DELETE WHEN NOT MATCHED BY SOURCE AND product LIKE '%b%' THEN DELETE",
        "snowflake": "MERGE INTO dataset.Inventory AS T USING dataset.NewArrivals AS S ON FALSE WHEN NOT MATCHED AND product LIKE '%a%' THEN DELETE WHEN NOT MATCHED AND product LIKE '%b%' THEN DELETE",
    }
    self.validate_all(merge_sql, write=expected)

View file

@@ -510,6 +510,7 @@ class TestDialect(Validator):
"DATE_ADD(x, 1, 'day')",
read={
"mysql": "DATE_ADD(x, INTERVAL 1 DAY)",
"snowflake": "DATEADD('day', 1, x)",
"starrocks": "DATE_ADD(x, INTERVAL 1 DAY)",
},
write={

View file

@@ -75,6 +75,40 @@ class TestDuckDB(Validator):
},
)
def test_sample(self):
    """Check that DuckDB sampling clauses normalize to canonical SQL."""
    # (input SQL, canonical duckdb output) pairs: sample sizes gain
    # parentheses, `%` becomes PERCENT, and sampling-method names are
    # upper-cased.
    cases = (
        (
            "SELECT * FROM tbl USING SAMPLE 5",
            "SELECT * FROM tbl USING SAMPLE (5)",
        ),
        (
            "SELECT * FROM tbl USING SAMPLE 10%",
            "SELECT * FROM tbl USING SAMPLE (10 PERCENT)",
        ),
        (
            "SELECT * FROM tbl USING SAMPLE 10 PERCENT (bernoulli)",
            "SELECT * FROM tbl USING SAMPLE BERNOULLI (10 PERCENT)",
        ),
        (
            "SELECT * FROM tbl USING SAMPLE reservoir(50 ROWS) REPEATABLE (100)",
            "SELECT * FROM tbl USING SAMPLE RESERVOIR (50 ROWS) REPEATABLE (100)",
        ),
        (
            "SELECT * FROM tbl USING SAMPLE 10% (system, 377)",
            "SELECT * FROM tbl USING SAMPLE SYSTEM (10 PERCENT) REPEATABLE (377)",
        ),
        (
            "SELECT * FROM tbl TABLESAMPLE RESERVOIR(20%), tbl2 WHERE tbl.i=tbl2.i",
            "SELECT * FROM tbl TABLESAMPLE RESERVOIR (20 PERCENT), tbl2 WHERE tbl.i = tbl2.i",
        ),
        (
            "SELECT * FROM tbl, tbl2 WHERE tbl.i=tbl2.i USING SAMPLE RESERVOIR(20%)",
            "SELECT * FROM tbl, tbl2 WHERE tbl.i = tbl2.i USING SAMPLE RESERVOIR (20 PERCENT)",
        ),
    )
    for given, canonical in cases:
        self.validate_all(given, write={"duckdb": canonical})
def test_duckdb(self):
self.validate_identity("SELECT {'a': 1} AS x")
self.validate_identity("SELECT {'a': {'b': {'c': 1}}, 'd': {'e': 2}} AS x")

View file

@@ -482,9 +482,9 @@ class TestHive(Validator):
self.validate_all(
"SELECT * FROM x TABLESAMPLE(10) y",
write={
"presto": "SELECT * FROM x AS y TABLESAMPLE(10)",
"hive": "SELECT * FROM x TABLESAMPLE(10) AS y",
"spark": "SELECT * FROM x TABLESAMPLE(10) AS y",
"presto": "SELECT * FROM x AS y TABLESAMPLE (10)",
"hive": "SELECT * FROM x TABLESAMPLE (10) AS y",
"spark": "SELECT * FROM x TABLESAMPLE (10) AS y",
},
)
self.validate_all(
@@ -626,25 +626,25 @@ class TestHive(Validator):
},
)
self.validate_all(
"SELECT * FROM x TABLESAMPLE(1) AS foo",
"SELECT * FROM x TABLESAMPLE (1) AS foo",
read={
"presto": "SELECT * FROM x AS foo TABLESAMPLE(1)",
"presto": "SELECT * FROM x AS foo TABLESAMPLE (1)",
},
write={
"presto": "SELECT * FROM x AS foo TABLESAMPLE(1)",
"hive": "SELECT * FROM x TABLESAMPLE(1) AS foo",
"spark": "SELECT * FROM x TABLESAMPLE(1) AS foo",
"presto": "SELECT * FROM x AS foo TABLESAMPLE (1)",
"hive": "SELECT * FROM x TABLESAMPLE (1) AS foo",
"spark": "SELECT * FROM x TABLESAMPLE (1) AS foo",
},
)
self.validate_all(
"SELECT * FROM x TABLESAMPLE(1) AS foo",
"SELECT * FROM x TABLESAMPLE (1) AS foo",
read={
"presto": "SELECT * FROM x AS foo TABLESAMPLE(1)",
"presto": "SELECT * FROM x AS foo TABLESAMPLE (1)",
},
write={
"presto": "SELECT * FROM x AS foo TABLESAMPLE(1)",
"hive": "SELECT * FROM x TABLESAMPLE(1) AS foo",
"spark": "SELECT * FROM x TABLESAMPLE(1) AS foo",
"presto": "SELECT * FROM x AS foo TABLESAMPLE (1)",
"hive": "SELECT * FROM x TABLESAMPLE (1) AS foo",
"spark": "SELECT * FROM x TABLESAMPLE (1) AS foo",
},
)
self.validate_all(

View file

@@ -64,6 +64,7 @@ class TestMySQL(Validator):
self.validate_identity("SET TRANSACTION READ ONLY")
self.validate_identity("SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE")
self.validate_identity("SELECT SCHEMA()")
self.validate_identity("SELECT DATABASE()")
def test_types(self):
self.validate_all(

View file

@@ -11,6 +11,14 @@ class TestPostgres(Validator):
self.validate_identity("CREATE TABLE test (foo JSONB)")
self.validate_identity("CREATE TABLE test (foo VARCHAR(64)[])")
self.validate_identity("INSERT INTO x VALUES (1, 'a', 2.0) RETURNING a")
self.validate_identity("INSERT INTO x VALUES (1, 'a', 2.0) RETURNING a, b")
self.validate_identity("INSERT INTO x VALUES (1, 'a', 2.0) RETURNING *")
self.validate_identity(
"DELETE FROM event USING sales AS s WHERE event.eventid = s.eventid RETURNING a"
)
self.validate_identity("UPDATE tbl_name SET foo = 123 RETURNING a")
self.validate_all(
"CREATE OR REPLACE FUNCTION function_name (input_a character varying DEFAULT NULL::character varying)",
write={

View file

@@ -85,6 +85,12 @@ class TestRedshift(Validator):
"presto": "DATE_DIFF(d, a, b)",
},
)
self.validate_all(
"SELECT TOP 1 x FROM y",
write={
"redshift": "SELECT x FROM y LIMIT 1",
},
)
def test_identity(self):
self.validate_identity(

View file

@@ -62,6 +62,28 @@ class TestSnowflake(Validator):
"duckdb": "CASE WHEN bar = 0 THEN 0 ELSE CAST(foo AS DOUBLE) / bar END",
},
)
self.validate_all(
"ZEROIFNULL(foo)",
write={
"snowflake": "IFF(foo IS NULL, 0, foo)",
"sqlite": "CASE WHEN foo IS NULL THEN 0 ELSE foo END",
"presto": "IF(foo IS NULL, 0, foo)",
"spark": "IF(foo IS NULL, 0, foo)",
"hive": "IF(foo IS NULL, 0, foo)",
"duckdb": "CASE WHEN foo IS NULL THEN 0 ELSE foo END",
},
)
self.validate_all(
"NULLIFZERO(foo)",
write={
"snowflake": "IFF(foo = 0, NULL, foo)",
"sqlite": "CASE WHEN foo = 0 THEN NULL ELSE foo END",
"presto": "IF(foo = 0, NULL, foo)",
"spark": "IF(foo = 0, NULL, foo)",
"hive": "IF(foo = 0, NULL, foo)",
"duckdb": "CASE WHEN foo = 0 THEN NULL ELSE foo END",
},
)
self.validate_all(
"CREATE OR REPLACE TEMPORARY TABLE x (y NUMBER IDENTITY(0, 1))",
write={
@@ -279,12 +301,6 @@ class TestSnowflake(Validator):
"snowflake": "SELECT REGEXP_LIKE(a, b)",
},
)
self.validate_all(
"SELECT a FROM test SAMPLE BLOCK (0.5) SEED (42)",
write={
"snowflake": "SELECT a FROM test TABLESAMPLE BLOCK (0.5) SEED (42)",
},
)
self.validate_all(
"SELECT a FROM test pivot",
write={
@@ -356,6 +372,51 @@ class TestSnowflake(Validator):
},
)
def test_sample(self):
    """Check Snowflake SAMPLE/TABLESAMPLE parsing and canonicalization."""
    # These already-canonical forms must round-trip unchanged.
    for sql in (
        "SELECT * FROM testtable TABLESAMPLE BERNOULLI (20.3)",
        "SELECT * FROM testtable TABLESAMPLE (100)",
        "SELECT i, j FROM table1 AS t1 INNER JOIN table2 AS t2 TABLESAMPLE (50) WHERE t2.j = t1.i",
        "SELECT * FROM (SELECT * FROM t1 JOIN t2 ON t1.a = t2.c) TABLESAMPLE (1)",
        "SELECT * FROM testtable TABLESAMPLE SYSTEM (3) SEED (82)",
        "SELECT * FROM testtable TABLESAMPLE (10 ROWS)",
    ):
        self.validate_identity(sql)

    joined_sample_sql = """
SELECT i, j
FROM
table1 AS t1 SAMPLE (25) -- 25% of rows in table1
INNER JOIN
table2 AS t2 SAMPLE (50) -- 50% of rows in table2
WHERE t2.j = t1.i"""
    # Canonicalization: SAMPLE becomes TABLESAMPLE, REPEATABLE becomes
    # SEED, and `--` line comments are carried over as /* */ comments.
    for given, canonical in (
        (
            "SELECT * FROM testtable SAMPLE (10)",
            "SELECT * FROM testtable TABLESAMPLE (10)",
        ),
        (
            "SELECT * FROM testtable SAMPLE ROW (0)",
            "SELECT * FROM testtable TABLESAMPLE ROW (0)",
        ),
        (
            "SELECT a FROM test SAMPLE BLOCK (0.5) SEED (42)",
            "SELECT a FROM test TABLESAMPLE BLOCK (0.5) SEED (42)",
        ),
        (
            joined_sample_sql,
            "SELECT i, j FROM table1 AS t1 TABLESAMPLE (25) /* 25% of rows in table1 */ INNER JOIN table2 AS t2 TABLESAMPLE (50) /* 50% of rows in table2 */ WHERE t2.j = t1.i",
        ),
        (
            "SELECT * FROM testtable SAMPLE BLOCK (0.012) REPEATABLE (99992)",
            "SELECT * FROM testtable TABLESAMPLE BLOCK (0.012) SEED (99992)",
        ),
    ):
        self.validate_all(given, write={"snowflake": canonical})
def test_timestamps(self):
self.validate_identity("SELECT EXTRACT(month FROM a)")
@@ -415,6 +476,13 @@ class TestSnowflake(Validator):
"presto": "SELECT TO_UNIXTIME(CAST(foo AS TIMESTAMP)) * 1000 AS ddate FROM table_name",
},
)
self.validate_all(
"DATEADD(DAY, 5, CAST('2008-12-25' AS DATE))",
write={
"bigquery": "DATE_ADD(CAST('2008-12-25' AS DATE), INTERVAL 5 DAY)",
"snowflake": "DATEADD(DAY, 5, CAST('2008-12-25' AS DATE))",
},
)
def test_semi_structured_types(self):
self.validate_identity("SELECT CAST(a AS VARIANT)")
@@ -655,6 +723,14 @@ FROM persons AS p, LATERAL FLATTEN(input => p.c, path => 'contact') AS f, LATERA
},
)
self.validate_all(
"""SELECT $1 AS "_1" FROM VALUES ('a'), ('b')""",
write={
"snowflake": """SELECT $1 AS "_1" FROM (VALUES ('a'), ('b'))""",
"spark": """SELECT @1 AS `_1` FROM VALUES ('a'), ('b')""",
},
)
def test_describe_table(self):
self.validate_all(
"DESCRIBE TABLE db.table",

View file

@@ -81,6 +81,13 @@ class TestSQLite(Validator):
"sqlite": "SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname NULLS LAST, lname",
},
)
self.validate_all("x", read={"snowflake": "LEAST(x)"})
self.validate_all("MIN(x)", read={"snowflake": "MIN(x)"}, write={"snowflake": "MIN(x)"})
self.validate_all(
"MIN(x, y, z)",
read={"snowflake": "LEAST(x, y, z)"},
write={"snowflake": "LEAST(x, y, z)"},
)
def test_hexadecimal_literal(self):
self.validate_all(