
Merging upstream version 11.3.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Author: Daniel Baumann <daniel@debian.org>, 2025-02-13 15:42:13 +01:00
Commit: 1c10961499 (parent: f223c02081)
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)

62 changed files with 26499 additions and 24781 deletions

tests/dialects/test_dialect.py

@@ -520,7 +520,7 @@ class TestDialect(Validator):
             "mysql": "DATE_ADD(x, INTERVAL 1 DAY)",
             "postgres": "x + INTERVAL '1' 'day'",
             "presto": "DATE_ADD('day', 1, x)",
-            "snowflake": "DATEADD(x, 1, 'day')",
+            "snowflake": "DATEADD(day, 1, x)",
             "spark": "DATE_ADD(x, 1)",
             "sqlite": "DATE(x, '1 day')",
             "starrocks": "DATE_ADD(x, INTERVAL 1 DAY)",
@@ -1279,6 +1279,16 @@ class TestDialect(Validator):
                 "sqlite": "SELECT y AS x FROM my_table AS t",
             },
         )
+        self.validate_all(
+            "SELECT * FROM (SELECT * FROM my_table AS t) AS tbl",
+            write={
+                "drill": "SELECT * FROM (SELECT * FROM my_table AS t) AS tbl",
+                "hive": "SELECT * FROM (SELECT * FROM my_table AS t) AS tbl",
+                "oracle": "SELECT * FROM (SELECT * FROM my_table t) tbl",
+                "postgres": "SELECT * FROM (SELECT * FROM my_table AS t) AS tbl",
+                "sqlite": "SELECT * FROM (SELECT * FROM my_table AS t) AS tbl",
+            },
+        )
         self.validate_all(
             "WITH cte1 AS (SELECT a, b FROM table1), cte2 AS (SELECT c, e AS d FROM table2) SELECT b, d AS dd FROM cte1 AS t JOIN cte2 WHERE cte1.a = cte2.c",
             write={
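
The new cross-dialect case pins down alias handling for nested subqueries; Oracle is the outlier because its table aliases take no AS keyword. A minimal reproduction, mirroring the test rows:

    import sqlglot

    sql = "SELECT * FROM (SELECT * FROM my_table AS t) AS tbl"
    # Oracle rejects AS on table aliases, so sqlglot drops it on generation
    print(sqlglot.transpile(sql, write="oracle")[0])
    # SELECT * FROM (SELECT * FROM my_table t) tbl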

tests/dialects/test_duckdb.py

@@ -21,10 +21,10 @@ class TestDuckDB(Validator):
         self.validate_all(
             "EPOCH_MS(x)",
             write={
-                "bigquery": "UNIX_TO_TIME(x / 1000)",
+                "bigquery": "UNIX_TO_TIME(CAST(x / 1000 AS INT64))",
                 "duckdb": "TO_TIMESTAMP(x / 1000)",
                 "presto": "FROM_UNIXTIME(x / 1000)",
-                "spark": "FROM_UNIXTIME(x / 1000)",
+                "spark": "FROM_UNIXTIME(CAST(x / 1000 AS INT))",
             },
         )
         self.validate_all(
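
The EPOCH_MS fix wraps the millisecond division in an explicit cast for targets whose conversion functions expect an integer. Mirroring the updated Spark expectation:

    import sqlglot

    # DuckDB's EPOCH_MS(x) divides by 1000; Spark needs that result cast to INT
    print(sqlglot.transpile("EPOCH_MS(x)", read="duckdb", write="spark")[0])
    # FROM_UNIXTIME(CAST(x / 1000 AS INT))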

tests/dialects/test_oracle.py

@@ -7,6 +7,11 @@ class TestOracle(Validator):
     def test_oracle(self):
         self.validate_identity("SELECT * FROM V$SESSION")
 
+    def test_join_marker(self):
+        self.validate_identity("SELECT e1.x, e2.x FROM e e1, e e2 WHERE e1.y (+) = e2.y")
+        self.validate_identity("SELECT e1.x, e2.x FROM e e1, e e2 WHERE e1.y = e2.y (+)")
+        self.validate_identity("SELECT e1.x, e2.x FROM e e1, e e2 WHERE e1.y (+) = e2.y (+)")
+
     def test_xml_table(self):
         self.validate_identity("XMLTABLE('x')")
         self.validate_identity("XMLTABLE('x' RETURNING SEQUENCE BY REF)")

tests/dialects/test_postgres.py

@@ -106,6 +106,47 @@ class TestPostgres(Validator):
         self.validate_identity("x ~ 'y'")
         self.validate_identity("x ~* 'y'")
+        self.validate_all(
+            "1 / 2",
+            read={
+                "drill": "1 / 2",
+                "duckdb": "1 / 2",
+                "postgres": "1 / 2",
+                "presto": "1 / 2",
+                "redshift": "1 / 2",
+                "sqlite": "1 / 2",
+                "teradata": "1 / 2",
+                "trino": "1 / 2",
+                "tsql": "1 / 2",
+            },
+            write={
+                "drill": "1 / 2",
+                "duckdb": "1 / 2",
+                "postgres": "1 / 2",
+                "presto": "1 / 2",
+                "redshift": "1 / 2",
+                "sqlite": "1 / 2",
+                "teradata": "1 / 2",
+                "trino": "1 / 2",
+                "tsql": "1 / 2",
+                "bigquery": "CAST(1 / 2 AS INT64)",
+                "clickhouse": "CAST(1 / 2 AS Int32)",
+                "databricks": "CAST(1 / 2 AS INT)",
+                "hive": "CAST(1 / 2 AS INT)",
+                "mysql": "CAST(1 / 2 AS INT)",
+                "oracle": "CAST(1 / 2 AS NUMBER)",
+                "snowflake": "CAST(1 / 2 AS INT)",
+                "spark": "CAST(1 / 2 AS INT)",
+                "starrocks": "CAST(1 / 2 AS INT)",
+            },
+        )
+        self.validate_all(
+            "SELECT (DATE '2016-01-10', DATE '2016-02-01') OVERLAPS (DATE '2016-01-20', DATE '2016-02-10')",
+            write={
+                "postgres": "SELECT (CAST('2016-01-10' AS DATE), CAST('2016-02-01' AS DATE)) OVERLAPS (CAST('2016-01-20' AS DATE), CAST('2016-02-10' AS DATE))",
+                "tsql": "SELECT (CAST('2016-01-10' AS DATE), CAST('2016-02-01' AS DATE)) OVERLAPS (CAST('2016-01-20' AS DATE), CAST('2016-02-10' AS DATE))",
+            },
+        )
         self.validate_all(
             "x ^ y",
             write={
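
The block added here encodes the two division families: in Postgres-style dialects 1 / 2 is integer division, so transpiling to a dialect that divides as floats must preserve the truncation with an explicit cast. For instance, per the test's bigquery row:

    import sqlglot

    # Postgres integer division has to stay integral in BigQuery
    print(sqlglot.transpile("1 / 2", read="postgres", write="bigquery")[0])
    # CAST(1 / 2 AS INT64)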

tests/dialects/test_redshift.py

@@ -38,6 +38,12 @@ class TestRedshift(Validator):
                 "redshift": "SELECT CAST('abc' AS VARBYTE)",
             },
         )
+        self.validate_all(
+            "SELECT 'abc'::CHARACTER",
+            write={
+                "redshift": "SELECT CAST('abc' AS CHAR)",
+            },
+        )
         self.validate_all(
             "SELECT * FROM venue WHERE (venuecity, venuestate) IN (('Miami', 'FL'), ('Tampa', 'FL')) ORDER BY venueid",
             write={
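
The new Redshift case simply normalizes the CHARACTER type alias to CHAR when rewriting the :: cast:

    import sqlglot

    print(sqlglot.transpile("SELECT 'abc'::CHARACTER", read="redshift", write="redshift")[0])
    # SELECT CAST('abc' AS CHAR)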

tests/dialects/test_snowflake.py

@@ -10,10 +10,58 @@ class TestSnowflake(Validator):
         self.validate_identity("SELECT REGEXP_LIKE(a, b, c)")
         self.validate_identity("PUT file:///dir/tmp.csv @%table")
         self.validate_identity("CREATE TABLE foo (bar FLOAT AUTOINCREMENT START 0 INCREMENT 1)")
+        self.validate_identity("ALTER TABLE IF EXISTS foo SET TAG a = 'a', b = 'b', c = 'c'")
+        self.validate_identity("ALTER TABLE foo UNSET TAG a, b, c")
         self.validate_identity(
             'COPY INTO NEW_TABLE ("foo", "bar") FROM (SELECT $1, $2, $3, $4 FROM @%old_table)'
         )
+        self.validate_identity("COMMENT IF EXISTS ON TABLE foo IS 'bar'")
+        self.validate_all(
+            "1 / 2",
+            read={
+                "bigquery": "1 / 2",
+                "clickhouse": "1 / 2",
+                "databricks": "1 / 2",
+                "hive": "1 / 2",
+                "mysql": "1 / 2",
+                "oracle": "1 / 2",
+                "snowflake": "1 / 2",
+                "spark": "1 / 2",
+                "starrocks": "1 / 2",
+            },
+            write={
+                "bigquery": "1 / 2",
+                "clickhouse": "1 / 2",
+                "databricks": "1 / 2",
+                "hive": "1 / 2",
+                "mysql": "1 / 2",
+                "oracle": "1 / 2",
+                "snowflake": "1 / 2",
+                "spark": "1 / 2",
+                "starrocks": "1 / 2",
+                "drill": "CAST(1 AS DOUBLE) / 2",
+                "duckdb": "CAST(1 AS DOUBLE) / 2",
+                "postgres": "CAST(1 AS DOUBLE PRECISION) / 2",
+                "presto": "CAST(1 AS DOUBLE) / 2",
+                "redshift": "CAST(1 AS DOUBLE PRECISION) / 2",
+                "sqlite": "CAST(1 AS REAL) / 2",
+                "teradata": "CAST(1 AS DOUBLE) / 2",
+                "trino": "CAST(1 AS DOUBLE) / 2",
+                "tsql": "CAST(1 AS DOUBLE) / 2",
+            },
+        )
+        self.validate_all(
+            "DIV0(foo, bar)",
+            write={
+                "snowflake": "IFF(bar = 0, 0, foo / bar)",
+                "sqlite": "CASE WHEN bar = 0 THEN 0 ELSE CAST(foo AS REAL) / bar END",
+                "presto": "IF(bar = 0, 0, CAST(foo AS DOUBLE) / bar)",
+                "spark": "IF(bar = 0, 0, foo / bar)",
+                "hive": "IF(bar = 0, 0, foo / bar)",
+                "duckdb": "CASE WHEN bar = 0 THEN 0 ELSE CAST(foo AS DOUBLE) / bar END",
+            },
+        )
         self.validate_all(
             "CREATE OR REPLACE TEMPORARY TABLE x (y NUMBER IDENTITY(0, 1))",
             write={
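
DIV0 has no direct equivalent outside Snowflake, so the new test lowers it to a zero guard: IFF/IF where the target has one, otherwise a CASE expression, plus a float cast where plain division would truncate. Mirroring the duckdb row:

    import sqlglot

    # Snowflake's zero-safe division, rewritten for DuckDB
    print(sqlglot.transpile("DIV0(foo, bar)", read="snowflake", write="duckdb")[0])
    # CASE WHEN bar = 0 THEN 0 ELSE CAST(foo AS DOUBLE) / bar END
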
@@ -63,9 +111,13 @@ class TestSnowflake(Validator):
             },
         )
         self.validate_all(
-            "SELECT * EXCLUDE a, b RENAME (c AS d, E as F) FROM xxx",
+            "SELECT * EXCLUDE (a, b) RENAME (c AS d, E AS F) FROM xxx",
+            read={
+                "duckdb": "SELECT * EXCLUDE (a, b) REPLACE (c AS d, E AS F) FROM xxx",
+            },
             write={
                 "snowflake": "SELECT * EXCLUDE (a, b) RENAME (c AS d, E AS F) FROM xxx",
+                "duckdb": "SELECT * EXCLUDE (a, b) REPLACE (c AS d, E AS F) FROM xxx",
             },
         )
         self.validate_all(
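
The reworked EXCLUDE test also gains a read mapping, since DuckDB spells the renaming clause REPLACE where Snowflake says RENAME, and both operand lists are now parenthesized. Per the new read/write pairs, this round trip should hold:

    import sqlglot

    sql = "SELECT * EXCLUDE (a, b) REPLACE (c AS d, E AS F) FROM xxx"
    print(sqlglot.transpile(sql, read="duckdb", write="snowflake")[0])
    # SELECT * EXCLUDE (a, b) RENAME (c AS d, E AS F) FROM xxx
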
@@ -170,6 +222,20 @@ class TestSnowflake(Validator):
                 "snowflake": "SELECT ARRAY_AGG(DISTINCT a)",
             },
         )
+        self.validate_all(
+            "ARRAY_TO_STRING(x, '')",
+            write={
+                "spark": "ARRAY_JOIN(x, '')",
+                "snowflake": "ARRAY_TO_STRING(x, '')",
+            },
+        )
+        self.validate_all(
+            "TO_ARRAY(x)",
+            write={
+                "spark": "ARRAY(x)",
+                "snowflake": "[x]",
+            },
+        )
         self.validate_all(
             "SELECT * FROM a INTERSECT ALL SELECT * FROM b",
             write={
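
Two more Snowflake array helpers gain Spark mappings: ARRAY_TO_STRING becomes ARRAY_JOIN, and TO_ARRAY becomes the ARRAY constructor (Snowflake itself regenerates it as a bracketed literal). Mirroring the test rows:

    import sqlglot

    print(sqlglot.transpile("TO_ARRAY(x)", read="snowflake", write="spark")[0])
    # ARRAY(x)
    print(sqlglot.transpile("ARRAY_TO_STRING(x, '')", read="snowflake", write="spark")[0])
    # ARRAY_JOIN(x, '')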

tests/dialects/test_teradata.py

@@ -24,19 +24,21 @@ class TestTeradata(Validator):
     def test_create(self):
         self.validate_identity("CREATE TABLE x (y INT) PRIMARY INDEX (y) PARTITION BY y INDEX (y)")
         self.validate_identity(
             "CREATE TABLE a (b INT) PRIMARY INDEX (y) PARTITION BY RANGE_N(b BETWEEN 'a', 'b' AND 'c' EACH '1')"
         )
+        self.validate_identity(
+            "CREATE TABLE a (b INT) PARTITION BY RANGE_N(b BETWEEN 0, 1 AND 2 EACH 1)"
+        )
+        self.validate_identity(
+            "CREATE TABLE a (b INT) PARTITION BY RANGE_N(b BETWEEN *, 1 AND * EACH b) INDEX (a)"
+        )
         self.validate_all(
             "REPLACE VIEW a AS (SELECT b FROM c)",
             write={"teradata": "CREATE OR REPLACE VIEW a AS (SELECT b FROM c)"},
         )
-        self.validate_all(
-            "SEL a FROM b",
-            write={"teradata": "SELECT a FROM b"},
-        )
         self.validate_identity("CREATE VOLATILE TABLE a (b INT)")
 
     def test_insert(self):
         self.validate_all(
             "INS INTO x SELECT * FROM y", write={"teradata": "INSERT INTO x SELECT * FROM y"}
@@ -54,6 +56,11 @@ class TestTeradata(Validator):
         self.validate_all("a NE b", write={"teradata": "a <> b"})
         self.validate_all("a NOT= b", write={"teradata": "a <> b"})
+        self.validate_all(
+            "SEL a FROM b",
+            write={"teradata": "SELECT a FROM b"},
+        )
 
     def test_datatype(self):
         self.validate_all(
             "CREATE TABLE z (a ST_GEOMETRY(1))",