
Merging upstream version 10.5.2.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-13 15:03:38 +01:00
parent 77197f1e44
commit e0f3bbb5f3
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
58 changed files with 1480 additions and 383 deletions

tests/dialects/test_bigquery.py

@@ -125,7 +125,7 @@ class TestBigQuery(Validator):
},
)
self.validate_all(
"CURRENT_DATE",
"CURRENT_TIMESTAMP()",
read={
"tsql": "GETDATE()",
},
@@ -299,6 +299,14 @@ class TestBigQuery(Validator):
"snowflake": "SELECT cola, colb FROM (VALUES (1, 'test')) AS tab(cola, colb)",
},
)
+self.validate_all(
+"SELECT cola, colb, colc FROM (VALUES (1, 'test', NULL)) AS tab(cola, colb, colc)",
+write={
+"spark": "SELECT cola, colb, colc FROM VALUES (1, 'test', NULL) AS tab(cola, colb, colc)",
+"bigquery": "SELECT cola, colb, colc FROM UNNEST([STRUCT(1 AS cola, 'test' AS colb, NULL AS colc)])",
+"snowflake": "SELECT cola, colb, colc FROM (VALUES (1, 'test', NULL)) AS tab(cola, colb, colc)",
+},
+)
self.validate_all(
"SELECT * FROM (SELECT a, b, c FROM test) PIVOT(SUM(b) d, COUNT(*) e FOR c IN ('x', 'y'))",
write={
@@ -324,3 +332,35 @@ class TestBigQuery(Validator):
"SELECT a, GROUP_CONCAT(b) FROM table GROUP BY a",
write={"bigquery": "SELECT a, STRING_AGG(b) FROM table GROUP BY a"},
)
+
+def test_remove_precision_parameterized_types(self):
+self.validate_all(
+"SELECT CAST(1 AS NUMERIC(10, 2))",
+write={
+"bigquery": "SELECT CAST(1 AS NUMERIC)",
+},
+)
+self.validate_all(
+"CREATE TABLE test (a NUMERIC(10, 2))",
+write={
+"bigquery": "CREATE TABLE test (a NUMERIC(10, 2))",
+},
+)
+self.validate_all(
+"SELECT CAST('1' AS STRING(10)) UNION ALL SELECT CAST('2' AS STRING(10))",
+write={
+"bigquery": "SELECT CAST('1' AS STRING) UNION ALL SELECT CAST('2' AS STRING)",
+},
+)
+self.validate_all(
+"SELECT cola FROM (SELECT CAST('1' AS STRING(10)) AS cola UNION ALL SELECT CAST('2' AS STRING(10)) AS cola)",
+write={
+"bigquery": "SELECT cola FROM (SELECT CAST('1' AS STRING) AS cola UNION ALL SELECT CAST('2' AS STRING) AS cola)",
+},
+)
+self.validate_all(
+"INSERT INTO test (cola, colb) VALUES (CAST(7 AS STRING(10)), CAST(14 AS STRING(10)))",
+write={
+"bigquery": "INSERT INTO test (cola, colb) VALUES (CAST(7 AS STRING), CAST(14 AS STRING))",
+},
+)
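
The new test_remove_precision_parameterized_types cases pin down a BigQuery-specific rule: precision and scale arguments are stripped from parameterized types inside expressions such as CAST, but preserved in DDL, where they carry schema meaning. A minimal sketch of exercising the same behavior through sqlglot's public transpile() API, which should mirror what validate_all asserts above:

import sqlglot

# Precision parameters are dropped when a parameterized type appears in a CAST.
print(sqlglot.transpile("SELECT CAST(1 AS NUMERIC(10, 2))", read="bigquery", write="bigquery")[0])
# SELECT CAST(1 AS NUMERIC)

# In column definitions the parameters are kept, since they are meaningful DDL.
print(sqlglot.transpile("CREATE TABLE test (a NUMERIC(10, 2))", read="bigquery", write="bigquery")[0])
# CREATE TABLE test (a NUMERIC(10, 2))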

tests/dialects/test_clickhouse.py

@@ -14,6 +14,9 @@ class TestClickhouse(Validator):
self.validate_identity("SELECT * FROM foo LEFT ASOF JOIN bla")
self.validate_identity("SELECT * FROM foo ASOF JOIN bla")
self.validate_identity("SELECT * FROM foo ANY JOIN bla")
self.validate_identity("SELECT quantile(0.5)(a)")
self.validate_identity("SELECT quantiles(0.5)(a) AS x FROM t")
self.validate_identity("SELECT * FROM foo WHERE x GLOBAL IN (SELECT * FROM bar)")
self.validate_all(
"SELECT fname, lname, age FROM person ORDER BY age DESC NULLS FIRST, fname ASC NULLS LAST, lname",
@@ -38,3 +41,9 @@ class TestClickhouse(Validator):
"SELECT x #! comment",
write={"": "SELECT x /* comment */"},
)
+self.validate_all(
+"SELECT quantileIf(0.5)(a, true)",
+write={
+"clickhouse": "SELECT quantileIf(0.5)(a, TRUE)",
+},
+)

tests/dialects/test_dialect.py

@@ -85,7 +85,7 @@ class TestDialect(Validator):
self.validate_all(
"CAST(a AS BINARY(4))",
write={
"bigquery": "CAST(a AS BINARY(4))",
"bigquery": "CAST(a AS BINARY)",
"clickhouse": "CAST(a AS BINARY(4))",
"drill": "CAST(a AS VARBINARY(4))",
"duckdb": "CAST(a AS BINARY(4))",
@@ -104,7 +104,7 @@ class TestDialect(Validator):
self.validate_all(
"CAST(a AS VARBINARY(4))",
write={
"bigquery": "CAST(a AS VARBINARY(4))",
"bigquery": "CAST(a AS VARBINARY)",
"clickhouse": "CAST(a AS VARBINARY(4))",
"duckdb": "CAST(a AS VARBINARY(4))",
"mysql": "CAST(a AS VARBINARY(4))",
@@ -181,7 +181,7 @@ class TestDialect(Validator):
self.validate_all(
"CAST(a AS VARCHAR(3))",
write={
"bigquery": "CAST(a AS STRING(3))",
"bigquery": "CAST(a AS STRING)",
"drill": "CAST(a AS VARCHAR(3))",
"duckdb": "CAST(a AS TEXT(3))",
"mysql": "CAST(a AS VARCHAR(3))",

tests/dialects/test_hive.py

@@ -338,6 +338,24 @@ class TestHive(Validator):
)
def test_hive(self):
+self.validate_all(
+"SELECT A.1a AS b FROM test_a AS A",
+write={
+"spark": "SELECT A.1a AS b FROM test_a AS A",
+},
+)
+self.validate_all(
+"SELECT 1_a AS a FROM test_table",
+write={
+"spark": "SELECT 1_a AS a FROM test_table",
+},
+)
+self.validate_all(
+"SELECT a_b AS 1_a FROM test_table",
+write={
+"spark": "SELECT a_b AS 1_a FROM test_table",
+},
+)
self.validate_all(
"PERCENTILE(x, 0.5)",
write={
@@ -411,7 +429,7 @@ class TestHive(Validator):
"INITCAP('new york')",
write={
"duckdb": "INITCAP('new york')",
"presto": "REGEXP_REPLACE('new york', '(\w)(\w*)', x -> UPPER(x[1]) || LOWER(x[2]))",
"presto": r"REGEXP_REPLACE('new york', '(\w)(\w*)', x -> UPPER(x[1]) || LOWER(x[2]))",
"hive": "INITCAP('new york')",
"spark": "INITCAP('new york')",
},

tests/dialects/test_postgres.py

@@ -122,6 +122,10 @@ class TestPostgres(Validator):
"TO_TIMESTAMP(123::DOUBLE PRECISION)",
write={"postgres": "TO_TIMESTAMP(CAST(123 AS DOUBLE PRECISION))"},
)
+self.validate_all(
+"SELECT to_timestamp(123)::time without time zone",
+write={"postgres": "SELECT CAST(TO_TIMESTAMP(123) AS TIME)"},
+)
self.validate_identity(
"CREATE TABLE A (LIKE B INCLUDING CONSTRAINT INCLUDING COMPRESSION EXCLUDING COMMENTS)"

tests/dialects/test_presto.py

@@ -60,11 +60,11 @@ class TestPresto(Validator):
self.validate_all(
"CAST(x AS TIMESTAMP(9) WITH TIME ZONE)",
write={
"bigquery": "CAST(x AS TIMESTAMPTZ(9))",
"bigquery": "CAST(x AS TIMESTAMPTZ)",
"duckdb": "CAST(x AS TIMESTAMPTZ(9))",
"presto": "CAST(x AS TIMESTAMP(9) WITH TIME ZONE)",
"hive": "CAST(x AS TIMESTAMPTZ(9))",
"spark": "CAST(x AS TIMESTAMPTZ(9))",
"hive": "CAST(x AS TIMESTAMPTZ)",
"spark": "CAST(x AS TIMESTAMPTZ)",
},
)

tests/dialects/test_snowflake.py

@@ -523,3 +523,33 @@ FROM persons AS p, LATERAL FLATTEN(input => p.c, path => 'contact') AS f, LATERA
"spark": "SELECT `c0`, `c1` FROM (VALUES (1, 2), (3, 4)) AS `t0`(`c0`, `c1`)",
},
)
+
+def test_describe_table(self):
+self.validate_all(
+"DESCRIBE TABLE db.table",
+write={
+"snowflake": "DESCRIBE TABLE db.table",
+"spark": "DESCRIBE db.table",
+},
+)
+self.validate_all(
+"DESCRIBE db.table",
+write={
+"snowflake": "DESCRIBE TABLE db.table",
+"spark": "DESCRIBE db.table",
+},
+)
+self.validate_all(
+"DESC TABLE db.table",
+write={
+"snowflake": "DESCRIBE TABLE db.table",
+"spark": "DESCRIBE db.table",
+},
+)
+self.validate_all(
+"DESC VIEW db.table",
+write={
+"snowflake": "DESCRIBE VIEW db.table",
+"spark": "DESCRIBE db.table",
+},
+)
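
The new test_describe_table cases normalize the DESC/DESCRIBE shorthand across dialects. A minimal sketch of the same round trip via sqlglot.transpile(), reading Snowflake syntax and writing Spark (mirroring, not replacing, what the tests above assert):

import sqlglot

# Snowflake's DESC shorthand is expanded, and the TABLE keyword is dropped for Spark.
print(sqlglot.transpile("DESC TABLE db.table", read="snowflake", write="spark")[0])
# DESCRIBE db.table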

tests/dialects/test_spark.py

@@ -207,6 +207,7 @@ TBLPROPERTIES (
)
def test_spark(self):
self.validate_identity("SELECT UNIX_TIMESTAMP()")
self.validate_all(
"ARRAY_SORT(x, (left, right) -> -1)",
write={

tests/dialects/test_tsql.py

@@ -6,6 +6,8 @@ class TestTSQL(Validator):
def test_tsql(self):
self.validate_identity('SELECT "x"."y" FROM foo')
self.validate_identity("SELECT * FROM #foo")
self.validate_identity("SELECT * FROM ##foo")
self.validate_identity(
"SELECT DISTINCT DepartmentName, PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY BaseRate) OVER (PARTITION BY DepartmentName) AS MedianCont FROM dbo.DimEmployee"
)
@@ -71,6 +73,12 @@ class TestTSQL(Validator):
"tsql": "CAST(x AS DATETIME2)",
},
)
+self.validate_all(
+"CAST(x AS DATETIME2(6))",
+write={
+"hive": "CAST(x AS TIMESTAMP)",
+},
+)
def test_charindex(self):
self.validate_all(
@@ -300,6 +308,12 @@ class TestTSQL(Validator):
"spark": "SELECT CAST(y.x AS VARCHAR(10)) AS z FROM testdb.dbo.test AS y",
},
)
+self.validate_all(
+"SELECT CAST((SELECT x FROM y) AS VARCHAR) AS test",
+write={
+"spark": "SELECT CAST((SELECT x FROM y) AS STRING) AS test",
+},
+)
def test_add_date(self):
self.validate_identity("SELECT DATEADD(year, 1, '2017/08/25')")
@@ -441,3 +455,13 @@ class TestTSQL(Validator):
"SELECT '''test'''",
write={"spark": r"SELECT '\'test\''"},
)
+
+def test_eomonth(self):
+self.validate_all(
+"EOMONTH(GETDATE())",
+write={"spark": "LAST_DAY(CURRENT_TIMESTAMP())"},
+)
+self.validate_all(
+"EOMONTH(GETDATE(), -1)",
+write={"spark": "LAST_DAY(ADD_MONTHS(CURRENT_TIMESTAMP(), -1))"},
+)
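
The T-SQL additions round out date handling: GETDATE() maps to Spark's CURRENT_TIMESTAMP(), and EOMONTH becomes LAST_DAY, with a month offset translated through ADD_MONTHS. A minimal sketch of the same conversion via sqlglot.transpile(), as a sanity check outside the test harness, under the same dialect names:

import sqlglot

# EOMONTH with an offset nests ADD_MONTHS inside LAST_DAY in the Spark output.
print(sqlglot.transpile("EOMONTH(GETDATE(), -1)", read="tsql", write="spark")[0])
# LAST_DAY(ADD_MONTHS(CURRENT_TIMESTAMP(), -1))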