
Adding upstream version 21.1.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-13 21:25:55 +01:00
parent d51e69137a
commit 92ffd7746f
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
91 changed files with 49165 additions and 47854 deletions
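
These are sqlglot's dialect test suites, exercising transpile, parse_one, and the per-dialect Validator helpers. A minimal sketch of the kind of round-trip the tests below assert, assuming sqlglot 21.1.1 is installed:

import sqlglot

# BigQuery keeps IGNORE NULLS inside the aggregate call; Spark emits it
# after the call, exactly as the BigQuery tests below expect.
print(sqlglot.transpile("SELECT SUM(x IGNORE NULLS) AS x", read="bigquery", write="spark")[0])
# SELECT SUM(x) IGNORE NULLS AS x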

tests/dialects/test_bigquery.py

@@ -18,78 +18,6 @@ class TestBigQuery(Validator):
maxDiff = None
def test_bigquery(self):
self.validate_identity("ARRAY_AGG(x IGNORE NULLS LIMIT 1)")
self.validate_identity("ARRAY_AGG(x IGNORE NULLS ORDER BY x LIMIT 1)")
self.validate_identity("ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY x LIMIT 1)")
self.validate_identity("ARRAY_AGG(x IGNORE NULLS)")
self.validate_identity("ARRAY_AGG(DISTINCT x IGNORE NULLS HAVING MAX x ORDER BY x LIMIT 1)")
self.validate_all(
"SELECT SUM(x IGNORE NULLS) AS x",
read={
"bigquery": "SELECT SUM(x IGNORE NULLS) AS x",
"duckdb": "SELECT SUM(x IGNORE NULLS) AS x",
"postgres": "SELECT SUM(x) IGNORE NULLS AS x",
"spark": "SELECT SUM(x) IGNORE NULLS AS x",
"snowflake": "SELECT SUM(x) IGNORE NULLS AS x",
},
write={
"bigquery": "SELECT SUM(x IGNORE NULLS) AS x",
"duckdb": "SELECT SUM(x IGNORE NULLS) AS x",
"postgres": "SELECT SUM(x) IGNORE NULLS AS x",
"spark": "SELECT SUM(x) IGNORE NULLS AS x",
"snowflake": "SELECT SUM(x) IGNORE NULLS AS x",
},
)
self.validate_all(
"SELECT SUM(x RESPECT NULLS) AS x",
read={
"bigquery": "SELECT SUM(x RESPECT NULLS) AS x",
"duckdb": "SELECT SUM(x RESPECT NULLS) AS x",
"postgres": "SELECT SUM(x) RESPECT NULLS AS x",
"spark": "SELECT SUM(x) RESPECT NULLS AS x",
"snowflake": "SELECT SUM(x) RESPECT NULLS AS x",
},
write={
"bigquery": "SELECT SUM(x RESPECT NULLS) AS x",
"duckdb": "SELECT SUM(x RESPECT NULLS) AS x",
"postgres": "SELECT SUM(x) RESPECT NULLS AS x",
"spark": "SELECT SUM(x) RESPECT NULLS AS x",
"snowflake": "SELECT SUM(x) RESPECT NULLS AS x",
},
)
self.validate_all(
"SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
write={
"bigquery": "SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
"duckdb": "SELECT QUANTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
"spark": "SELECT PERCENTILE_CONT(x, 0.5) RESPECT NULLS OVER ()",
},
)
self.validate_all(
"SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x",
write={
"bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x",
"duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 10) AS x",
"spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 10) IGNORE NULLS AS x",
},
)
self.validate_all(
"SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x",
write={
"bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x",
"duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 1, 10) AS x",
"spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 1, 10) IGNORE NULLS AS x",
},
)
self.validate_identity("SELECT COUNT(x RESPECT NULLS)")
self.validate_identity("SELECT LAST_VALUE(x IGNORE NULLS) OVER y AS x")
self.validate_identity(
"create or replace view test (tenant_id OPTIONS(description='Test description on table creation')) select 1 as tenant_id, 1 as customer_id;",
"CREATE OR REPLACE VIEW test (tenant_id OPTIONS (description='Test description on table creation')) AS SELECT 1 AS tenant_id, 1 AS customer_id",
)
with self.assertLogs(helper_logger) as cm:
statements = parse(
"""
@@ -131,19 +59,12 @@ class TestBigQuery(Validator):
self.validate_all(
"a[0]",
read={
"bigquery": "a[0]",
"duckdb": "a[1]",
"presto": "a[1]",
},
)
self.validate_identity(
"select array_contains([1, 2, 3], 1)",
"SELECT EXISTS(SELECT 1 FROM UNNEST([1, 2, 3]) AS _col WHERE _col = 1)",
)
self.validate_identity("CREATE SCHEMA x DEFAULT COLLATE 'en'")
self.validate_identity("CREATE TABLE x (y INT64) DEFAULT COLLATE 'en'")
self.validate_identity("PARSE_JSON('{}', wide_number_mode => 'exact')")
with self.assertRaises(TokenError):
transpile("'\\'", read="bigquery")
@@ -179,6 +100,16 @@ class TestBigQuery(Validator):
)
assert "'END FOR'" in cm.output[0]
self.validate_identity("CREATE SCHEMA x DEFAULT COLLATE 'en'")
self.validate_identity("CREATE TABLE x (y INT64) DEFAULT COLLATE 'en'")
self.validate_identity("PARSE_JSON('{}', wide_number_mode => 'exact')")
self.validate_identity("FOO(values)")
self.validate_identity("STRUCT(values AS value)")
self.validate_identity("ARRAY_AGG(x IGNORE NULLS LIMIT 1)")
self.validate_identity("ARRAY_AGG(x IGNORE NULLS ORDER BY x LIMIT 1)")
self.validate_identity("ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY x LIMIT 1)")
self.validate_identity("ARRAY_AGG(x IGNORE NULLS)")
self.validate_identity("ARRAY_AGG(DISTINCT x IGNORE NULLS HAVING MAX x ORDER BY x LIMIT 1)")
self.validate_identity("SELECT * FROM dataset.my_table TABLESAMPLE SYSTEM (10 PERCENT)")
self.validate_identity("TIME('2008-12-25 15:30:00+08')")
self.validate_identity("TIME('2008-12-25 15:30:00+08', 'America/Los_Angeles')")
@@ -237,6 +168,13 @@ class TestBigQuery(Validator):
self.validate_identity("SELECT TIMESTAMP_SECONDS(2) AS t")
self.validate_identity("SELECT TIMESTAMP_MILLIS(2) AS t")
self.validate_identity("""SELECT JSON_EXTRACT_SCALAR('{"a": 5}', '$.a')""")
self.validate_identity("UPDATE x SET y = NULL")
self.validate_identity("LOG(n, b)")
self.validate_identity("SELECT COUNT(x RESPECT NULLS)")
self.validate_identity("SELECT LAST_VALUE(x IGNORE NULLS) OVER y AS x")
self.validate_identity(
"SELECT * FROM test QUALIFY a IS DISTINCT FROM b WINDOW c AS (PARTITION BY d)"
)
self.validate_identity(
"FOR record IN (SELECT word, word_count FROM bigquery-public-data.samples.shakespeare LIMIT 5) DO SELECT record.word, record.word_count"
)
@@ -264,6 +202,14 @@ class TestBigQuery(Validator):
self.validate_identity(
"""SELECT JSON_EXTRACT_SCALAR('5')""", """SELECT JSON_EXTRACT_SCALAR('5', '$')"""
)
self.validate_identity(
"select array_contains([1, 2, 3], 1)",
"SELECT EXISTS(SELECT 1 FROM UNNEST([1, 2, 3]) AS _col WHERE _col = 1)",
)
self.validate_identity(
"create or replace view test (tenant_id OPTIONS(description='Test description on table creation')) select 1 as tenant_id, 1 as customer_id;",
"CREATE OR REPLACE VIEW test (tenant_id OPTIONS (description='Test description on table creation')) AS SELECT 1 AS tenant_id, 1 AS customer_id",
)
self.validate_identity(
"SELECT SPLIT(foo)",
"SELECT SPLIT(foo, ',')",
@@ -312,7 +258,81 @@ class TestBigQuery(Validator):
"SELECT * FROM UNNEST(x) WITH OFFSET EXCEPT DISTINCT SELECT * FROM UNNEST(y) WITH OFFSET",
"SELECT * FROM UNNEST(x) WITH OFFSET AS offset EXCEPT DISTINCT SELECT * FROM UNNEST(y) WITH OFFSET AS offset",
)
self.validate_identity(
"SELECT * FROM (SELECT a, b, c FROM test) PIVOT(SUM(b) d, COUNT(*) e FOR c IN ('x', 'y'))",
"SELECT * FROM (SELECT a, b, c FROM test) PIVOT(SUM(b) AS d, COUNT(*) AS e FOR c IN ('x', 'y'))",
)
self.validate_identity(
r"REGEXP_EXTRACT(svc_plugin_output, r'\\\((.*)')",
r"REGEXP_EXTRACT(svc_plugin_output, '\\\\\\((.*)')",
)
self.validate_all(
"TIMESTAMP(x)",
write={
"bigquery": "TIMESTAMP(x)",
"duckdb": "CAST(x AS TIMESTAMPTZ)",
"presto": "CAST(x AS TIMESTAMP WITH TIME ZONE)",
},
)
self.validate_all(
"SELECT SUM(x IGNORE NULLS) AS x",
read={
"bigquery": "SELECT SUM(x IGNORE NULLS) AS x",
"duckdb": "SELECT SUM(x IGNORE NULLS) AS x",
"postgres": "SELECT SUM(x) IGNORE NULLS AS x",
"spark": "SELECT SUM(x) IGNORE NULLS AS x",
"snowflake": "SELECT SUM(x) IGNORE NULLS AS x",
},
write={
"bigquery": "SELECT SUM(x IGNORE NULLS) AS x",
"duckdb": "SELECT SUM(x IGNORE NULLS) AS x",
"postgres": "SELECT SUM(x) IGNORE NULLS AS x",
"spark": "SELECT SUM(x) IGNORE NULLS AS x",
"snowflake": "SELECT SUM(x) IGNORE NULLS AS x",
},
)
self.validate_all(
"SELECT SUM(x RESPECT NULLS) AS x",
read={
"bigquery": "SELECT SUM(x RESPECT NULLS) AS x",
"duckdb": "SELECT SUM(x RESPECT NULLS) AS x",
"postgres": "SELECT SUM(x) RESPECT NULLS AS x",
"spark": "SELECT SUM(x) RESPECT NULLS AS x",
"snowflake": "SELECT SUM(x) RESPECT NULLS AS x",
},
write={
"bigquery": "SELECT SUM(x RESPECT NULLS) AS x",
"duckdb": "SELECT SUM(x RESPECT NULLS) AS x",
"postgres": "SELECT SUM(x) RESPECT NULLS AS x",
"spark": "SELECT SUM(x) RESPECT NULLS AS x",
"snowflake": "SELECT SUM(x) RESPECT NULLS AS x",
},
)
self.validate_all(
"SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
write={
"bigquery": "SELECT PERCENTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
"duckdb": "SELECT QUANTILE_CONT(x, 0.5 RESPECT NULLS) OVER ()",
"spark": "SELECT PERCENTILE_CONT(x, 0.5) RESPECT NULLS OVER ()",
},
)
self.validate_all(
"SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x",
write={
"bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 10) AS x",
"duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 10) AS x",
"spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 10) IGNORE NULLS AS x",
},
)
self.validate_all(
"SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x",
write={
"bigquery": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a, b DESC LIMIT 1, 10) AS x",
"duckdb": "SELECT ARRAY_AGG(DISTINCT x IGNORE NULLS ORDER BY a NULLS FIRST, b DESC LIMIT 1, 10) AS x",
"spark": "SELECT COLLECT_LIST(DISTINCT x ORDER BY a, b DESC LIMIT 1, 10) IGNORE NULLS AS x",
},
)
self.validate_all(
"SELECT * FROM Produce UNPIVOT((first_half_sales, second_half_sales) FOR semesters IN ((Q1, Q2) AS 'semester_1', (Q3, Q4) AS 'semester_2'))",
read={
@@ -464,7 +484,6 @@ class TestBigQuery(Validator):
"duckdb": "SELECT * FROM t WHERE EXISTS(SELECT * FROM UNNEST(nums) AS _t(x) WHERE x > 1)",
},
)
self.validate_identity("UPDATE x SET y = NULL")
self.validate_all(
"NULL",
read={
@@ -620,6 +639,14 @@ class TestBigQuery(Validator):
"spark": "WITH cte AS (SELECT ARRAY(1, 2, 3) AS arr) SELECT EXPLODE(arr) FROM cte"
},
)
self.validate_all(
"SELECT IF(pos = pos_2, col, NULL) AS col FROM UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], []))) - 1)) AS pos CROSS JOIN UNNEST(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], [])) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], [])) - 1) AND pos_2 = (ARRAY_LENGTH(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], [])) - 1))",
read={"spark": "select explode_outer([])"},
)
self.validate_all(
"SELECT IF(pos = pos_2, col, NULL) AS col, IF(pos = pos_2, pos_2, NULL) AS pos_2 FROM UNNEST(GENERATE_ARRAY(0, GREATEST(ARRAY_LENGTH(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], []))) - 1)) AS pos CROSS JOIN UNNEST(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], [])) AS col WITH OFFSET AS pos_2 WHERE pos = pos_2 OR (pos > (ARRAY_LENGTH(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], [])) - 1) AND pos_2 = (ARRAY_LENGTH(IF(ARRAY_LENGTH(COALESCE([], [])) = 0, [[][SAFE_ORDINAL(0)]], [])) - 1))",
read={"spark": "select posexplode_outer([])"},
)
self.validate_all(
"SELECT AS STRUCT ARRAY(SELECT AS STRUCT b FROM x) AS y FROM z",
write={
@@ -660,10 +687,6 @@ class TestBigQuery(Validator):
"bigquery": "SELECT ARRAY(SELECT AS STRUCT 1 AS a, 2 AS b)",
},
)
self.validate_identity(
r"REGEXP_EXTRACT(svc_plugin_output, r'\\\((.*)')",
r"REGEXP_EXTRACT(svc_plugin_output, '\\\\\\((.*)')",
)
self.validate_all(
"REGEXP_CONTAINS('foo', '.*')",
read={
@@ -986,9 +1009,6 @@ class TestBigQuery(Validator):
"postgres": "CURRENT_DATE AT TIME ZONE 'UTC'",
},
)
self.validate_identity(
"SELECT * FROM test QUALIFY a IS DISTINCT FROM b WINDOW c AS (PARTITION BY d)"
)
self.validate_all(
"SELECT a FROM test WHERE a = 1 GROUP BY a HAVING a = 2 QUALIFY z ORDER BY a LIMIT 10",
write={
@@ -997,45 +1017,20 @@ class TestBigQuery(Validator):
},
)
self.validate_all(
"SELECT cola, colb FROM (VALUES (1, 'test')) AS tab(cola, colb)",
write={
"spark": "SELECT cola, colb FROM VALUES (1, 'test') AS tab(cola, colb)",
"SELECT cola, colb FROM UNNEST([STRUCT(1 AS cola, 'test' AS colb)])",
read={
"bigquery": "SELECT cola, colb FROM UNNEST([STRUCT(1 AS cola, 'test' AS colb)])",
"snowflake": "SELECT cola, colb FROM (VALUES (1, 'test')) AS tab(cola, colb)",
},
)
self.validate_all(
"SELECT cola, colb FROM (VALUES (1, 'test')) AS tab",
write={
"bigquery": "SELECT cola, colb FROM UNNEST([STRUCT(1 AS _c0, 'test' AS _c1)])",
},
)
self.validate_all(
"SELECT cola, colb FROM (VALUES (1, 'test'))",
write={
"bigquery": "SELECT cola, colb FROM UNNEST([STRUCT(1 AS _c0, 'test' AS _c1)])",
"spark": "SELECT cola, colb FROM VALUES (1, 'test') AS tab(cola, colb)",
},
)
self.validate_all(
"SELECT * FROM UNNEST([STRUCT(1 AS id)]) CROSS JOIN UNNEST([STRUCT(1 AS id)])",
read={
"bigquery": "SELECT * FROM UNNEST([STRUCT(1 AS id)]) CROSS JOIN UNNEST([STRUCT(1 AS id)])",
"postgres": "SELECT * FROM (VALUES (1)) AS t1(id) CROSS JOIN (VALUES (1)) AS t2(id)",
},
)
self.validate_all(
"SELECT cola, colb, colc FROM (VALUES (1, 'test', NULL)) AS tab(cola, colb, colc)",
write={
"spark": "SELECT cola, colb, colc FROM VALUES (1, 'test', NULL) AS tab(cola, colb, colc)",
"bigquery": "SELECT cola, colb, colc FROM UNNEST([STRUCT(1 AS cola, 'test' AS colb, NULL AS colc)])",
"snowflake": "SELECT cola, colb, colc FROM (VALUES (1, 'test', NULL)) AS tab(cola, colb, colc)",
},
)
self.validate_all(
"SELECT * FROM (SELECT a, b, c FROM test) PIVOT(SUM(b) d, COUNT(*) e FOR c IN ('x', 'y'))",
write={
"bigquery": "SELECT * FROM (SELECT a, b, c FROM test) PIVOT(SUM(b) AS d, COUNT(*) AS e FOR c IN ('x', 'y'))",
},
)
self.validate_all(
"SELECT REGEXP_EXTRACT(abc, 'pattern(group)') FROM table",
write={
@@ -1091,8 +1086,6 @@ WHERE
pretty=True,
)
self.validate_identity("LOG(n, b)")
def test_user_defined_functions(self):
self.validate_identity(
"CREATE TEMPORARY FUNCTION a(x FLOAT64, y FLOAT64) RETURNS FLOAT64 NOT DETERMINISTIC LANGUAGE js AS 'return x*y;'"
@@ -1114,35 +1107,22 @@ WHERE
)
def test_remove_precision_parameterized_types(self):
self.validate_all(
"SELECT CAST(1 AS NUMERIC(10, 2))",
write={
"bigquery": "SELECT CAST(1 AS NUMERIC)",
},
)
self.validate_all(
"CREATE TABLE test (a NUMERIC(10, 2))",
write={
"bigquery": "CREATE TABLE test (a NUMERIC(10, 2))",
},
)
self.validate_all(
"SELECT CAST('1' AS STRING(10)) UNION ALL SELECT CAST('2' AS STRING(10))",
write={
"bigquery": "SELECT CAST('1' AS STRING) UNION ALL SELECT CAST('2' AS STRING)",
},
)
self.validate_all(
"SELECT cola FROM (SELECT CAST('1' AS STRING(10)) AS cola UNION ALL SELECT CAST('2' AS STRING(10)) AS cola)",
write={
"bigquery": "SELECT cola FROM (SELECT CAST('1' AS STRING) AS cola UNION ALL SELECT CAST('2' AS STRING) AS cola)",
},
)
self.validate_all(
self.validate_identity("CREATE TABLE test (a NUMERIC(10, 2))")
self.validate_identity(
"INSERT INTO test (cola, colb) VALUES (CAST(7 AS STRING(10)), CAST(14 AS STRING(10)))",
write={
"bigquery": "INSERT INTO test (cola, colb) VALUES (CAST(7 AS STRING), CAST(14 AS STRING))",
},
"INSERT INTO test (cola, colb) VALUES (CAST(7 AS STRING), CAST(14 AS STRING))",
)
self.validate_identity(
"SELECT CAST(1 AS NUMERIC(10, 2))",
"SELECT CAST(1 AS NUMERIC)",
)
self.validate_identity(
"SELECT CAST('1' AS STRING(10)) UNION ALL SELECT CAST('2' AS STRING(10))",
"SELECT CAST('1' AS STRING) UNION ALL SELECT CAST('2' AS STRING)",
)
self.validate_identity(
"SELECT cola FROM (SELECT CAST('1' AS STRING(10)) AS cola UNION ALL SELECT CAST('2' AS STRING(10)) AS cola)",
"SELECT cola FROM (SELECT CAST('1' AS STRING) AS cola UNION ALL SELECT CAST('2' AS STRING) AS cola)",
)
def test_models(self):
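
The two-argument validate_identity cases above assert that parsing the first string and regenerating it yields the second. The same check outside the test harness, as a sketch:

from sqlglot import parse_one

# BigQuery's array_contains sugar is rewritten to an EXISTS subquery.
ast = parse_one("select array_contains([1, 2, 3], 1)", read="bigquery")
print(ast.sql(dialect="bigquery"))
# SELECT EXISTS(SELECT 1 FROM UNNEST([1, 2, 3]) AS _col WHERE _col = 1)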

tests/dialects/test_clickhouse.py

@@ -369,6 +369,7 @@ class TestClickhouse(Validator):
"SELECT STARTS_WITH('a', 'b'), STARTSWITH('a', 'b')",
write={"clickhouse": "SELECT startsWith('a', 'b'), startsWith('a', 'b')"},
)
self.validate_identity("SYSTEM STOP MERGES foo.bar", check_command_warning=True)
def test_cte(self):
self.validate_identity("WITH 'x' AS foo SELECT foo")

tests/dialects/test_dialect.py

@@ -2101,7 +2101,7 @@ SELECT
"databricks": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
"presto": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
"snowflake": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
"sqlite": "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FROM foo",
"sqlite": "SELECT SUM(IIF(col % 2 = 0, 1, 0)) FROM foo",
"tsql": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
},
)
@@ -2116,7 +2116,7 @@ SELECT
"": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
"databricks": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
"presto": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
"sqlite": "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FILTER(WHERE col < 1000) FROM foo",
"sqlite": "SELECT SUM(IIF(col % 2 = 0, 1, 0)) FILTER(WHERE col < 1000) FROM foo",
"tsql": "SELECT COUNT_IF(col % 2 = 0) FILTER(WHERE col < 1000) FROM foo",
},
)
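
SQLite has no COUNT_IF, and as of this version the fallback is SUM(IIF(...)) rather than a CASE expression. A sketch, using the default dialect as the source, as the "" read key above suggests:

import sqlglot

print(sqlglot.transpile("SELECT COUNT_IF(col % 2 = 0) FROM foo", write="sqlite")[0])
# SELECT SUM(IIF(col % 2 = 0, 1, 0)) FROM foo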

tests/dialects/test_hive.py

@@ -152,7 +152,7 @@ class TestHive(Validator):
"duckdb": "CREATE TABLE x (w TEXT)", # Partition columns should exist in table
"presto": "CREATE TABLE x (w VARCHAR, y INTEGER, z INTEGER) WITH (PARTITIONED_BY=ARRAY['y', 'z'])",
"hive": "CREATE TABLE x (w STRING) PARTITIONED BY (y INT, z INT)",
"spark": "CREATE TABLE x (w STRING) PARTITIONED BY (y INT, z INT)",
"spark": "CREATE TABLE x (w STRING, y INT, z INT) PARTITIONED BY (y, z)",
},
)
self.validate_all(
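
The updated expectation keeps partition columns in Spark's column list and references them by name in PARTITIONED BY, matching Spark DDL rules. A sketch of the new behavior:

import sqlglot

print(sqlglot.transpile(
    "CREATE TABLE x (w STRING) PARTITIONED BY (y INT, z INT)",
    read="hive",
    write="spark",
)[0])
# CREATE TABLE x (w STRING, y INT, z INT) PARTITIONED BY (y, z)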

tests/dialects/test_postgres.py

@@ -8,6 +8,8 @@ class TestPostgres(Validator):
dialect = "postgres"
def test_postgres(self):
self.validate_identity("|/ x", "SQRT(x)")
self.validate_identity("||/ x", "CBRT(x)")
expr = parse_one(
"SELECT * FROM r CROSS JOIN LATERAL UNNEST(ARRAY[1]) AS s(location)", read="postgres"
)
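
The new identity covers PostgreSQL's cube-root operator alongside the existing square-root one; both normalize to plain function calls. Sketch:

from sqlglot import parse_one

# The prefix operators |/ and ||/ are parsed into SQRT and CBRT.
print(parse_one("|/ x", read="postgres").sql(dialect="postgres"))   # SQRT(x)
print(parse_one("||/ x", read="postgres").sql(dialect="postgres"))  # CBRT(x)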

tests/dialects/test_presto.py

@@ -424,7 +424,7 @@ class TestPresto(Validator):
"duckdb": "CREATE TABLE x (w TEXT, y INT, z INT)",
"presto": "CREATE TABLE x (w VARCHAR, y INTEGER, z INTEGER) WITH (PARTITIONED_BY=ARRAY['y', 'z'])",
"hive": "CREATE TABLE x (w STRING) PARTITIONED BY (y INT, z INT)",
"spark": "CREATE TABLE x (w STRING) PARTITIONED BY (y INT, z INT)",
"spark": "CREATE TABLE x (w STRING, y INT, z INT) PARTITIONED BY (y, z)",
},
)
self.validate_all(

tests/dialects/test_redshift.py

@@ -1,4 +1,4 @@
from sqlglot import transpile
from sqlglot import exp, parse_one, transpile
from tests.dialects.test_dialect import Validator
@@ -381,8 +381,6 @@ class TestRedshift(Validator):
"SELECT DATEADD(DAY, 1, DATE('2023-01-01'))",
)
self.validate_identity("SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l")
self.validate_identity(
"""SELECT
c_name,
@@ -408,8 +406,9 @@ ORDER BY
union_query = f"SELECT * FROM ({' UNION ALL '.join('SELECT ' + v for v in values)})"
self.assertEqual(transpile(values_query, write="redshift")[0], union_query)
self.validate_identity(
"SELECT * FROM (VALUES (1), (2))",
values_sql = transpile("SELECT * FROM (VALUES (1), (2))", write="redshift", pretty=True)[0]
self.assertEqual(
values_sql,
"""SELECT
*
FROM (
@@ -419,69 +418,51 @@ FROM (
SELECT
2
)""",
pretty=True,
)
self.validate_identity("INSERT INTO t (a) VALUES (1), (2), (3)")
self.validate_identity("INSERT INTO t (a, b) VALUES (1, 2), (3, 4)")
self.validate_all(
"SELECT * FROM (VALUES (1, 2)) AS t",
"SELECT * FROM (SELECT 1, 2) AS t",
read={
"": "SELECT * FROM (VALUES (1, 2)) AS t",
},
write={
"redshift": "SELECT * FROM (SELECT 1, 2) AS t",
"mysql": "SELECT * FROM (SELECT 1, 2) AS t",
"presto": "SELECT * FROM (VALUES (1, 2)) AS t",
"presto": "SELECT * FROM (SELECT 1, 2) AS t",
},
)
self.validate_all(
"SELECT * FROM (VALUES (1)) AS t1(id) CROSS JOIN (VALUES (1)) AS t2(id)",
write={
"redshift": "SELECT * FROM (SELECT 1 AS id) AS t1 CROSS JOIN (SELECT 1 AS id) AS t2",
"SELECT * FROM (SELECT 1 AS id) AS t1 CROSS JOIN (SELECT 1 AS id) AS t2",
read={
"": "SELECT * FROM (VALUES (1)) AS t1(id) CROSS JOIN (VALUES (1)) AS t2(id)",
},
)
self.validate_all(
"SELECT a, b FROM (VALUES (1, 2)) AS t (a, b)",
write={
"redshift": "SELECT a, b FROM (SELECT 1 AS a, 2 AS b) AS t",
"SELECT a, b FROM (SELECT 1 AS a, 2 AS b) AS t",
read={
"": "SELECT a, b FROM (VALUES (1, 2)) AS t (a, b)",
},
)
self.validate_all(
'SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS "t" (a, b)',
write={
"redshift": 'SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS "t"',
'SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS "t"',
read={
"": 'SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS "t" (a, b)',
},
)
self.validate_all(
"SELECT a, b FROM (VALUES (1, 2), (3, 4), (5, 6), (7, 8)) AS t (a, b)",
write={
"redshift": "SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4 UNION ALL SELECT 5, 6 UNION ALL SELECT 7, 8) AS t",
"SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4 UNION ALL SELECT 5, 6 UNION ALL SELECT 7, 8) AS t",
read={
"": "SELECT a, b FROM (VALUES (1, 2), (3, 4), (5, 6), (7, 8)) AS t (a, b)",
},
)
self.validate_all(
"INSERT INTO t(a) VALUES (1), (2), (3)",
write={
"redshift": "INSERT INTO t (a) VALUES (1), (2), (3)",
"INSERT INTO t (a, b) SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS t",
read={
"": "INSERT INTO t(a, b) SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS t (a, b)",
},
)
self.validate_all(
"INSERT INTO t(a, b) SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS t (a, b)",
write={
"redshift": "INSERT INTO t (a, b) SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS t",
},
)
self.validate_all(
"INSERT INTO t(a, b) VALUES (1, 2), (3, 4)",
write={
"redshift": "INSERT INTO t (a, b) VALUES (1, 2), (3, 4)",
},
)
self.validate_identity(
'SELECT * FROM (VALUES (1)) AS "t"(a)',
'''SELECT
*
FROM (
SELECT
1 AS a
) AS "t"''',
pretty=True,
)
def test_create_table_like(self):
self.validate_identity(
@@ -532,3 +513,26 @@ FROM (
"redshift": "CREATE OR REPLACE VIEW v1 AS SELECT cola, colb FROM t1 WITH NO SCHEMA BINDING",
},
)
def test_column_unnesting(self):
ast = parse_one("SELECT * FROM t.t JOIN t.c1 ON c1.c2 = t.c3", read="redshift")
ast.args["from"].this.assert_is(exp.Table)
ast.args["joins"][0].this.assert_is(exp.Table)
self.assertEqual(ast.sql("redshift"), "SELECT * FROM t.t JOIN t.c1 ON c1.c2 = t.c3")
ast = parse_one("SELECT * FROM t AS t CROSS JOIN t.c1", read="redshift")
ast.args["from"].this.assert_is(exp.Table)
ast.args["joins"][0].this.assert_is(exp.Column)
self.assertEqual(ast.sql("redshift"), "SELECT * FROM t AS t CROSS JOIN t.c1")
ast = parse_one(
"SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l", read="redshift"
)
joins = ast.args["joins"]
ast.args["from"].this.assert_is(exp.Table)
joins[0].this.this.assert_is(exp.Column)
joins[1].this.this.assert_is(exp.Column)
joins[2].this.this.assert_is(exp.Dot)
self.assertEqual(
ast.sql("redshift"), "SELECT * FROM x AS a, a.b AS c, c.d.e AS f, f.g.h.i.j.k AS l"
)
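
The reworked cases assert that VALUES in a derived table is rewritten for Redshift into a SELECT ... UNION ALL chain, since Redshift only supports VALUES directly in INSERT. Sketch:

import sqlglot

print(sqlglot.transpile(
    "SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS t (a, b)",
    write="redshift",
)[0])
# SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS t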

tests/dialects/test_snowflake.py

@@ -84,6 +84,10 @@ WHERE
self.validate_identity(
"SELECT a FROM test PIVOT(SUM(x) FOR y IN ('z', 'q')) AS x TABLESAMPLE (0.1)"
)
self.validate_identity(
"value:values::string",
"CAST(GET_PATH(value, 'values') AS TEXT)",
)
self.validate_identity(
"""SELECT GET_PATH(PARSE_JSON('{"y": [{"z": 1}]}'), 'y[0]:z')""",
"""SELECT GET_PATH(PARSE_JSON('{"y": [{"z": 1}]}'), 'y[0].z')""",
@@ -462,7 +466,7 @@ WHERE
"DIV0(foo, bar)",
write={
"snowflake": "IFF(bar = 0, 0, foo / bar)",
"sqlite": "CASE WHEN bar = 0 THEN 0 ELSE CAST(foo AS REAL) / bar END",
"sqlite": "IIF(bar = 0, 0, CAST(foo AS REAL) / bar)",
"presto": "IF(bar = 0, 0, CAST(foo AS DOUBLE) / bar)",
"spark": "IF(bar = 0, 0, foo / bar)",
"hive": "IF(bar = 0, 0, foo / bar)",
@@ -473,7 +477,7 @@ WHERE
"ZEROIFNULL(foo)",
write={
"snowflake": "IFF(foo IS NULL, 0, foo)",
"sqlite": "CASE WHEN foo IS NULL THEN 0 ELSE foo END",
"sqlite": "IIF(foo IS NULL, 0, foo)",
"presto": "IF(foo IS NULL, 0, foo)",
"spark": "IF(foo IS NULL, 0, foo)",
"hive": "IF(foo IS NULL, 0, foo)",
@@ -484,7 +488,7 @@ WHERE
"NULLIFZERO(foo)",
write={
"snowflake": "IFF(foo = 0, NULL, foo)",
"sqlite": "CASE WHEN foo = 0 THEN NULL ELSE foo END",
"sqlite": "IIF(foo = 0, NULL, foo)",
"presto": "IF(foo = 0, NULL, foo)",
"spark": "IF(foo = 0, NULL, foo)",
"hive": "IF(foo = 0, NULL, foo)",
@@ -1513,6 +1517,10 @@ MATCH_RECOGNIZE (
self.validate_identity("SHOW COLUMNS IN VIEW")
self.validate_identity("SHOW COLUMNS LIKE '_foo%' IN VIEW dt_test")
self.validate_identity("SHOW USERS")
self.validate_identity("SHOW TERSE USERS")
self.validate_identity("SHOW USERS LIKE '_foo%' STARTS WITH 'bar' LIMIT 5 FROM 'baz'")
ast = parse_one("SHOW COLUMNS LIKE '_testing%' IN dt_test", read="snowflake")
table = ast.find(exp.Table)
literal = ast.find(exp.Literal)
@@ -1536,6 +1544,10 @@ MATCH_RECOGNIZE (
table = ast.find(exp.Table)
self.assertEqual(table.sql(dialect="snowflake"), "db1.schema1")
users_exp = self.validate_identity("SHOW USERS")
self.assertTrue(isinstance(users_exp, exp.Show))
self.assertEqual(users_exp.this, "USERS")
def test_swap(self):
ast = parse_one("ALTER TABLE a SWAP WITH b", read="snowflake")
assert isinstance(ast, exp.AlterTable)
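
With SHOW USERS now parsed into a proper exp.Show node rather than a raw command, the AST can be inspected, as the added assertions above do. Sketch:

from sqlglot import exp, parse_one

ast = parse_one("SHOW USERS", read="snowflake")
assert isinstance(ast, exp.Show)
assert ast.this == "USERS"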

tests/dialects/test_spark.py

@@ -93,11 +93,12 @@ TBLPROPERTIES (
'x'='1'
)""",
"spark": """CREATE TABLE blah (
col_a INT
col_a INT,
date STRING
)
COMMENT 'Test comment: blah'
PARTITIONED BY (
date STRING
date
)
USING ICEBERG
TBLPROPERTIES (
@@ -125,13 +126,6 @@ TBLPROPERTIES (
"spark": "ALTER TABLE StudentInfo DROP COLUMNS (LastName, DOB)",
},
)
self.validate_all(
"CREATE TABLE x USING ICEBERG PARTITIONED BY (MONTHS(y)) LOCATION 's3://z'",
identify=True,
write={
"spark": "CREATE TABLE `x` USING ICEBERG PARTITIONED BY (MONTHS(`y`)) LOCATION 's3://z'",
},
)
def test_to_date(self):
self.validate_all(
@@ -256,6 +250,14 @@ TBLPROPERTIES (
self.validate_identity("TRIM(LEADING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
self.validate_identity("SPLIT(str, pattern, lim)")
self.validate_identity(
"SELECT CAST('2023-01-01' AS TIMESTAMP) + INTERVAL 23 HOUR + 59 MINUTE + 59 SECONDS",
"SELECT CAST('2023-01-01' AS TIMESTAMP) + INTERVAL '23' HOUR + INTERVAL '59' MINUTE + INTERVAL '59' SECONDS",
)
self.validate_identity(
"SELECT CAST('2023-01-01' AS TIMESTAMP) + INTERVAL '23' HOUR + '59' MINUTE + '59' SECONDS",
"SELECT CAST('2023-01-01' AS TIMESTAMP) + INTERVAL '23' HOUR + INTERVAL '59' MINUTE + INTERVAL '59' SECONDS",
)
self.validate_identity(
"SELECT INTERVAL '5' HOURS '30' MINUTES '5' SECONDS '6' MILLISECONDS '7' MICROSECONDS",
"SELECT INTERVAL '5' HOURS + INTERVAL '30' MINUTES + INTERVAL '5' SECONDS + INTERVAL '6' MILLISECONDS + INTERVAL '7' MICROSECONDS",
@@ -616,12 +618,6 @@ TBLPROPERTIES (
},
)
def test_iif(self):
self.validate_all(
"SELECT IIF(cond, 'True', 'False')",
write={"spark": "SELECT IF(cond, 'True', 'False')"},
)
def test_bool_or(self):
self.validate_all(
"SELECT a, LOGICAL_OR(b) FROM table GROUP BY a",

tests/dialects/test_tableau.py

@@ -5,6 +5,21 @@ class TestTableau(Validator):
dialect = "tableau"
def test_tableau(self):
self.validate_all(
"[x]",
write={
"hive": "`x`",
"tableau": "[x]",
},
)
self.validate_all(
'"x"',
write={
"hive": "'x'",
"tableau": "'x'",
},
)
self.validate_all(
"IF x = 'a' THEN y ELSE NULL END",
read={
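
The new cases above map Tableau's bracketed field references to Hive backtick identifiers, and its double-quoted strings to single-quoted literals. Sketch:

import sqlglot

print(sqlglot.transpile("[x]", read="tableau", write="hive")[0])   # `x`
print(sqlglot.transpile('"x"', read="tableau", write="hive")[0])   # 'x'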

View file

@@ -28,6 +28,14 @@ class TestTSQL(Validator):
self.validate_identity("1 AND true", "1 <> 0 AND (1 = 1)")
self.validate_identity("CAST(x AS int) OR y", "CAST(x AS INTEGER) <> 0 OR y <> 0")
self.validate_all(
"SELECT IIF(cond <> 0, 'True', 'False')",
read={
"spark": "SELECT IF(cond, 'True', 'False')",
"sqlite": "SELECT IIF(cond, 'True', 'False')",
"tsql": "SELECT IIF(cond <> 0, 'True', 'False')",
},
)
self.validate_all(
"SELECT TRIM(BOTH 'a' FROM a)",
read={
@@ -1302,20 +1310,6 @@ WHERE
},
)
def test_iif(self):
self.validate_identity(
"SELECT IF(cond, 'True', 'False')", "SELECT IIF(cond <> 0, 'True', 'False')"
)
self.validate_identity(
"SELECT IIF(cond, 'True', 'False')", "SELECT IIF(cond <> 0, 'True', 'False')"
)
self.validate_all(
"SELECT IIF(cond, 'True', 'False');",
write={
"spark": "SELECT IF(cond, 'True', 'False')",
},
)
def test_lateral_subquery(self):
self.validate_all(
"SELECT x.a, x.b, t.v, t.y FROM x CROSS APPLY (SELECT v, y FROM t) t(v, y)",