
Merging upstream version 25.29.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-13 21:56:19 +01:00
parent de8c8a17d0
commit 1e53504dfc
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
97 changed files with 64720 additions and 61752 deletions

@@ -526,7 +526,7 @@ class TestDialect(Validator):
             write={
                 "": "SELECT NVL2(a, b, c)",
                 "bigquery": "SELECT CASE WHEN NOT a IS NULL THEN b ELSE c END",
-                "clickhouse": "SELECT CASE WHEN NOT a IS NULL THEN b ELSE c END",
+                "clickhouse": "SELECT CASE WHEN NOT (a IS NULL) THEN b ELSE c END",
                 "databricks": "SELECT NVL2(a, b, c)",
                 "doris": "SELECT CASE WHEN NOT a IS NULL THEN b ELSE c END",
                 "drill": "SELECT CASE WHEN NOT a IS NULL THEN b ELSE c END",
@@ -552,7 +552,7 @@ class TestDialect(Validator):
             write={
                 "": "SELECT NVL2(a, b)",
                 "bigquery": "SELECT CASE WHEN NOT a IS NULL THEN b END",
-                "clickhouse": "SELECT CASE WHEN NOT a IS NULL THEN b END",
+                "clickhouse": "SELECT CASE WHEN NOT (a IS NULL) THEN b END",
                 "databricks": "SELECT NVL2(a, b)",
                 "doris": "SELECT CASE WHEN NOT a IS NULL THEN b END",
                 "drill": "SELECT CASE WHEN NOT a IS NULL THEN b END",
@@ -651,7 +651,7 @@ class TestDialect(Validator):
                 "snowflake": "CAST('2020-01-01' AS TIMESTAMP)",
                 "spark": "CAST('2020-01-01' AS TIMESTAMP)",
                 "trino": "CAST('2020-01-01' AS TIMESTAMP)",
-                "clickhouse": "CAST('2020-01-01' AS Nullable(DateTime))",
+                "clickhouse": "CAST('2020-01-01' AS DateTime64(6))",
                 "drill": "CAST('2020-01-01' AS TIMESTAMP)",
                 "hive": "CAST('2020-01-01' AS TIMESTAMP)",
                 "presto": "CAST('2020-01-01' AS TIMESTAMP)",
@@ -688,7 +688,7 @@ class TestDialect(Validator):
                 "snowflake": "CAST('2020-01-01 12:13:14-08:00' AS TIMESTAMPTZ)",
                 "spark": "CAST('2020-01-01 12:13:14-08:00' AS TIMESTAMP)",
                 "trino": "CAST('2020-01-01 12:13:14-08:00' AS TIMESTAMP WITH TIME ZONE)",
-                "clickhouse": "CAST('2020-01-01 12:13:14' AS Nullable(DateTime('America/Los_Angeles')))",
+                "clickhouse": "CAST('2020-01-01 12:13:14' AS DateTime64(6, 'America/Los_Angeles'))",
                 "drill": "CAST('2020-01-01 12:13:14-08:00' AS TIMESTAMP)",
                 "hive": "CAST('2020-01-01 12:13:14-08:00' AS TIMESTAMP)",
                 "presto": "CAST('2020-01-01 12:13:14-08:00' AS TIMESTAMP WITH TIME ZONE)",
@@ -709,7 +709,7 @@ class TestDialect(Validator):
                 "snowflake": "CAST(col AS TIMESTAMPTZ)",
                 "spark": "CAST(col AS TIMESTAMP)",
                 "trino": "CAST(col AS TIMESTAMP WITH TIME ZONE)",
-                "clickhouse": "CAST(col AS Nullable(DateTime('America/Los_Angeles')))",
+                "clickhouse": "CAST(col AS DateTime64(6, 'America/Los_Angeles'))",
                 "drill": "CAST(col AS TIMESTAMP)",
                 "hive": "CAST(col AS TIMESTAMP)",
                 "presto": "CAST(col AS TIMESTAMP WITH TIME ZONE)",
@@ -2893,3 +2893,121 @@ FROM subquery2""",
             "snowflake": "UUID_STRING()",
         },
     )
+
+    def test_escaped_identifier_delimiter(self):
+        for dialect in ("databricks", "hive", "mysql", "spark2", "spark"):
+            with self.subTest(f"Testing escaped backtick in identifier name for {dialect}"):
+                self.validate_all(
+                    'SELECT 1 AS "x`"',
+                    read={
+                        dialect: "SELECT 1 AS `x```",
+                    },
+                    write={
+                        dialect: "SELECT 1 AS `x```",
+                    },
+                )
+
+        for dialect in (
+            "",
+            "clickhouse",
+            "duckdb",
+            "postgres",
+            "presto",
+            "trino",
+            "redshift",
+            "snowflake",
+            "sqlite",
+        ):
+            with self.subTest(f"Testing escaped double-quote in identifier name for {dialect}"):
+                self.validate_all(
+                    'SELECT 1 AS "x"""',
+                    read={
+                        dialect: 'SELECT 1 AS "x"""',
+                    },
+                    write={
+                        dialect: 'SELECT 1 AS "x"""',
+                    },
+                )
+
+        for dialect in ("clickhouse", "sqlite"):
+            with self.subTest(f"Testing escaped backtick in identifier name for {dialect}"):
+                self.validate_all(
+                    'SELECT 1 AS "x`"',
+                    read={
+                        dialect: "SELECT 1 AS `x```",
+                    },
+                    write={
+                        dialect: 'SELECT 1 AS "x`"',
+                    },
+                )
+
+        self.validate_all(
+            'SELECT 1 AS "x`"',
+            read={
+                "clickhouse": "SELECT 1 AS `x\``",
+            },
+            write={
+                "clickhouse": 'SELECT 1 AS "x`"',
+            },
+        )
+
+        for name in ('"x\\""', '`x"`'):
+            with self.subTest(f"Testing ClickHouse delimiter escaping: {name}"):
+                self.validate_all(
+                    'SELECT 1 AS "x"""',
+                    read={
+                        "clickhouse": f"SELECT 1 AS {name}",
+                    },
+                    write={
+                        "clickhouse": 'SELECT 1 AS "x"""',
+                    },
+                )
+
+        for name in ("[[x]]]", '"[x]"'):
+            with self.subTest(f"Testing T-SQL delimiter escaping: {name}"):
+                self.validate_all(
+                    'SELECT 1 AS "[x]"',
+                    read={
+                        "tsql": f"SELECT 1 AS {name}",
+                    },
+                    write={
+                        "tsql": "SELECT 1 AS [[x]]]",
+                    },
+                )
+
+        for name in ('[x"]', '"x"""'):
+            with self.subTest(f"Testing T-SQL delimiter escaping: {name}"):
+                self.validate_all(
+                    'SELECT 1 AS "x"""',
+                    read={
+                        "tsql": f"SELECT 1 AS {name}",
+                    },
+                    write={
+                        "tsql": 'SELECT 1 AS [x"]',
+                    },
+                )
+
+    def test_median(self):
+        for suffix in (
+            "",
+            " OVER ()",
+        ):
+            self.validate_all(
+                f"MEDIAN(x){suffix}",
+                read={
+                    "snowflake": f"MEDIAN(x){suffix}",
+                    "duckdb": f"MEDIAN(x){suffix}",
+                    "spark": f"MEDIAN(x){suffix}",
+                    "databricks": f"MEDIAN(x){suffix}",
+                    "redshift": f"MEDIAN(x){suffix}",
+                    "oracle": f"MEDIAN(x){suffix}",
+                },
+                write={
+                    "snowflake": f"MEDIAN(x){suffix}",
+                    "duckdb": f"MEDIAN(x){suffix}",
+                    "spark": f"MEDIAN(x){suffix}",
+                    "databricks": f"MEDIAN(x){suffix}",
+                    "redshift": f"MEDIAN(x){suffix}",
+                    "oracle": f"MEDIAN(x){suffix}",
+                    "clickhouse": f"MEDIAN(x){suffix}",
+                    "postgres": f"PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY x){suffix}",
+                },
+            )
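Note on the two new tests: together they pin down identifier-delimiter escaping (backtick dialects double the backtick, double-quote dialects double the quote, T-SQL doubles the closing bracket) and MEDIAN handling (passed through where the dialect supports it natively, rewritten to PERCENTILE_CONT ... WITHIN GROUP for Postgres). A hedged sketch of both behaviors, with expected strings copied from the tests above:

import sqlglot

# Identifier escaping: MySQL doubles a literal backtick, T-SQL doubles
# the closing bracket.
assert sqlglot.transpile("SELECT 1 AS `x```", read="mysql", write="mysql")[0] == "SELECT 1 AS `x```"
assert sqlglot.transpile('SELECT 1 AS "[x]"', read="tsql", write="tsql")[0] == "SELECT 1 AS [[x]]]"

# MEDIAN: native in DuckDB and others, rewritten for Postgres.
assert (
    sqlglot.transpile("MEDIAN(x)", read="duckdb", write="postgres")[0]
    == "PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY x)"
)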