
Adding upstream version 26.22.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-29 07:21:09 +02:00
parent 0eb0fedc25
commit ffc089e090
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
69 changed files with 29194 additions and 28548 deletions

View file

@@ -177,6 +177,10 @@ class TestBigQuery(Validator):
self.validate_identity(
"CREATE OR REPLACE VIEW test (tenant_id OPTIONS (description='Test description on table creation')) AS SELECT 1 AS tenant_id, 1 AS customer_id",
)
self.validate_identity(
"--c\nARRAY_AGG(v IGNORE NULLS)",
"ARRAY_AGG(v IGNORE NULLS) /* c */",
)
self.validate_identity(
'SELECT r"\\t"',
"SELECT '\\\\t'",
@@ -1732,6 +1736,9 @@ WHERE
)
def test_errors(self):
with self.assertRaises(ParseError):
self.parse_one("SELECT * FROM a - b.c.d2")
with self.assertRaises(TokenError):
transpile("'\\'", read="bigquery")

View file

@@ -11,6 +11,7 @@ from sqlglot import (
parse_one,
)
from sqlglot.dialects import BigQuery, Hive, Snowflake
from sqlglot.dialects.dialect import Version
from sqlglot.parser import logger as parser_logger
@@ -134,14 +135,25 @@ class TestDialect(Validator):
"oracle, normalization_strategy = lowercase, version = 19.5"
)
self.assertEqual(oracle_with_settings.normalization_strategy.value, "LOWERCASE")
self.assertEqual(oracle_with_settings.settings, {"version": "19.5"})
self.assertEqual(oracle_with_settings.version, Version("19.5"))
bool_settings = Dialect.get_or_raise("oracle, s1=TruE, s2=1, s3=FaLse, s4=0, s5=nonbool")
class MyDialect(Dialect):
SUPPORTED_SETTINGS = {"s1", "s2", "s3", "s4", "s5"}
bool_settings = Dialect.get_or_raise("mydialect, s1=TruE, s2=1, s3=FaLse, s4=0, s5=nonbool")
self.assertEqual(
bool_settings.settings,
{"s1": True, "s2": True, "s3": False, "s4": False, "s5": "nonbool"},
)
with self.assertRaises(ValueError) as cm:
Dialect.get_or_raise("tsql,normalisation_strategy=case_sensitive")
self.assertEqual(
"Unknown setting 'normalisation_strategy'. Did you mean normalization_strategy?",
str(cm.exception),
)
def test_compare_dialects(self):
bigquery_class = Dialect["bigquery"]
bigquery_object = BigQuery()
@@ -170,7 +182,9 @@ class TestDialect(Validator):
def test_compare_dialect_versions(self):
ddb_v1 = Dialect.get_or_raise("duckdb, version=1.0")
ddb_v1_2 = Dialect.get_or_raise("duckdb, foo=bar, version=1.0")
ddb_v1_2 = Dialect.get_or_raise(
"duckdb, normalization_strategy=case_sensitive, version=1.0"
)
ddb_v2 = Dialect.get_or_raise("duckdb, version=2.2.4")
ddb_latest = Dialect.get_or_raise("duckdb")
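As a rough illustration of what the updated assertions above exercise (a sketch of my own, not part of the diff): settings appended to the dialect name are parsed, checked against the dialect's SUPPORTED_SETTINGS, the version setting is exposed as a Version object, and unknown settings fail with a suggestion.

from sqlglot.dialects.dialect import Dialect, Version

oracle = Dialect.get_or_raise("oracle, normalization_strategy = lowercase, version = 19.5")
assert oracle.version == Version("19.5")  # per the assertion above

# Unrecognized settings (e.g. the old "foo=bar") now fail fast with a hint.
try:
    Dialect.get_or_raise("tsql, normalisation_strategy=case_sensitive")
except ValueError as exc:
    print(exc)  # Unknown setting 'normalisation_strategy'. Did you mean normalization_strategy?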

View file

@@ -9,6 +9,7 @@ class TestDuckDB(Validator):
dialect = "duckdb"
def test_duckdb(self):
self.validate_identity("SELECT * FROM my_ducklake.demo AT (VERSION => 2)")
self.validate_identity("SELECT UUIDV7()")
self.validate_identity("SELECT TRY(LOG(0))")
self.validate_identity("x::timestamp", "CAST(x AS TIMESTAMP)")

View file

@@ -2667,3 +2667,25 @@ SINGLE = TRUE""",
for node in (max_by, min_by):
self.assertEqual(len(node.this.expressions), 1)
self.assertIsInstance(node.expression, exp.Column)
def test_create_view_copy_grants(self):
# for normal views, 'COPY GRANTS' goes *after* the column list. ref: https://docs.snowflake.com/en/sql-reference/sql/create-view#syntax
self.validate_identity(
"CREATE OR REPLACE VIEW FOO (A, B) COPY GRANTS AS SELECT A, B FROM TBL"
)
# for materialized views, 'COPY GRANTS' must go *before* the column list or an error will be thrown. ref: https://docs.snowflake.com/en/sql-reference/sql/create-materialized-view#syntax
self.validate_identity(
"CREATE OR REPLACE MATERIALIZED VIEW FOO COPY GRANTS (A, B) AS SELECT A, B FROM TBL"
)
# check that only 'COPY GRANTS' goes before the column list and other properties still go after
self.validate_identity(
"CREATE OR REPLACE MATERIALIZED VIEW FOO COPY GRANTS (A, B) COMMENT='foo' TAG (a='b') AS SELECT A, B FROM TBL"
)
# no COPY GRANTS
self.validate_identity("CREATE OR REPLACE VIEW FOO (A, B) AS SELECT A, B FROM TBL")
self.validate_identity(
"CREATE OR REPLACE MATERIALIZED VIEW FOO (A, B) AS SELECT A, B FROM TBL"
)
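A hedged sketch of the behaviour these cases pin down (my own example, mirroring validate_identity): COPY GRANTS is kept after the column list for regular views and before it for materialized views, and both forms round-trip unchanged.

import sqlglot

statements = (
    "CREATE OR REPLACE VIEW FOO (A, B) COPY GRANTS AS SELECT A, B FROM TBL",
    "CREATE OR REPLACE MATERIALIZED VIEW FOO COPY GRANTS (A, B) AS SELECT A, B FROM TBL",
)
for sql in statements:
    # Each statement should be reproduced verbatim when read and written as Snowflake.
    assert sqlglot.transpile(sql, read="snowflake", write="snowflake")[0] == sql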

View file

@@ -1318,6 +1318,7 @@ WHERE
)
def test_isnull(self):
self.validate_identity("ISNULL(x, y)")
self.validate_all("ISNULL(x, y)", write={"spark": "COALESCE(x, y)"})
def test_json(self):
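The added validate_all case corresponds to the following transpilation (a minimal sketch, my own example):

import sqlglot

# T-SQL's two-argument ISNULL becomes COALESCE when targeting Spark.
print(sqlglot.transpile("ISNULL(x, y)", read="tsql", write="spark")[0])  # COALESCE(x, y)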

View file

@@ -28,6 +28,21 @@ TIME;
TIME_SUB(CAST('09:05:03' AS TIME), INTERVAL 2 HOUR);
TIME;
SORT_ARRAY(ARRAY(tbl.str_col));
ARRAY<STRING>;
SORT_ARRAY(ARRAY(tbl.double_col));
ARRAY<DOUBLE>;
SORT_ARRAY(ARRAY(tbl.bigint_col));
ARRAY<BIGINT>;
tbl.bigint || tbl.str_col;
VARCHAR;
tbl.str_col || tbl.bigint;
VARCHAR;
--------------------------------------
-- Spark2 / Spark3 / Databricks
--------------------------------------

View file

@@ -86,3 +86,9 @@ TIME;
# dialect: bigquery
EXTRACT(day from x);
INT;
CASE WHEN x THEN CAST(y AS DECIMAL(18, 2)) ELSE NULL END;
DECIMAL(18,2);
CASE WHEN x THEN NULL ELSE CAST(y AS DECIMAL(18, 2)) END;
DECIMAL(18,2);
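These fixtures feed sqlglot's type annotator. A small sketch of the CASE rule using the public annotate_types helper (my own example; the fixture harness may pass extra options such as a dialect or schema):

from sqlglot import parse_one
from sqlglot.optimizer.annotate_types import annotate_types

# The non-NULL branch type is propagated to the whole CASE expression.
expr = annotate_types(parse_one("CASE WHEN x THEN CAST(y AS DECIMAL(18, 2)) ELSE NULL END"))
print(expr.type.sql())  # DECIMAL(18, 2), matching the expected type above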

View file

@@ -693,14 +693,14 @@ PIVOT(MAX("SOURCE"."VALUE") FOR "SOURCE"."KEY" IN ('a', 'b', 'c')) AS "FINAL"("I
# dialect: snowflake
SELECT * FROM m_sales AS m_sales(empid, dept, jan, feb) UNPIVOT(sales FOR month IN (jan, feb)) ORDER BY empid;
SELECT
"_Q_0"."EMPID" AS "EMPID",
"_Q_0"."DEPT" AS "DEPT",
"_Q_0"."MONTH" AS "MONTH",
"_Q_0"."SALES" AS "SALES"
"M_SALES"."EMPID" AS "EMPID",
"M_SALES"."DEPT" AS "DEPT",
"M_SALES"."MONTH" AS "MONTH",
"M_SALES"."SALES" AS "SALES"
FROM "M_SALES" AS "M_SALES"("EMPID", "DEPT", "JAN", "FEB")
UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_Q_0"
UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "M_SALES"
ORDER BY
"_Q_0"."EMPID";
"M_SALES"."EMPID";
# title: unpivoted table source, unpivot has column aliases
# execute: false
@@ -747,28 +747,28 @@ ORDER BY
# note: the named columns aren't supported by BQ but we add them here to avoid defining a schema
SELECT * FROM produce AS produce(product, q1, q2, q3, q4) UNPIVOT(sales FOR quarter IN (q1, q2, q3, q4));
SELECT
`_q_0`.`product` AS `product`,
`_q_0`.`quarter` AS `quarter`,
`_q_0`.`sales` AS `sales`
`produce`.`product` AS `product`,
`produce`.`quarter` AS `quarter`,
`produce`.`sales` AS `sales`
FROM `produce` AS `produce`
UNPIVOT(`sales` FOR `quarter` IN (`produce`.`q1`, `produce`.`q2`, `produce`.`q3`, `produce`.`q4`)) AS `_q_0`;
UNPIVOT(`sales` FOR `quarter` IN (`produce`.`q1`, `produce`.`q2`, `produce`.`q3`, `produce`.`q4`)) AS `produce`;
# title: unpivoted table source with multiple value columns
# execute: false
# dialect: bigquery
SELECT * FROM produce AS produce(product, q1, q2, q3, q4) UNPIVOT((first_half_sales, second_half_sales) FOR semesters IN ((Q1, Q2) AS 'semester_1', (Q3, Q4) AS 'semester_2'));
SELECT
`_q_0`.`product` AS `product`,
`_q_0`.`semesters` AS `semesters`,
`_q_0`.`first_half_sales` AS `first_half_sales`,
`_q_0`.`second_half_sales` AS `second_half_sales`
`produce`.`product` AS `product`,
`produce`.`semesters` AS `semesters`,
`produce`.`first_half_sales` AS `first_half_sales`,
`produce`.`second_half_sales` AS `second_half_sales`
FROM `produce` AS `produce`
UNPIVOT((`first_half_sales`, `second_half_sales`) FOR
`semesters` IN (
(`produce`.`q1`, `produce`.`q2`) AS 'semester_1',
(`produce`.`q3`, `produce`.`q4`) AS 'semester_2'
)
) AS `_q_0`;
) AS `produce`;
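The expected outputs above reflect a behaviour change: an unpivoted table source now keeps its original alias (M_SALES, produce) instead of being given a synthetic _q_0 alias. A rough way to observe this from the qualify rule (a sketch under the assumption that qualify is the rule responsible; the fixture itself may run a larger optimizer pipeline):

from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify

sql = (
    "SELECT * FROM produce AS produce(product, q1, q2, q3, q4) "
    "UNPIVOT(sales FOR quarter IN (q1, q2, q3, q4))"
)
qualified = qualify(parse_one(sql, read="bigquery"), dialect="bigquery")
# The UNPIVOT source should now be aliased as `produce` rather than `_q_0`.
print(qualified.sql(dialect="bigquery", pretty=True))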
# title: quoting is preserved
# dialect: snowflake

View file

@@ -11105,11 +11105,9 @@ LEFT JOIN "ws" AS "ws"
AND "ss"."ss_item_sk" = "ws"."ws_item_sk"
AND "ss"."ss_sold_year" = "ws"."ws_sold_year"
WHERE
"cs"."cs_qty" > 0
AND "ss"."ss_sold_year" = 1999
AND "ws"."ws_qty" > 0
AND NOT "cs"."cs_qty" IS NULL
AND NOT "ws"."ws_qty" IS NULL
"ss"."ss_sold_year" = 1999
AND COALESCE("cs"."cs_qty", 0) > 0
AND COALESCE("ws"."ws_qty", 0) > 0
ORDER BY
"ss_item_sk",
"ss"."ss_qty" DESC,

View file

@@ -51,7 +51,9 @@ def normalize(expression, **kwargs):
def simplify(expression, **kwargs):
return optimizer.simplify.simplify(expression, constant_propagation=True, **kwargs)
return optimizer.simplify.simplify(
expression, constant_propagation=True, coalesce_simplification=True, **kwargs
)
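The helper above now opts the simplify unit tests into coalesce simplification explicitly, which also explains the TPC-DS change earlier in this diff: COALESCE("cs"."cs_qty", 0) > 0 is now kept rather than expanded into a bare comparison plus an IS NULL check. A hedged sketch of the flag, assuming it toggles the coalesce-rewriting rule:

from sqlglot import parse_one
from sqlglot.optimizer.simplify import simplify

expr = "COALESCE(x, 0) > 0"
# With the flag enabled the comparison may be rewritten to drop the COALESCE
# (adding an explicit NULL check); with it disabled the COALESCE is preserved.
print(simplify(parse_one(expr), coalesce_simplification=True).sql())
print(simplify(parse_one(expr), coalesce_simplification=False).sql())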
def annotate_functions(expression, **kwargs):

View file

@@ -985,6 +985,9 @@ class TestParser(unittest.TestCase):
{"line": 1, "col": 81, "start": 69, "end": 80},
)
ast = parse_one("SELECT FOO()")
self.assertEqual(ast.find(exp.Anonymous).meta, {"line": 1, "col": 10, "start": 7, "end": 9})
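A minimal sketch of the new behaviour asserted above (my own example): anonymous, i.e. unrecognized, function calls now carry source-position metadata.

from sqlglot import exp, parse_one

node = parse_one("SELECT FOO()").find(exp.Anonymous)
# Position info for the FOO() call: 1-based line/col plus token start/end offsets.
print(node.meta)  # {'line': 1, 'col': 10, 'start': 7, 'end': 9}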
def test_quoted_identifier_meta(self):
sql = 'SELECT "a" FROM "test_schema"."test_table_a"'
ast = parse_one(sql)