
Merging upstream version 26.19.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-05-24 07:15:28 +02:00
parent 58527c3d26
commit a99682f526
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
98 changed files with 67345 additions and 65319 deletions


@@ -16,6 +16,7 @@ from sqlglot.helper import logger as helper_logger
from sqlglot.parser import logger as parser_logger
from tests.dialects.test_dialect import Validator
from sqlglot.optimizer.annotate_types import annotate_types
from sqlglot.optimizer.qualify import qualify
class TestBigQuery(Validator):
@@ -23,67 +24,6 @@ class TestBigQuery(Validator):
maxDiff = None
def test_bigquery(self):
self.validate_all(
"EXTRACT(HOUR FROM DATETIME(2008, 12, 25, 15, 30, 00))",
write={
"bigquery": "EXTRACT(HOUR FROM DATETIME(2008, 12, 25, 15, 30, 00))",
"duckdb": "EXTRACT(HOUR FROM MAKE_TIMESTAMP(2008, 12, 25, 15, 30, 00))",
"snowflake": "DATE_PART(HOUR, TIMESTAMP_FROM_PARTS(2008, 12, 25, 15, 30, 00))",
},
)
self.validate_identity(
"""CREATE TEMPORARY FUNCTION FOO()
RETURNS STRING
LANGUAGE js AS
'return "Hello world!"'""",
pretty=True,
)
self.validate_identity(
"[a, a(1, 2,3,4444444444444444, tttttaoeunthaoentuhaoentuheoantu, toheuntaoheutnahoeunteoahuntaoeh), b(3, 4,5), c, d, tttttttttttttttteeeeeeeeeeeeeett, 12312312312]",
"""[
a,
a(
1,
2,
3,
4444444444444444,
tttttaoeunthaoentuhaoentuheoantu,
toheuntaoheutnahoeunteoahuntaoeh
),
b(3, 4, 5),
c,
d,
tttttttttttttttteeeeeeeeeeeeeett,
12312312312
]""",
pretty=True,
)
self.validate_all(
"SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 as a, 'abc' AS b), STRUCT(str_col AS abc)",
write={
"bigquery": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 AS a, 'abc' AS b), STRUCT(str_col AS abc)",
"duckdb": "SELECT {'_0': 1, '_1': 2, '_2': 3}, {}, {'_0': 'abc'}, {'_0': 1, '_1': t.str_col}, {'a': 1, 'b': 'abc'}, {'abc': str_col}",
"hive": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1, 'abc'), STRUCT(str_col)",
"spark2": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 AS a, 'abc' AS b), STRUCT(str_col AS abc)",
"spark": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 AS a, 'abc' AS b), STRUCT(str_col AS abc)",
"snowflake": "SELECT OBJECT_CONSTRUCT('_0', 1, '_1', 2, '_2', 3), OBJECT_CONSTRUCT(), OBJECT_CONSTRUCT('_0', 'abc'), OBJECT_CONSTRUCT('_0', 1, '_1', t.str_col), OBJECT_CONSTRUCT('a', 1, 'b', 'abc'), OBJECT_CONSTRUCT('abc', str_col)",
# fallback to unnamed without type inference
"trino": "SELECT ROW(1, 2, 3), ROW(), ROW('abc'), ROW(1, t.str_col), CAST(ROW(1, 'abc') AS ROW(a INTEGER, b VARCHAR)), ROW(str_col)",
},
)
self.validate_all(
"PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E6S%z', x)",
write={
"bigquery": "PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E6S%z', x)",
"duckdb": "STRPTIME(x, '%Y-%m-%dT%H:%M:%S.%f%z')",
},
)
self.validate_identity(
"PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E*S%z', x)",
"PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E*S%z', x)",
)
for prefix in ("c.db.", "db.", ""):
with self.subTest(f"Parsing {prefix}INFORMATION_SCHEMA.X into a Table"):
table = self.parse_one(f"`{prefix}INFORMATION_SCHEMA.X`", into=exp.Table)
@@ -115,6 +55,7 @@ LANGUAGE js AS
select_with_quoted_udf = self.validate_identity("SELECT `p.d.UdF`(data) FROM `p.d.t`")
self.assertEqual(select_with_quoted_udf.selects[0].name, "p.d.UdF")
self.validate_identity("PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E*S%z', x)")
self.validate_identity("SELECT ARRAY_CONCAT([1])")
self.validate_identity("SELECT * FROM READ_CSV('bla.csv')")
self.validate_identity("CAST(x AS STRUCT<list ARRAY<INT64>>)")
@@ -320,7 +261,80 @@ LANGUAGE js AS
"SELECT CAST(1 AS BYTEINT)",
"SELECT CAST(1 AS INT64)",
)
self.validate_identity(
"""CREATE TEMPORARY FUNCTION FOO()
RETURNS STRING
LANGUAGE js AS
'return "Hello world!"'""",
pretty=True,
)
self.validate_identity(
"[a, a(1, 2,3,4444444444444444, tttttaoeunthaoentuhaoentuheoantu, toheuntaoheutnahoeunteoahuntaoeh), b(3, 4,5), c, d, tttttttttttttttteeeeeeeeeeeeeett, 12312312312]",
"""[
a,
a(
1,
2,
3,
4444444444444444,
tttttaoeunthaoentuhaoentuheoantu,
toheuntaoheutnahoeunteoahuntaoeh
),
b(3, 4, 5),
c,
d,
tttttttttttttttteeeeeeeeeeeeeett,
12312312312
]""",
pretty=True,
)
self.validate_all(
"SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
write={
"bigquery": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"clickhouse": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"databricks": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"duckdb": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"mysql": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"oracle": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"postgres": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"presto": "SELECT purchases, LAST_VALUE(item) OVER (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular FROM Produce",
"redshift": "SELECT purchases, LAST_VALUE(item) OVER (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular FROM Produce",
"snowflake": "SELECT purchases, LAST_VALUE(item) OVER (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular FROM Produce",
"spark": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"trino": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases NULLS FIRST ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
"tsql": "SELECT purchases, LAST_VALUE(item) OVER item_window AS most_popular FROM Produce WINDOW item_window AS (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING)",
},
)
self.validate_all(
"EXTRACT(HOUR FROM DATETIME(2008, 12, 25, 15, 30, 00))",
write={
"bigquery": "EXTRACT(HOUR FROM DATETIME(2008, 12, 25, 15, 30, 00))",
"duckdb": "EXTRACT(HOUR FROM MAKE_TIMESTAMP(2008, 12, 25, 15, 30, 00))",
"snowflake": "DATE_PART(HOUR, TIMESTAMP_FROM_PARTS(2008, 12, 25, 15, 30, 00))",
},
)
self.validate_all(
"SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 as a, 'abc' AS b), STRUCT(str_col AS abc)",
write={
"bigquery": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 AS a, 'abc' AS b), STRUCT(str_col AS abc)",
"duckdb": "SELECT {'_0': 1, '_1': 2, '_2': 3}, {}, {'_0': 'abc'}, {'_0': 1, '_1': t.str_col}, {'a': 1, 'b': 'abc'}, {'abc': str_col}",
"hive": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1, 'abc'), STRUCT(str_col)",
"spark2": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 AS a, 'abc' AS b), STRUCT(str_col AS abc)",
"spark": "SELECT STRUCT(1, 2, 3), STRUCT(), STRUCT('abc'), STRUCT(1, t.str_col), STRUCT(1 AS a, 'abc' AS b), STRUCT(str_col AS abc)",
"snowflake": "SELECT OBJECT_CONSTRUCT('_0', 1, '_1', 2, '_2', 3), OBJECT_CONSTRUCT(), OBJECT_CONSTRUCT('_0', 'abc'), OBJECT_CONSTRUCT('_0', 1, '_1', t.str_col), OBJECT_CONSTRUCT('a', 1, 'b', 'abc'), OBJECT_CONSTRUCT('abc', str_col)",
# fallback to unnamed without type inference
"trino": "SELECT ROW(1, 2, 3), ROW(), ROW('abc'), ROW(1, t.str_col), CAST(ROW(1, 'abc') AS ROW(a INTEGER, b VARCHAR)), ROW(str_col)",
},
)
self.validate_all(
"PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E6S%z', x)",
write={
"bigquery": "PARSE_TIMESTAMP('%Y-%m-%dT%H:%M:%E6S%z', x)",
"duckdb": "STRPTIME(x, '%Y-%m-%dT%H:%M:%S.%f%z')",
},
)
self.validate_all(
"SELECT DATE_SUB(CURRENT_DATE(), INTERVAL 2 DAY)",
write={
@@ -1684,6 +1698,39 @@ WHERE
"EXPORT DATA WITH CONNECTION myproject.us.myconnection OPTIONS (URI='gs://path*.csv.gz', FORMAT='CSV') AS SELECT * FROM all_rows"
)
self.validate_all(
"SELECT * FROM t1, UNNEST(`t1`) AS `col`",
read={
"duckdb": 'SELECT * FROM t1, UNNEST("t1") "t1" ("col")',
},
write={
"bigquery": "SELECT * FROM t1, UNNEST(`t1`) AS `col`",
"redshift": 'SELECT * FROM t1, "t1" AS "col"',
},
)
self.validate_all(
"SELECT * FROM t, UNNEST(`t2`.`t3`) AS `col`",
read={
"duckdb": 'SELECT * FROM t, UNNEST("t1"."t2"."t3") "t1" ("col")',
},
write={
"bigquery": "SELECT * FROM t, UNNEST(`t2`.`t3`) AS `col`",
"redshift": 'SELECT * FROM t, "t2"."t3" AS "col"',
},
)
self.validate_all(
"SELECT * FROM t1, UNNEST(`t1`.`t2`.`t3`.`t4`) AS `col`",
read={
"duckdb": 'SELECT * FROM t1, UNNEST("t1"."t2"."t3"."t4") "t3" ("col")',
},
write={
"bigquery": "SELECT * FROM t1, UNNEST(`t1`.`t2`.`t3`.`t4`) AS `col`",
"redshift": 'SELECT * FROM t1, "t1"."t2"."t3"."t4" AS "col"',
},
)
def test_errors(self):
with self.assertRaises(TokenError):
transpile("'\\'", read="bigquery")
@@ -2489,3 +2536,20 @@ OPTIONS (
information_schema_sql[table_meta["start"] : table_meta["end"] + 1]
== "`region.INFORMATION_SCHEMA.COLUMNS`"
)
def test_override_normalization_strategy(self):
sql = "SELECT * FROM p.d.t"
ast = self.parse_one(sql)
qualified = qualify(ast.copy(), dialect="bigquery,normalization_strategy=uppercase")
self.assertEqual(qualified.sql("bigquery"), "SELECT * FROM `P`.`D`.`T` AS `T`")
from sqlglot.dialects import BigQuery
from sqlglot.dialects.dialect import NormalizationStrategy
try:
BigQuery.NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
qualified = qualify(ast.copy(), dialect="bigquery")
self.assertEqual(qualified.sql("bigquery"), "SELECT * FROM `P`.`D`.`T` AS `T`")
finally:
BigQuery.NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE


@@ -110,6 +110,7 @@ class TestClickhouse(Validator):
self.validate_identity("TRUNCATE DATABASE db")
self.validate_identity("TRUNCATE DATABASE db ON CLUSTER test_cluster")
self.validate_identity("TRUNCATE DATABASE db ON CLUSTER '{cluster}'")
self.validate_identity("EXCHANGE TABLES x.a AND y.b", check_command_warning=True)
self.validate_identity(
"SELECT DATE_BIN(toDateTime('2023-01-01 14:45:00'), INTERVAL '1' MINUTE, toDateTime('2023-01-01 14:35:30'), 'UTC')",
)


@@ -162,6 +162,12 @@ class TestDatabricks(Validator):
},
)
for option in ("", " (foo)", " MATCH FULL", " NOT ENFORCED"):
with self.subTest(f"Databricks foreign key REFERENCES option: {option}."):
self.validate_identity(
f"CREATE TABLE t1 (foo BIGINT NOT NULL CONSTRAINT foo_c FOREIGN KEY REFERENCES t2{option})"
)
# https://docs.databricks.com/sql/language-manual/functions/colonsign.html
def test_json(self):
self.validate_identity("SELECT c1:price, c1:price.foo, c1:price.bar[1]")


@@ -326,6 +326,46 @@ class TestDialect(Validator):
"doris": "CAST(a AS VARCHAR(3))",
},
)
self.validate_all(
"CAST(a AS CHARACTER VARYING)",
write={
"bigquery": "CAST(a AS STRING)",
"drill": "CAST(a AS VARCHAR)",
"duckdb": "CAST(a AS TEXT)",
"materialize": "CAST(a AS VARCHAR)",
"mysql": "CAST(a AS CHAR)",
"hive": "CAST(a AS STRING)",
"oracle": "CAST(a AS VARCHAR2)",
"postgres": "CAST(a AS VARCHAR)",
"presto": "CAST(a AS VARCHAR)",
"redshift": "CAST(a AS VARCHAR)",
"snowflake": "CAST(a AS VARCHAR)",
"spark": "CAST(a AS STRING)",
"starrocks": "CAST(a AS VARCHAR)",
"tsql": "CAST(a AS VARCHAR)",
"doris": "CAST(a AS VARCHAR)",
},
)
self.validate_all(
"CAST(a AS CHARACTER VARYING(3))",
write={
"bigquery": "CAST(a AS STRING)",
"drill": "CAST(a AS VARCHAR(3))",
"duckdb": "CAST(a AS TEXT(3))",
"materialize": "CAST(a AS VARCHAR(3))",
"mysql": "CAST(a AS CHAR(3))",
"hive": "CAST(a AS VARCHAR(3))",
"oracle": "CAST(a AS VARCHAR2(3))",
"postgres": "CAST(a AS VARCHAR(3))",
"presto": "CAST(a AS VARCHAR(3))",
"redshift": "CAST(a AS VARCHAR(3))",
"snowflake": "CAST(a AS VARCHAR(3))",
"spark": "CAST(a AS VARCHAR(3))",
"starrocks": "CAST(a AS VARCHAR(3))",
"tsql": "CAST(a AS VARCHAR(3))",
"doris": "CAST(a AS VARCHAR(3))",
},
)
self.validate_all(
"CAST(a AS SMALLINT)",
write={
@@ -2709,6 +2749,35 @@ SELECT
},
)
def test_window_exclude(self):
for option in ("CURRENT ROW", "TIES", "GROUP"):
self.validate_all(
f"SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE {option})",
write={
"duckdb": f"SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE {option})",
"postgres": f"SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE {option})",
"sqlite": f"SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE {option})",
"oracle": f"SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE {option})",
},
)
# EXCLUDE NO OTHERS is the default behaviour
self.validate_all(
"SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)",
read={
"duckdb": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE NO OTHERS)",
"postgres": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE NO OTHERS)",
"sqlite": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE NO OTHERS)",
"oracle": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW EXCLUDE NO OTHERS)",
},
write={
"duckdb": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)",
"postgres": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)",
"sqlite": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)",
"oracle": "SELECT SUM(X) OVER (PARTITION BY x RANGE BETWEEN 1 PRECEDING AND CURRENT ROW)",
},
)
def test_nested_ctes(self):
self.validate_all(
"SELECT * FROM (WITH t AS (SELECT 1 AS c) SELECT c FROM t) AS subq",


@@ -6,12 +6,14 @@ class TestDruid(Validator):
dialect = "druid"
def test_druid(self):
self.validate_identity("SELECT MOD(1000, 60)")
self.validate_identity("SELECT CEIL(__time TO WEEK) FROM t")
self.validate_identity("SELECT CEIL(col) FROM t")
self.validate_identity("SELECT CEIL(price, 2) AS rounded_price FROM t")
self.validate_identity("SELECT FLOOR(__time TO WEEK) FROM t")
self.validate_identity("SELECT FLOOR(col) FROM t")
self.validate_identity("SELECT FLOOR(price, 2) AS rounded_price FROM t")
self.validate_identity("SELECT CURRENT_TIMESTAMP")
# validate across all dialects
write = {dialect.value: "FLOOR(__time TO WEEK)" for dialect in Dialects}


@@ -9,6 +9,8 @@ class TestDuckDB(Validator):
dialect = "duckdb"
def test_duckdb(self):
self.validate_identity("SELECT UUIDV7()")
self.validate_identity("SELECT TRY(LOG(0))")
self.validate_identity("x::timestamp", "CAST(x AS TIMESTAMP)")
self.validate_identity("x::timestamp without time zone", "CAST(x AS TIMESTAMP)")
self.validate_identity("x::timestamp with time zone", "CAST(x AS TIMESTAMPTZ)")


@@ -1,4 +1,4 @@
from sqlglot import exp, UnsupportedError
from sqlglot import exp, UnsupportedError, ParseError, parse_one
from tests.dialects.test_dialect import Validator
@@ -47,6 +47,9 @@ class TestOracle(Validator):
self.validate_identity("SELECT * FROM V$SESSION")
self.validate_identity("SELECT TO_DATE('January 15, 1989, 11:00 A.M.')")
self.validate_identity("SELECT INSTR(haystack, needle)")
self.validate_identity(
"SELECT * FROM consumer LEFT JOIN groceries ON consumer.groceries_id = consumer.id PIVOT(MAX(type_id) FOR consumer_type IN (1, 2, 3, 4))"
)
self.validate_identity(
"SELECT * FROM test UNPIVOT INCLUDE NULLS (value FOR Description IN (col AS 'PREFIX ' || CHR(38) || ' SUFFIX'))"
)
@@ -320,6 +323,7 @@ class TestOracle(Validator):
},
)
self.validate_identity("CREATE OR REPLACE FORCE VIEW foo1.foo2")
self.validate_identity("TO_TIMESTAMP('foo')")
def test_join_marker(self):
self.validate_identity("SELECT e1.x, e2.x FROM e e1, e e2 WHERE e1.y (+) = e2.y")
@@ -522,6 +526,8 @@ FROM JSON_TABLE(res, '$.info[*]' COLUMNS(
)) src""",
pretty=True,
)
self.validate_identity("CONVERT('foo', 'dst')")
self.validate_identity("CONVERT('foo', 'dst', 'src')")
def test_connect_by(self):
start = "START WITH last_name = 'King'"
@@ -702,3 +708,11 @@ CONNECT BY PRIOR employee_id = manager_id AND LEVEL <= 4"""
self.validate_identity(
"ANALYZE TABLE tbl VALIDATE STRUCTURE CASCADE COMPLETE OFFLINE INTO db.tbl"
)
def test_prior(self):
self.validate_identity(
"SELECT id, PRIOR name AS parent_name, name FROM tree CONNECT BY NOCYCLE PRIOR id = parent_id"
)
with self.assertRaises(ParseError):
parse_one("PRIOR as foo", read="oracle")


@@ -1055,6 +1055,22 @@ class TestSnowflake(Validator):
},
)
with self.assertRaises(ParseError):
parse_one(
"SELECT id, PRIOR name AS parent_name, name FROM tree CONNECT BY NOCYCLE PRIOR id = parent_id",
dialect="snowflake",
)
self.validate_all(
"SELECT CAST(1 AS DOUBLE), CAST(1 AS DOUBLE)",
read={
"bigquery": "SELECT CAST(1 AS BIGDECIMAL), CAST(1 AS BIGNUMERIC)",
},
write={
"snowflake": "SELECT CAST(1 AS DOUBLE), CAST(1 AS DOUBLE)",
},
)
def test_null_treatment(self):
self.validate_all(
r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",


@@ -6,6 +6,8 @@ class TestStarrocks(Validator):
dialect = "starrocks"
def test_starrocks(self):
self.validate_identity("SELECT ARRAY_JOIN([1, 3, 5, NULL], '_', 'NULL')")
self.validate_identity("SELECT ARRAY_JOIN([1, 3, 5, NULL], '_')")
self.validate_identity("ALTER TABLE a SWAP WITH b")
def test_ddl(self):
@@ -36,6 +38,9 @@ class TestStarrocks(Validator):
self.validate_identity(
"CREATE TABLE foo (col1 LARGEINT) DISTRIBUTED BY HASH (col1) BUCKETS 1"
)
self.validate_identity(
"CREATE VIEW foo (foo_col1) SECURITY NONE AS SELECT bar_col1 FROM bar"
)
def test_identity(self):
self.validate_identity("SELECT CAST(`a`.`b` AS INT) FROM foo")


@@ -41,13 +41,8 @@ class TestTeradata(Validator):
).assert_is(exp.Command)
def test_translate(self):
self.validate_all(
"TRANSLATE(x USING LATIN_TO_UNICODE)",
write={
"teradata": "CAST(x AS CHAR CHARACTER SET UNICODE)",
},
)
self.validate_identity("CAST(x AS CHAR CHARACTER SET UNICODE)")
self.validate_identity("TRANSLATE(x USING LATIN_TO_UNICODE)")
self.validate_identity("TRANSLATE(x USING LATIN_TO_UNICODE WITH ERROR)")
def test_update(self):
self.validate_all(


@@ -1,7 +1,7 @@
from sqlglot import exp, parse, parse_one
from tests.dialects.test_dialect import Validator
from sqlglot.errors import ParseError, UnsupportedError
from sqlglot.optimizer.annotate_types import annotate_types
from tests.dialects.test_dialect import Validator
class TestTSQL(Validator):
@@ -962,7 +962,8 @@ FROM (
ProductID
) AS src(ProductID, OrderQty)
ON pi.ProductID = src.ProductID
WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0 THEN UPDATE SET pi.Quantity = pi.Quantity - src.OrderQty
WHEN MATCHED AND pi.Quantity - src.OrderQty >= 0 THEN UPDATE SET
pi.Quantity = pi.Quantity - src.OrderQty
WHEN MATCHED AND pi.Quantity - src.OrderQty <= 0 THEN DELETE
OUTPUT $action, Inserted.ProductID, Inserted.LocationID, Inserted.Quantity AS NewQty, Deleted.Quantity AS PreviousQty
) AS Changes(Action, ProductID, LocationID, NewQty, PreviousQty)
@@ -2242,3 +2243,8 @@ FROM OPENJSON(@json) WITH (
"tsql": "SELECT DATETRUNC(YEAR, CAST('foo1' AS DATE))",
},
)
def test_collation_parse(self):
self.validate_identity("ALTER TABLE a ALTER COLUMN b CHAR(10) COLLATE abc").assert_is(
exp.Alter
).args.get("actions")[0].args.get("collate").this.assert_is(exp.Var)


@@ -100,6 +100,117 @@ STRING;
RPAD(tbl.str_col, 1, tbl.str_col);
STRING;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.double_col, tbl.bigint_col);
DOUBLE;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.bigint_col, tbl.double_col);
DOUBLE;
# dialect: hive, spark2, spark
IF(cond, tbl.double_col, tbl.str_col);
STRING;
# dialect: hive, spark2, spark
IF(cond, tbl.str_col, tbl.double_col);
STRING;
# dialect: databricks
IF(cond, tbl.str_col, tbl.double_col);
DOUBLE;
# dialect: databricks
IF(cond, tbl.double_col, tbl.str_col);
DOUBLE;
# dialect: hive, spark2, spark
IF(cond, tbl.date_col, tbl.str_col);
STRING;
# dialect: hive, spark2, spark
IF(cond, tbl.str_col, tbl.date_col);
STRING;
# dialect: databricks
IF(cond, tbl.date_col, tbl.str_col);
DATE;
# dialect: databricks
IF(cond, tbl.str_col, tbl.date_col);
DATE;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.date_col, tbl.timestamp_col);
TIMESTAMP;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.timestamp_col, tbl.date_col);
TIMESTAMP;
# dialect: hive, spark2, spark, databricks
IF(cond, NULL, tbl.str_col);
STRING;
# dialect: hive, spark2, spark, databricks
IF(cond, tbl.str_col, NULL);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.str_col, tbl.date_col, tbl.bigint_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, tbl.str_col, tbl.bigint_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, tbl.bigint_col, tbl.str_col);
STRING;
# dialect: hive, spark2, spark
COALESCE(tbl.date_col, NULL, tbl.bigint_col, tbl.str_col);
STRING;
# dialect: databricks
COALESCE(tbl.str_col, tbl.bigint_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.bigint_col, tbl.str_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.str_col, NULL, tbl.bigint_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.bigint_col, NULL, tbl.str_col);
BIGINT;
# dialect: databricks
COALESCE(tbl.bool_col, tbl.str_col);
BOOLEAN;
# dialect: hive, spark2, spark
COALESCE(tbl.interval_col, tbl.str_col);
STRING;
# dialect: databricks
COALESCE(tbl.interval_col, tbl.str_col);
INTERVAL;
# dialect: databricks
COALESCE(tbl.bin_col, tbl.str_col);
BINARY;
--------------------------------------
-- BigQuery
@@ -205,6 +316,14 @@ STRING;
CONCAT(tbl.bin_col, tbl.bin_col);
BINARY;
# dialect: bigquery
CONCAT(0, tbl.str_col);
STRING;
# dialect: bigquery
CONCAT(tbl.str_col, 0);
STRING;
# dialect: bigquery
LEFT(tbl.str_col, 1);
STRING;


@@ -329,6 +329,11 @@ FROM
t1;
SELECT x.a AS a, x.b AS b, ROW_NUMBER() OVER (PARTITION BY x.a ORDER BY x.a) AS row_num FROM x AS x ORDER BY x.a, x.b, row_num;
# title: Keep ORDER BY
# execute: false
WITH t AS (SELECT t1.x AS x, t1.y AS y, t2.a AS a, t2.b AS b FROM t1 AS t1(x, y) CROSS JOIN t2 AS t2(a, b) ORDER BY t2.a) SELECT t.x AS x, t.y AS y, t.a AS a, t.b AS b FROM t AS t;
SELECT t1.x AS x, t1.y AS y, t2.a AS a, t2.b AS b FROM t1 AS t1(x, y) CROSS JOIN t2 AS t2(a, b) ORDER BY t2.a;
# title: Don't merge window functions, inner table is aliased in outer query
with t1 as (
SELECT


@@ -449,3 +449,11 @@ SELECT
FROM foo
WHERE
1 = 1 AND /* first comment */ foo.a /* second comment */ = 1;
MERGE INTO t USING s ON t.id = s.id WHEN MATCHED THEN UPDATE SET status = s.status, amount = s.amount;
MERGE INTO t
USING s
ON t.id = s.id
WHEN MATCHED THEN UPDATE SET
status = s.status,
amount = s.amount;


@@ -885,3 +885,13 @@ class TestExecutor(unittest.TestCase):
"avg_bill_length",
"avg_bill_depth",
]
def test_table_to_pylist(self):
columns = ["id", "product", "price"]
rows = [[1, "Shirt", 20.0], [2, "Shoes", 60.0]]
table = Table(columns=columns, rows=rows)
expected = [
{"id": 1, "product": "Shirt", "price": 20.0},
{"id": 2, "product": "Shoes", "price": 60.0},
]
self.assertEqual(table.to_pylist(), expected)


@@ -891,7 +891,17 @@ FROM READ_CSV('tests/fixtures/optimizer/tpc-h/nation.csv.gz', 'delimiter', '|')
def test_annotate_funcs(self):
test_schema = {
"tbl": {"bin_col": "BINARY", "str_col": "STRING", "bignum_col": "BIGNUMERIC"}
"tbl": {
"bin_col": "BINARY",
"str_col": "STRING",
"bignum_col": "BIGNUMERIC",
"date_col": "DATE",
"timestamp_col": "TIMESTAMP",
"double_col": "DOUBLE",
"bigint_col": "BIGINT",
"bool_col": "BOOLEAN",
"interval_col": "INTERVAL",
}
}
for i, (meta, sql, expected) in enumerate(
@@ -1552,3 +1562,19 @@ FROM READ_CSV('tests/fixtures/optimizer/tpc-h/nation.csv.gz', 'delimiter', '|')
self.assertEqual(4, normalization_distance(gen_expr(2), max_=100))
self.assertEqual(18, normalization_distance(gen_expr(3), max_=100))
self.assertEqual(110, normalization_distance(gen_expr(10), max_=100))
def test_manually_annotate_snowflake(self):
dialect = "snowflake"
schema = {
"SCHEMA": {
"TBL": {"COL": "INT", "col2": "VARCHAR"},
}
}
example_query = 'SELECT * FROM "SCHEMA"."TBL"'
expression = parse_one(example_query, dialect=dialect)
qual = optimizer.qualify.qualify(expression, schema=schema, dialect=dialect)
annotated = optimizer.annotate_types.annotate_types(qual, schema=schema, dialect=dialect)
self.assertTrue(annotated.selects[0].is_type("INT"))
self.assertTrue(annotated.selects[1].is_type("VARCHAR"))


@@ -67,9 +67,19 @@ x"""
tokens = Tokenizer().tokenize("SELECT\r\n 1,\r\n 2")
self.assertEqual(tokens[0].line, 1)
self.assertEqual(tokens[0].col, 6)
self.assertEqual(tokens[1].line, 2)
self.assertEqual(tokens[1].col, 3)
self.assertEqual(tokens[2].line, 2)
self.assertEqual(tokens[2].col, 4)
self.assertEqual(tokens[3].line, 3)
self.assertEqual(tokens[3].col, 3)
tokens = Tokenizer().tokenize(" SELECT\n 100")
self.assertEqual(tokens[0].line, 1)
self.assertEqual(tokens[0].col, 8)
self.assertEqual(tokens[1].line, 2)
self.assertEqual(tokens[1].col, 7)
def test_crlf(self):
tokens = Tokenizer().tokenize("SELECT a\r\nFROM b")


@@ -5,6 +5,7 @@ from sqlglot.transforms import (
eliminate_distinct_on,
eliminate_join_marks,
eliminate_qualify,
eliminate_window_clause,
remove_precision_parameterized_types,
unalias_group,
)
@@ -272,3 +273,15 @@ class TestTransforms(unittest.TestCase):
tree.sql(dialect=dialect)
== "SELECT a.id FROM a LEFT JOIN b ON a.id = b.id AND b.d = const"
)
def test_eliminate_window_clause(self):
self.validate(
eliminate_window_clause,
"SELECT purchases, LAST_VALUE(item) OVER (d) AS most_popular FROM Produce WINDOW a AS (PARTITION BY purchases), b AS (a ORDER BY purchases), c AS (b ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING), d AS (c)",
"SELECT purchases, LAST_VALUE(item) OVER (PARTITION BY purchases ORDER BY purchases ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS most_popular FROM Produce",
)
self.validate(
eliminate_window_clause,
"SELECT LAST_VALUE(c) OVER (a) AS c2 FROM (SELECT LAST_VALUE(i) OVER (a) AS c FROM p WINDOW a AS (PARTITION BY x)) AS q(c) WINDOW a AS (PARTITION BY y)",
"SELECT LAST_VALUE(c) OVER (PARTITION BY y) AS c2 FROM (SELECT LAST_VALUE(i) OVER (PARTITION BY x) AS c FROM p) AS q(c)",
)