Adding upstream version 26.2.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent d908bee480
commit 7ee28625fb

85 changed files with 57142 additions and 52288 deletions

@@ -291,3 +291,15 @@ class TestDatabricks(Validator):
         self.validate_identity("GRANT SELECT ON TABLE sample_data TO `alf@melmak.et`")
         self.validate_identity("GRANT ALL PRIVILEGES ON TABLE forecasts TO finance")
         self.validate_identity("GRANT SELECT ON TABLE t TO `fab9e00e-ca35-11ec-9d64-0242ac120002`")
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE DELTA STATISTICS NOSCAN")
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE DELTA STATISTICS FOR ALL COLUMNS")
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE DELTA STATISTICS FOR COLUMNS foo, bar")
+        self.validate_identity("ANALYZE TABLE ctlg.db.tbl COMPUTE DELTA STATISTICS NOSCAN")
+        self.validate_identity("ANALYZE TABLES COMPUTE STATISTICS NOSCAN")
+        self.validate_identity("ANALYZE TABLES FROM db COMPUTE STATISTICS")
+        self.validate_identity("ANALYZE TABLES IN db COMPUTE STATISTICS")
+        self.validate_identity(
+            "ANALYZE TABLE ctlg.db.tbl PARTITION(foo = 'foo', bar = 'bar') COMPUTE STATISTICS NOSCAN"
+        )
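A quick way to exercise the new ANALYZE support outside the test suite (a minimal sketch, assuming sqlglot >= 26.2, where ANALYZE parsing landed; validate_identity above is essentially this parse-then-generate round trip):

    import sqlglot

    # Round-trip one of the new Databricks ANALYZE statements; the test
    # asserts exactly this identity.
    sql = "ANALYZE TABLE ctlg.db.tbl COMPUTE DELTA STATISTICS NOSCAN"
    print(sqlglot.parse_one(sql, dialect="databricks").sql(dialect="databricks"))
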
@@ -528,7 +528,7 @@ class TestDialect(Validator):
                 "sqlite": "SELECT (NOT x GLOB CAST(x'2a5b5e012d7f5d2a' AS TEXT))",
                 "mysql": "SELECT REGEXP_LIKE(x, '^[[:ascii:]]*$')",
                 "postgres": "SELECT (x ~ '^[[:ascii:]]*$')",
-                "tsql": "SELECT (PATINDEX('%[^' + CHAR(0x00) + '-' + CHAR(0x7f) + ']%' COLLATE Latin1_General_BIN, x) = 0)",
+                "tsql": "SELECT (PATINDEX(CONVERT(VARCHAR(MAX), 0x255b5e002d7f5d25) COLLATE Latin1_General_BIN, x) = 0)",
                 "oracle": "SELECT NVL(REGEXP_LIKE(x, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)",
             },
         )
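The T-SQL expectation changes from building the pattern with string concatenation of CHAR(0x00) to encoding the whole pattern as one hex literal via CONVERT(VARCHAR(MAX), 0x...), presumably because the NUL byte does not survive concatenation. A hedged sketch of how to observe the generated SQL (reading from MySQL here is an illustrative assumption, and the exact output depends on the sqlglot version):

    import sqlglot

    # Transpile an ASCII-only regex check into T-SQL, which has no regex support.
    sql = "SELECT REGEXP_LIKE(x, '^[[:ascii:]]*$')"
    print(sqlglot.transpile(sql, read="mysql", write="tsql")[0])
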
@@ -1686,7 +1686,7 @@ class TestDialect(Validator):
             write={
                 "drill": "STRPOS(haystack, needle)",
                 "duckdb": "STRPOS(haystack, needle)",
-                "postgres": "POSITION(needle IN haystack)",
+                "postgres": "STRPOS(haystack, needle)",
                 "presto": "STRPOS(haystack, needle)",
                 "spark": "LOCATE(needle, haystack)",
                 "clickhouse": "position(haystack, needle)",
@@ -1699,7 +1699,7 @@ class TestDialect(Validator):
             write={
                 "drill": "STRPOS(haystack, needle)",
                 "duckdb": "STRPOS(haystack, needle)",
-                "postgres": "POSITION(needle IN haystack)",
+                "postgres": "STRPOS(haystack, needle)",
                 "presto": "STRPOS(haystack, needle)",
                 "bigquery": "STRPOS(haystack, needle)",
                 "spark": "LOCATE(needle, haystack)",
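Both hunks switch the Postgres expectation from POSITION(needle IN haystack) to STRPOS(haystack, needle); the two are equivalent for a plain substring search, and STRPOS keeps the argument order uniform with the other targets. A minimal sketch:

    import sqlglot

    # Two-argument POSITION now renders as STRPOS for Postgres.
    print(sqlglot.transpile("POSITION(needle, haystack)", write="postgres")[0])
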
@@ -1711,8 +1711,9 @@ class TestDialect(Validator):
         self.validate_all(
             "POSITION(needle, haystack, pos)",
             write={
-                "drill": "STRPOS(SUBSTR(haystack, pos), needle) + pos - 1",
-                "presto": "STRPOS(SUBSTR(haystack, pos), needle) + pos - 1",
+                "drill": "`IF`(STRPOS(SUBSTR(haystack, pos), needle) = 0, 0, STRPOS(SUBSTR(haystack, pos), needle) + pos - 1)",
+                "presto": "IF(STRPOS(SUBSTR(haystack, pos), needle) = 0, 0, STRPOS(SUBSTR(haystack, pos), needle) + pos - 1)",
+                "postgres": "CASE WHEN STRPOS(SUBSTR(haystack, pos), needle) = 0 THEN 0 ELSE STRPOS(SUBSTR(haystack, pos), needle) + pos - 1 END",
                 "spark": "LOCATE(needle, haystack, pos)",
                 "clickhouse": "position(haystack, needle, pos)",
                 "snowflake": "POSITION(needle, haystack, pos)",
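With a start offset, the naive STRPOS(SUBSTR(haystack, pos), needle) + pos - 1 returns pos - 1 instead of 0 when the needle is absent; the rewritten expectations wrap the expression in an IF (or CASE) guard so a miss still yields 0. Sketch:

    import sqlglot

    # The miss case (STRPOS(...) = 0) must map back to 0, hence the guard.
    print(sqlglot.transpile("POSITION(needle, haystack, pos)", write="presto")[0])
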
@@ -2335,6 +2336,17 @@ SELECT
             },
         )
 
+        # needs to preserve the target alias in the WHEN condition and function but not in the THEN clause
+        self.validate_all(
+            """MERGE INTO foo AS target USING (SELECT a, b FROM tbl) AS src ON src.a = target.a
+            WHEN MATCHED THEN UPDATE SET target.b = COALESCE(src.b, target.b)
+            WHEN NOT MATCHED THEN INSERT (target.a, target.b) VALUES (src.a, src.b)""",
+            write={
+                "trino": """MERGE INTO foo AS target USING (SELECT a, b FROM tbl) AS src ON src.a = target.a WHEN MATCHED THEN UPDATE SET b = COALESCE(src.b, target.b) WHEN NOT MATCHED THEN INSERT (a, b) VALUES (src.a, src.b)""",
+                "postgres": """MERGE INTO foo AS target USING (SELECT a, b FROM tbl) AS src ON src.a = target.a WHEN MATCHED THEN UPDATE SET b = COALESCE(src.b, target.b) WHEN NOT MATCHED THEN INSERT (a, b) VALUES (src.a, src.b)""",
+            },
+        )
+
     def test_substring(self):
         self.validate_all(
             "SUBSTR('123456', 2, 3)",
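Trino and Postgres reject a target-table qualifier in the UPDATE SET and INSERT column lists of MERGE, while it stays legal (and meaningful) in the match condition; the new test pins that the alias is dropped only where required. A sketch mirroring the test:

    import sqlglot

    merge = """MERGE INTO foo AS target USING (SELECT a, b FROM tbl) AS src ON src.a = target.a
    WHEN MATCHED THEN UPDATE SET target.b = COALESCE(src.b, target.b)
    WHEN NOT MATCHED THEN INSERT (target.a, target.b) VALUES (src.a, src.b)"""
    # target.b / target.a lose their qualifier in SET and INSERT, but not in ON.
    print(sqlglot.transpile(merge, write="trino")[0])
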
@@ -100,3 +100,8 @@ class TestDoris(Validator):
                 "doris": "SELECT REGEXP(abc, '%foo%')",
             },
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TABLE tbl")
+        self.validate_identity("ANALYZE DATABASE db")
+        self.validate_identity("ANALYZE TABLE TBL(c1, c2)")
@@ -19,3 +19,7 @@ class TestDrill(Validator):
                 "mysql": "SELECT '2021-01-01' + INTERVAL '1' MONTH",
             },
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE STATISTICS")
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE STATISTICS SAMPLE 5 PERCENT")
tests/dialects/test_druid.py (new file)

@@ -0,0 +1,21 @@
+from sqlglot.dialects.dialect import Dialects
+from tests.dialects.test_dialect import Validator
+
+
+class TestDruid(Validator):
+    dialect = "druid"
+
+    def test_druid(self):
+        self.validate_identity("SELECT CEIL(__time TO WEEK) FROM t")
+        self.validate_identity("SELECT CEIL(col) FROM t")
+        self.validate_identity("SELECT CEIL(price, 2) AS rounded_price FROM t")
+        self.validate_identity("SELECT FLOOR(__time TO WEEK) FROM t")
+        self.validate_identity("SELECT FLOOR(col) FROM t")
+        self.validate_identity("SELECT FLOOR(price, 2) AS rounded_price FROM t")
+
+        # validate across all dialects
+        write = {dialect.value: "FLOOR(__time TO WEEK)" for dialect in Dialects}
+        self.validate_all(
+            "FLOOR(__time TO WEEK)",
+            write=write,
+        )
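Druid's FLOOR(__time TO WEEK) is a date-truncation form rather than numeric FLOOR; the dict comprehension above asserts it renders unchanged in every registered dialect. A sketch of the same check for a single target (assuming sqlglot >= 26.2, where the druid dialect was added):

    import sqlglot

    # The TO <unit> form should survive the round trip untouched.
    print(sqlglot.transpile("FLOOR(__time TO WEEK)", read="druid", write="duckdb")[0])
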
@@ -1448,3 +1448,6 @@ class TestDuckDB(Validator):
             "WITH t1 AS (FROM (FROM t2 SELECT foo1, foo2)) FROM t1",
             "WITH t1 AS (SELECT * FROM (SELECT foo1, foo2 FROM t2)) SELECT * FROM t1",
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE")
@@ -588,8 +588,8 @@ class TestHive(Validator):
         self.validate_all(
             "LOCATE('a', x, 3)",
             write={
-                "duckdb": "STRPOS(SUBSTR(x, 3), 'a') + 3 - 1",
-                "presto": "STRPOS(SUBSTR(x, 3), 'a') + 3 - 1",
+                "duckdb": "CASE WHEN STRPOS(SUBSTR(x, 3), 'a') = 0 THEN 0 ELSE STRPOS(SUBSTR(x, 3), 'a') + 3 - 1 END",
+                "presto": "IF(STRPOS(SUBSTR(x, 3), 'a') = 0, 0, STRPOS(SUBSTR(x, 3), 'a') + 3 - 1)",
                 "hive": "LOCATE('a', x, 3)",
                 "spark": "LOCATE('a', x, 3)",
             },
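Same not-found fix as the POSITION change above, applied to Hive's three-argument LOCATE: DuckDB gets a CASE guard, Presto an IF. Sketch:

    import sqlglot

    # LOCATE with a start offset; a miss must yield 0, not 3 - 1.
    print(sqlglot.transpile("LOCATE('a', x, 3)", read="hive", write="duckdb")[0])
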
@@ -1378,3 +1378,13 @@ COMMENT='客户账户表'"""
                 "mysql": "SELECT FORMAT(12332.2, 2, 'de_DE')",
             },
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE LOCAL TABLE tbl")
+        self.validate_identity("ANALYZE NO_WRITE_TO_BINLOG TABLE tbl")
+        self.validate_identity("ANALYZE tbl UPDATE HISTOGRAM ON col1")
+        self.validate_identity("ANALYZE tbl UPDATE HISTOGRAM ON col1 USING DATA 'json_data'")
+        self.validate_identity("ANALYZE tbl UPDATE HISTOGRAM ON col1 WITH 5 BUCKETS")
+        self.validate_identity("ANALYZE tbl UPDATE HISTOGRAM ON col1 WITH 5 BUCKETS AUTO UPDATE")
+        self.validate_identity("ANALYZE tbl UPDATE HISTOGRAM ON col1 WITH 5 BUCKETS MANUAL UPDATE")
+        self.validate_identity("ANALYZE tbl DROP HISTOGRAM ON col1")
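MySQL's histogram-maintenance syntax is now parsed rather than treated as an opaque command. A minimal round-trip sketch (assuming sqlglot >= 26.2):

    import sqlglot

    sql = "ANALYZE tbl UPDATE HISTOGRAM ON col1 WITH 5 BUCKETS"
    print(sqlglot.parse_one(sql, dialect="mysql").sql(dialect="mysql"))
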
@@ -654,3 +654,27 @@ WHERE
             "'W'",
         ):
             self.validate_identity(f"TRUNC(x, {unit})")
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TABLE tbl")
+        self.validate_identity("ANALYZE INDEX ndx")
+        self.validate_identity("ANALYZE TABLE db.tbl PARTITION(foo = 'foo', bar = 'bar')")
+        self.validate_identity("ANALYZE TABLE db.tbl SUBPARTITION(foo = 'foo', bar = 'bar')")
+        self.validate_identity("ANALYZE INDEX db.ndx PARTITION(foo = 'foo', bar = 'bar')")
+        self.validate_identity("ANALYZE INDEX db.ndx PARTITION(part1)")
+        self.validate_identity("ANALYZE CLUSTER db.cluster")
+        self.validate_identity("ANALYZE TABLE tbl VALIDATE REF UPDATE")
+        self.validate_identity("ANALYZE LIST CHAINED ROWS")
+        self.validate_identity("ANALYZE LIST CHAINED ROWS INTO tbl")
+        self.validate_identity("ANALYZE DELETE STATISTICS")
+        self.validate_identity("ANALYZE DELETE SYSTEM STATISTICS")
+        self.validate_identity("ANALYZE VALIDATE REF UPDATE")
+        self.validate_identity("ANALYZE VALIDATE REF UPDATE SET DANGLING TO NULL")
+        self.validate_identity("ANALYZE VALIDATE STRUCTURE")
+        self.validate_identity("ANALYZE VALIDATE STRUCTURE CASCADE FAST")
+        self.validate_identity(
+            "ANALYZE TABLE tbl VALIDATE STRUCTURE CASCADE COMPLETE ONLINE INTO db.tbl"
+        )
+        self.validate_identity(
+            "ANALYZE TABLE tbl VALIDATE STRUCTURE CASCADE COMPLETE OFFLINE INTO db.tbl"
+        )
@@ -1316,3 +1316,9 @@ CROSS JOIN JSON_ARRAY_ELEMENTS(CAST(JSON_EXTRACT_PATH(tbox, 'boxes') AS JSON)) A
         self.validate_identity(
             "SELECT XMLELEMENT(NAME foo, XMLATTRIBUTES('xyz' AS bar), XMLELEMENT(NAME abc), XMLCOMMENT('test'), XMLELEMENT(NAME xyz))"
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TBL")
+        self.validate_identity("ANALYZE TBL(col1, col2)")
+        self.validate_identity("ANALYZE VERBOSE SKIP_LOCKED TBL(col1, col2)")
+        self.validate_identity("ANALYZE BUFFER_USAGE_LIMIT 1337 TBL")
@@ -1296,3 +1296,7 @@ MATCH_RECOGNIZE (
 
         # If the setting is overridden to False, then generate ROW access (dot notation)
         self.assertEqual(s.sql(dialect_row_access_setting), 'SELECT col.x.y."special string"')
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE tbl")
+        self.validate_identity("ANALYZE tbl WITH (prop1=val1, prop2=val2)")
@@ -666,3 +666,9 @@ FROM (
         self.validate_identity("GRANT USAGE ON DATABASE sales_db TO Bob")
         self.validate_identity("GRANT USAGE ON SCHEMA sales_schema TO ROLE Analyst_role")
         self.validate_identity("GRANT SELECT ON sales_db.sales_schema.tickit_sales_redshift TO Bob")
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TBL(col1, col2)")
+        self.validate_identity("ANALYZE VERBOSE TBL")
+        self.validate_identity("ANALYZE TBL PREDICATE COLUMNS")
+        self.validate_identity("ANALYZE TBL ALL COLUMNS")
@@ -77,6 +77,7 @@ class TestSnowflake(Validator):
         self.validate_identity("SELECT MATCH_CONDITION")
         self.validate_identity("SELECT * REPLACE (CAST(col AS TEXT) AS scol) FROM t")
         self.validate_identity("1 /* /* */")
+        self.validate_identity("TO_TIMESTAMP(col, fmt)")
         self.validate_identity(
             "SELECT * FROM table AT (TIMESTAMP => '2024-07-24') UNPIVOT(a FOR b IN (c)) AS pivot_table"
         )
@@ -104,7 +105,14 @@ class TestSnowflake(Validator):
         self.validate_identity(
             "SELECT * FROM DATA AS DATA_L ASOF JOIN DATA AS DATA_R MATCH_CONDITION (DATA_L.VAL > DATA_R.VAL) ON DATA_L.ID = DATA_R.ID"
         )
-        self.validate_identity("TO_TIMESTAMP(col, fmt)")
+        self.validate_identity(
+            "WITH t (SELECT 1 AS c) SELECT c FROM t",
+            "WITH t AS (SELECT 1 AS c) SELECT c FROM t",
+        )
+        self.validate_identity(
+            "GET_PATH(json_data, '$id')",
+            """GET_PATH(json_data, '["$id"]')""",
+        )
         self.validate_identity(
             "CAST(x AS GEOGRAPHY)",
             "TO_GEOGRAPHY(x)",
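Two normalizations land here: Snowflake tolerates a CTE definition without AS, which sqlglot now rewrites to the canonical form, and a GET_PATH key with a leading $ is quoted into bracket notation, presumably so it is not read as a path operator. Sketch of the first:

    import sqlglot

    sql = "WITH t (SELECT 1 AS c) SELECT c FROM t"
    # The missing AS is inserted on generation.
    print(sqlglot.parse_one(sql, dialect="snowflake").sql(dialect="snowflake"))
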
@@ -481,6 +489,7 @@ class TestSnowflake(Validator):
             write={
                 "": "SELECT LOGICAL_OR(c1), LOGICAL_OR(c2) FROM test",
+                "duckdb": "SELECT BOOL_OR(c1), BOOL_OR(c2) FROM test",
                 "oracle": "SELECT MAX(c1), MAX(c2) FROM test",
                 "postgres": "SELECT BOOL_OR(c1), BOOL_OR(c2) FROM test",
                 "snowflake": "SELECT BOOLOR_AGG(c1), BOOLOR_AGG(c2) FROM test",
                 "spark": "SELECT BOOL_OR(c1), BOOL_OR(c2) FROM test",

@@ -492,6 +501,7 @@ class TestSnowflake(Validator):
             write={
                 "": "SELECT LOGICAL_AND(c1), LOGICAL_AND(c2) FROM test",
+                "duckdb": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
                 "oracle": "SELECT MIN(c1), MIN(c2) FROM test",
                 "postgres": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
                 "snowflake": "SELECT BOOLAND_AGG(c1), BOOLAND_AGG(c2) FROM test",
                 "spark": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
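The new duckdb entries pin the BOOLOR_AGG/BOOLAND_AGG mappings; Oracle, lacking boolean aggregates, falls back to MAX/MIN. Sketch:

    import sqlglot

    sql = "SELECT BOOLOR_AGG(c1) FROM test"
    for target in ("duckdb", "postgres", "oracle"):
        print(target, "->", sqlglot.transpile(sql, read="snowflake", write=target)[0])
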
@@ -263,6 +263,14 @@ TBLPROPERTIES (
         self.validate_identity("TRIM(LEADING 'SL' FROM 'SSparkSQLS')")
         self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
         self.validate_identity("SPLIT(str, pattern, lim)")
+        self.validate_identity(
+            "SELECT 1 limit",
+            "SELECT 1 AS limit",
+        )
+        self.validate_identity(
+            "SELECT 1 offset",
+            "SELECT 1 AS offset",
+        )
         self.validate_identity(
             "SELECT UNIX_TIMESTAMP()",
             "SELECT UNIX_TIMESTAMP(CURRENT_TIMESTAMP())",
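Spark permits bare trailing identifiers named limit and offset; parsing them as column aliases (and emitting an explicit AS) keeps them from being mistaken for LIMIT/OFFSET clauses. Sketch:

    import sqlglot

    # The bare identifier becomes an explicit alias: SELECT 1 AS limit.
    print(sqlglot.parse_one("SELECT 1 limit", dialect="spark").sql(dialect="spark"))
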
@@ -918,3 +926,15 @@ TBLPROPERTIES (
             with self.subTest(f"Testing STRING() for {dialect}"):
                 query = parse_one("STRING(a)", dialect=dialect)
                 self.assertEqual(query.sql(dialect), "CAST(a AS STRING)")
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE STATISTICS NOSCAN")
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE STATISTICS FOR ALL COLUMNS")
+        self.validate_identity("ANALYZE TABLE tbl COMPUTE STATISTICS FOR COLUMNS foo, bar")
+        self.validate_identity("ANALYZE TABLE ctlg.db.tbl COMPUTE STATISTICS NOSCAN")
+        self.validate_identity("ANALYZE TABLES COMPUTE STATISTICS NOSCAN")
+        self.validate_identity("ANALYZE TABLES FROM db COMPUTE STATISTICS")
+        self.validate_identity("ANALYZE TABLES IN db COMPUTE STATISTICS")
+        self.validate_identity(
+            "ANALYZE TABLE ctlg.db.tbl PARTITION(foo = 'foo', bar = 'bar') COMPUTE STATISTICS NOSCAN"
+        )
@@ -237,3 +237,7 @@ class TestSQLite(Validator):
         self.validate_identity(
             "CREATE TABLE store (store_id INTEGER PRIMARY KEY AUTOINCREMENT, mgr_id INTEGER NOT NULL UNIQUE REFERENCES staff ON UPDATE CASCADE)"
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE tbl")
+        self.validate_identity("ANALYZE schma.tbl")
@@ -126,3 +126,24 @@ class TestStarrocks(Validator):
                 "spark": "SELECT id, t.col FROM tbl LATERAL VIEW EXPLODE(scores) t AS col",
             },
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE TABLE TBL(c1, c2) PROPERTIES ('prop1'=val1)")
+        self.validate_identity("ANALYZE FULL TABLE TBL(c1, c2) PROPERTIES ('prop1'=val1)")
+        self.validate_identity("ANALYZE SAMPLE TABLE TBL(c1, c2) PROPERTIES ('prop1'=val1)")
+        self.validate_identity("ANALYZE TABLE TBL(c1, c2) WITH SYNC MODE PROPERTIES ('prop1'=val1)")
+        self.validate_identity(
+            "ANALYZE TABLE TBL(c1, c2) WITH ASYNC MODE PROPERTIES ('prop1'=val1)"
+        )
+        self.validate_identity(
+            "ANALYZE TABLE TBL UPDATE HISTOGRAM ON c1, c2 PROPERTIES ('prop1'=val1)"
+        )
+        self.validate_identity(
+            "ANALYZE TABLE TBL UPDATE HISTOGRAM ON c1, c2 WITH 5 BUCKETS PROPERTIES ('prop1'=val1)"
+        )
+        self.validate_identity(
+            "ANALYZE TABLE TBL UPDATE HISTOGRAM ON c1, c2 WITH SYNC MODE WITH 5 BUCKETS PROPERTIES ('prop1'=val1)"
+        )
+        self.validate_identity(
+            "ANALYZE TABLE TBL UPDATE HISTOGRAM ON c1, c2 WITH ASYNC MODE WITH 5 BUCKETS PROPERTIES ('prop1'=val1)"
+        )
@@ -78,3 +78,7 @@ class TestTrino(Validator):
         self.validate_identity(
             "ALTER VIEW people SET AUTHORIZATION alice", check_command_warning=True
         )
+
+    def test_analyze(self):
+        self.validate_identity("ANALYZE tbl")
+        self.validate_identity("ANALYZE tbl WITH (prop1=val1, prop2=val2)")
tests/fixtures/identity.sql (vendored)

@@ -882,4 +882,6 @@ GRANT SELECT ON orders TO ROLE PUBLIC
 GRANT SELECT ON nation TO alice WITH GRANT OPTION
 GRANT DELETE ON SCHEMA finance TO bob
+SELECT attach
+SELECT detach
 SELECT 1 OFFSET 1
 SELECT 1 LIMIT 1
tests/fixtures/optimizer/qualify_columns.sql (vendored)

@@ -277,6 +277,10 @@ SELECT x.a AS a FROM x AS x UNION SELECT x.a AS a FROM x AS x UNION SELECT x.a A
 SELECT a FROM (SELECT a FROM x UNION SELECT a FROM x) ORDER BY a;
 SELECT _q_0.a AS a FROM (SELECT x.a AS a FROM x AS x UNION SELECT x.a AS a FROM x AS x) AS _q_0 ORDER BY a;
 
+# title: nested subqueries in union
+((select a from x where a < 1)) UNION ((select a from x where a > 2));
+((SELECT x.a AS a FROM x AS x WHERE x.a < 1)) UNION ((SELECT x.a AS a FROM x AS x WHERE x.a > 2));
+
 --------------------------------------
 -- Subqueries
 --------------------------------------
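The fixture pair above is input followed by expected output for the qualify_columns pass: even with doubled parentheses around each union arm, columns gain their table qualifier and tables their alias. Roughly equivalent driver code (a sketch; the schema dict here is an assumption, and the fixture harness supplies its own):

    from sqlglot import parse_one
    from sqlglot.optimizer.qualify import qualify

    sql = "((select a from x where a < 1)) UNION ((select a from x where a > 2))"
    # x is assumed to have a single column a.
    print(qualify(parse_one(sql), schema={"x": {"a": "int"}}).sql())
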
|
@ -822,6 +822,22 @@ class TestBuild(unittest.TestCase):
|
|||
lambda: exp.union("SELECT 1", "SELECT 2", "SELECT 3", "SELECT 4"),
|
||||
"SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4",
|
||||
),
|
||||
(
|
||||
lambda: select("x")
|
||||
.with_("var1", as_=select("x").from_("tbl2").subquery(), scalar=True)
|
||||
.from_("tbl")
|
||||
.where("x > var1"),
|
||||
"WITH (SELECT x FROM tbl2) AS var1 SELECT x FROM tbl WHERE x > var1",
|
||||
"clickhouse",
|
||||
),
|
||||
(
|
||||
lambda: select("x")
|
||||
.with_("var1", as_=select("x").from_("tbl2"), scalar=True)
|
||||
.from_("tbl")
|
||||
.where("x > var1"),
|
||||
"WITH (SELECT x FROM tbl2) AS var1 SELECT x FROM tbl WHERE x > var1",
|
||||
"clickhouse",
|
||||
),
|
||||
]:
|
||||
with self.subTest(sql):
|
||||
self.assertEqual(expression().sql(dialect[0] if dialect else None), sql)
|
||||
|
|
|
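The two new builder cases cover the scalar=True flag on with_(), once with an explicit .subquery() and once with a bare select; either way ClickHouse's scalar-CTE form WITH (subquery) AS name is produced. A standalone sketch:

    from sqlglot import select

    query = (
        select("x")
        .with_("var1", as_=select("x").from_("tbl2"), scalar=True)
        .from_("tbl")
        .where("x > var1")
    )
    print(query.sql("clickhouse"))
    # WITH (SELECT x FROM tbl2) AS var1 SELECT x FROM tbl WHERE x > var1
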
@@ -880,7 +880,6 @@ FROM tbl1""",
             "ALTER TABLE table1 RENAME COLUMN c1 c2",
             "ALTER TYPE electronic_mail RENAME TO email",
             "ALTER schema doo",
-            "ANALYZE a.y",
             "CALL catalog.system.iceberg_procedure_name(named_arg_1 => 'arg_1', named_arg_2 => 'arg_2')",
             "COMMENT ON ACCESS METHOD gin IS 'GIN index access method'",
             "CREATE OR REPLACE STAGE",