Adding upstream version 26.1.3.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in: parent 09521056ff, commit d908bee480.
119 changed files with 71635 additions and 68059 deletions.
@@ -29,6 +29,7 @@ class TestClickhouse(Validator):
         self.assertEqual(expr.sql(dialect="clickhouse"), "COUNT(x)")
         self.assertIsNone(expr._meta)
 
+        self.validate_identity("WITH arrayJoin([(1, [2, 3])]) AS arr SELECT arr")
         self.validate_identity("CAST(1 AS Bool)")
         self.validate_identity("SELECT toString(CHAR(104.1, 101, 108.9, 108.9, 111, 32))")
         self.validate_identity("@macro").assert_is(exp.Parameter).this.assert_is(exp.Var)
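For context: validate_identity parses a statement in the test's dialect and asserts that regenerating it yields the input back (or the expected rewrite, when a second argument is given). A minimal equivalent with the public API, assuming sqlglot is importable:

    import sqlglot

    sql = "CAST(1 AS Bool)"
    # identity: parse in ClickHouse, regenerate in ClickHouse, expect the input back
    assert sqlglot.transpile(sql, read="clickhouse", write="clickhouse")[0] == sql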
@@ -549,6 +550,9 @@ class TestClickhouse(Validator):
             "SELECT name FROM data WHERE NOT ((SELECT DISTINCT name FROM data) IS NULL)",
         )
 
+        self.validate_identity("SELECT 1_2_3_4_5", "SELECT 12345")
+        self.validate_identity("SELECT 1_b", "SELECT 1_b")
+
     def test_clickhouse_values(self):
         values = exp.select("*").from_(
             exp.values([exp.tuple_(1, 2, 3)], alias="subq", columns=["a", "b", "c"])
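The added cases cover underscore digit separators in numeric literals, which are normalized away when the token is a number. A sketch, assuming sqlglot is importable:

    import sqlglot

    print(sqlglot.transpile("SELECT 1_2_3_4_5", read="clickhouse", write="clickhouse")[0])
    # per the expectation above: SELECT 12345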
@@ -520,6 +520,19 @@ class TestDialect(Validator):
             },
         )
 
+    def test_is_ascii(self):
+        self.validate_all(
+            "SELECT IS_ASCII(x)",
+            write={
+                "": "SELECT IS_ASCII(x)",
+                "sqlite": "SELECT (NOT x GLOB CAST(x'2a5b5e012d7f5d2a' AS TEXT))",
+                "mysql": "SELECT REGEXP_LIKE(x, '^[[:ascii:]]*$')",
+                "postgres": "SELECT (x ~ '^[[:ascii:]]*$')",
+                "tsql": "SELECT (PATINDEX('%[^' + CHAR(0x00) + '-' + CHAR(0x7f) + ']%' COLLATE Latin1_General_BIN, x) = 0)",
+                "oracle": "SELECT NVL(REGEXP_LIKE(x, '^[' || CHR(1) || '-' || CHR(127) || ']*$'), TRUE)",
+            },
+        )
+
     def test_nvl2(self):
         self.validate_all(
             "SELECT NVL2(a, b, c)",
@@ -1669,11 +1682,11 @@ class TestDialect(Validator):
             },
         )
         self.validate_all(
-            "POSITION(needle in haystack)",
+            "POSITION(needle IN haystack)",
             write={
                 "drill": "STRPOS(haystack, needle)",
                 "duckdb": "STRPOS(haystack, needle)",
-                "postgres": "STRPOS(haystack, needle)",
+                "postgres": "POSITION(needle IN haystack)",
                 "presto": "STRPOS(haystack, needle)",
                 "spark": "LOCATE(needle, haystack)",
                 "clickhouse": "position(haystack, needle)",
@@ -1686,7 +1699,7 @@ class TestDialect(Validator):
             write={
                 "drill": "STRPOS(haystack, needle)",
                 "duckdb": "STRPOS(haystack, needle)",
-                "postgres": "STRPOS(haystack, needle)",
+                "postgres": "POSITION(needle IN haystack)",
                 "presto": "STRPOS(haystack, needle)",
                 "bigquery": "STRPOS(haystack, needle)",
                 "spark": "LOCATE(needle, haystack)",
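Both hunks pin down argument order: POSITION-style functions take the needle first in some dialects and the haystack first in others. A sketch reproducing the expectations above from the generic dialect:

    import sqlglot

    sql = "POSITION(needle IN haystack)"
    for dialect in ("postgres", "spark", "clickhouse"):
        print(dialect, "->", sqlglot.transpile(sql, write=dialect)[0])
    # postgres   -> POSITION(needle IN haystack)
    # spark      -> LOCATE(needle, haystack)
    # clickhouse -> position(haystack, needle)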
@@ -276,12 +276,6 @@ class TestDuckDB(Validator):
         self.validate_identity("SELECT UNNEST(col, recursive := TRUE) FROM t")
         self.validate_identity("VAR_POP(a)")
         self.validate_identity("SELECT * FROM foo ASOF LEFT JOIN bar ON a = b")
-        self.validate_identity("PIVOT Cities ON Year USING SUM(Population)")
-        self.validate_identity("PIVOT Cities ON Year USING FIRST(Population)")
-        self.validate_identity("PIVOT Cities ON Year USING SUM(Population) GROUP BY Country")
-        self.validate_identity("PIVOT Cities ON Country, Name USING SUM(Population)")
-        self.validate_identity("PIVOT Cities ON Country || '_' || Name USING SUM(Population)")
-        self.validate_identity("PIVOT Cities ON Year USING SUM(Population) GROUP BY Country, Name")
         self.validate_identity("SELECT {'a': 1} AS x")
         self.validate_identity("SELECT {'a': {'b': {'c': 1}}, 'd': {'e': 2}} AS x")
         self.validate_identity("SELECT {'x': 1, 'y': 2, 'z': 3}")
@@ -1415,3 +1409,42 @@ class TestDuckDB(Validator):
         self.validate_identity("DETACH IF EXISTS file")
 
         self.validate_identity("DETACH DATABASE db", "DETACH db")
+
+    def test_simplified_pivot_unpivot(self):
+        self.validate_identity("PIVOT Cities ON Year USING SUM(Population)")
+        self.validate_identity("PIVOT Cities ON Year USING FIRST(Population)")
+        self.validate_identity("PIVOT Cities ON Year USING SUM(Population) GROUP BY Country")
+        self.validate_identity("PIVOT Cities ON Country, Name USING SUM(Population)")
+        self.validate_identity("PIVOT Cities ON Country || '_' || Name USING SUM(Population)")
+        self.validate_identity("PIVOT Cities ON Year USING SUM(Population) GROUP BY Country, Name")
+
+        self.validate_identity("UNPIVOT (SELECT 1 AS col1, 2 AS col2) ON foo, bar")
+        self.validate_identity(
+            "UNPIVOT monthly_sales ON jan, feb, mar, apr, may, jun INTO NAME month VALUE sales"
+        )
+        self.validate_identity(
+            "UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales"
+        )
+        self.validate_identity(
+            "UNPIVOT monthly_sales ON (jan, feb, mar) AS q1, (apr, may, jun) AS q2 INTO NAME quarter VALUE month_1_sales, month_2_sales, month_3_sales"
+        )
+        self.validate_identity(
+            "WITH unpivot_alias AS (UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales) SELECT * FROM unpivot_alias"
+        )
+        self.validate_identity(
+            "SELECT * FROM (UNPIVOT monthly_sales ON COLUMNS(* EXCLUDE (empid, dept)) INTO NAME month VALUE sales) AS unpivot_alias"
+        )
+
+    def test_from_first_with_parentheses(self):
+        self.validate_identity(
+            "CREATE TABLE t1 AS (FROM t2 SELECT foo1, foo2)",
+            "CREATE TABLE t1 AS (SELECT foo1, foo2 FROM t2)",
+        )
+        self.validate_identity(
+            "FROM (FROM t1 SELECT foo1, foo2)",
+            "SELECT * FROM (SELECT foo1, foo2 FROM t1)",
+        )
+        self.validate_identity(
+            "WITH t1 AS (FROM (FROM t2 SELECT foo1, foo2)) FROM t1",
+            "WITH t1 AS (SELECT * FROM (SELECT foo1, foo2 FROM t2)) SELECT * FROM t1",
+        )
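The second DuckDB hunk also covers FROM-first syntax, which is normalized to a conventional SELECT. A sketch, assuming sqlglot is importable:

    import sqlglot

    # DuckDB allows FROM before SELECT; sqlglot rewrites it per the test above
    print(sqlglot.transpile("FROM (FROM t1 SELECT foo1, foo2)", read="duckdb", write="duckdb")[0])
    # expected: SELECT * FROM (SELECT foo1, foo2 FROM t1)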
@@ -806,6 +806,8 @@ class TestHive(Validator):
             },
         )
 
+        self.validate_identity("SELECT 1_2")
+
     def test_escapes(self) -> None:
         self.validate_identity("'\n'", "'\\n'")
         self.validate_identity("'\\n'")
@@ -332,6 +332,8 @@ class TestMySQL(Validator):
             write={
                 "mysql": "CHAR(10)",
                 "presto": "CHR(10)",
+                "sqlite": "CHAR(10)",
+                "tsql": "CHAR(10)",
             },
         )
 
@@ -723,6 +725,7 @@ class TestMySQL(Validator):
             write={
                 "duckdb": "SELECT LENGTH('foo')",
                 "mysql": "SELECT CHAR_LENGTH('foo')",
+                "postgres": "SELECT LENGTH('foo')",
             },
         )
 
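Per the write table above, MySQL's CHAR_LENGTH maps to LENGTH in dialects where LENGTH counts characters. A sketch:

    import sqlglot

    sql = "SELECT CHAR_LENGTH('foo')"
    print(sqlglot.transpile(sql, read="mysql", write="postgres")[0])  # SELECT LENGTH('foo')
    print(sqlglot.transpile(sql, read="mysql", write="duckdb")[0])    # SELECT LENGTH('foo')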
@@ -45,6 +45,7 @@ class TestOracle(Validator):
         self.validate_identity("SELECT COUNT(*) * 10 FROM orders SAMPLE (10) SEED (1)")
         self.validate_identity("SELECT * FROM V$SESSION")
         self.validate_identity("SELECT TO_DATE('January 15, 1989, 11:00 A.M.')")
+        self.validate_identity("SELECT INSTR(haystack, needle)")
         self.validate_identity(
             "SELECT * FROM test UNPIVOT INCLUDE NULLS (value FOR Description IN (col AS 'PREFIX ' || CHR(38) || ' SUFFIX'))"
         )
@@ -49,6 +49,10 @@ class TestPostgres(Validator):
         self.validate_identity("CAST(x AS DATERANGE)")
         self.validate_identity("CAST(x AS DATEMULTIRANGE)")
         self.validate_identity("x$")
+        self.validate_identity("LENGTH(x)")
+        self.validate_identity("LENGTH(x, utf8)")
+        self.validate_identity("CHAR_LENGTH(x)", "LENGTH(x)")
+        self.validate_identity("CHARACTER_LENGTH(x)", "LENGTH(x)")
         self.validate_identity("SELECT ARRAY[1, 2, 3]")
         self.validate_identity("SELECT ARRAY(SELECT 1)")
         self.validate_identity("STRING_AGG(x, y)")
@@ -71,6 +75,9 @@ class TestPostgres(Validator):
         self.validate_identity("EXEC AS myfunc @id = 123", check_command_warning=True)
         self.validate_identity("SELECT CURRENT_USER")
         self.validate_identity("SELECT * FROM ONLY t1")
+        self.validate_identity(
+            "SELECT id, name FROM XMLTABLE('/root/user' PASSING xml_data COLUMNS id INT PATH '@id', name TEXT PATH 'name/text()') AS t"
+        )
         self.validate_identity(
             "SELECT * FROM t WHERE some_column >= CURRENT_DATE + INTERVAL '1 day 1 hour' AND some_another_column IS TRUE"
         )
@@ -874,6 +881,9 @@ class TestPostgres(Validator):
         self.validate_identity("ALTER TABLE t1 SET ACCESS METHOD method")
         self.validate_identity("ALTER TABLE t1 SET TABLESPACE tablespace")
         self.validate_identity("ALTER TABLE t1 SET (fillfactor = 5, autovacuum_enabled = TRUE)")
+        self.validate_identity(
+            "INSERT INTO newtable AS t(a, b, c) VALUES (1, 2, 3) ON CONFLICT(c) DO UPDATE SET a = t.a + 1 WHERE t.a < 1"
+        )
         self.validate_identity(
             "ALTER TABLE tested_table ADD CONSTRAINT unique_example UNIQUE (column_name) NOT VALID"
         )
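The upsert identity added above can be checked directly; a sketch, assuming sqlglot is importable:

    import sqlglot

    sql = (
        "INSERT INTO newtable AS t(a, b, c) VALUES (1, 2, 3) "
        "ON CONFLICT(c) DO UPDATE SET a = t.a + 1 WHERE t.a < 1"
    )
    # identity: the Postgres round trip should preserve the statement exactly
    assert sqlglot.transpile(sql, read="postgres", write="postgres")[0] == sql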
@@ -21,6 +21,7 @@ class TestSnowflake(Validator):
         expr.selects[0].assert_is(exp.AggFunc)
         self.assertEqual(expr.sql(dialect="snowflake"), "SELECT APPROX_TOP_K(C4, 3, 5) FROM t")
 
+        self.validate_identity("INSERT INTO test VALUES (x'48FAF43B0AFCEF9B63EE3A93EE2AC2')")
         self.validate_identity("exclude := [foo]")
         self.validate_identity("SELECT CAST([1, 2, 3] AS VECTOR(FLOAT, 3))")
         self.validate_identity("SELECT CONNECT_BY_ROOT test AS test_column_alias")
@@ -495,6 +496,7 @@ class TestSnowflake(Validator):
                 "snowflake": "SELECT BOOLAND_AGG(c1), BOOLAND_AGG(c2) FROM test",
                 "spark": "SELECT BOOL_AND(c1), BOOL_AND(c2) FROM test",
                 "sqlite": "SELECT MIN(c1), MIN(c2) FROM test",
+                "mysql": "SELECT MIN(c1), MIN(c2) FROM test",
             },
         )
         for suffix in (
@@ -2358,3 +2360,11 @@ SINGLE = TRUE""",
         self.assertEqual(ast.sql("snowflake"), query)
         self.assertEqual(len(list(ast.find_all(exp.Column))), 1)
         self.assertEqual(window.this.sql("snowflake"), "db.schema.FUNC(a)")
+
+    def test_offset_without_limit(self):
+        self.validate_all(
+            "SELECT 1 ORDER BY 1 LIMIT NULL OFFSET 0",
+            read={
+                "trino": "SELECT 1 ORDER BY 1 OFFSET 0",
+            },
+        )
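Per the read mapping above, a bare Trino OFFSET gains LIMIT NULL when generated for Snowflake. A sketch:

    import sqlglot

    print(sqlglot.transpile("SELECT 1 ORDER BY 1 OFFSET 0", read="trino", write="snowflake")[0])
    # per the expectation above: SELECT 1 ORDER BY 1 LIMIT NULL OFFSET 0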
@@ -9,6 +9,8 @@ class TestSpark(Validator):
     dialect = "spark"
 
     def test_ddl(self):
+        self.validate_identity("DROP NAMESPACE my_catalog.my_namespace")
+        self.validate_identity("CREATE NAMESPACE my_catalog.my_namespace")
         self.validate_identity("INSERT OVERWRITE TABLE db1.tb1 TABLE db2.tb2")
         self.validate_identity("CREATE TABLE foo AS WITH t AS (SELECT 1 AS col) SELECT col FROM t")
         self.validate_identity("CREATE TEMPORARY VIEW test AS SELECT 1")
@@ -92,6 +92,17 @@ class TestSQLite(Validator):
             read={"snowflake": "LEAST(x, y, z)"},
             write={"snowflake": "LEAST(x, y, z)"},
         )
+        self.validate_all(
+            "UNICODE(x)",
+            write={
+                "": "UNICODE(x)",
+                "mysql": "ORD(CONVERT(x USING utf32))",
+                "oracle": "ASCII(UNISTR(x))",
+                "postgres": "ASCII(x)",
+                "redshift": "ASCII(x)",
+                "spark": "ASCII(x)",
+            },
+        )
         self.validate_identity(
             "SELECT * FROM station WHERE city IS NOT ''",
             "SELECT * FROM station WHERE NOT city IS ''",
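SQLite's UNICODE(x) (the code point of the first character) fans out per dialect, per the write table above. A sketch:

    import sqlglot

    print(sqlglot.transpile("UNICODE(x)", read="sqlite", write="postgres")[0])  # ASCII(x)
    print(sqlglot.transpile("UNICODE(x)", read="sqlite", write="oracle")[0])    # ASCII(UNISTR(x))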
@@ -753,6 +753,16 @@ class TestTSQL(Validator):
             },
         )
 
+        self.validate_all(
+            "CREATE TABLE t (col1 DATETIME2(2))",
+            read={
+                "snowflake": "CREATE TABLE t (col1 TIMESTAMP_NTZ(2))",
+            },
+            write={
+                "tsql": "CREATE TABLE t (col1 DATETIME2(2))",
+            },
+        )
+
     def test_types_bin(self):
         self.validate_all(
             "CAST(x as BIT)",
@@ -1220,7 +1230,10 @@ WHERE
     def test_datefromparts(self):
         self.validate_all(
             "SELECT DATEFROMPARTS('2020', 10, 01)",
-            write={"spark": "SELECT MAKE_DATE('2020', 10, 01)"},
+            write={
+                "spark": "SELECT MAKE_DATE('2020', 10, 01)",
+                "tsql": "SELECT DATEFROMPARTS('2020', 10, 01)",
+            },
         )
 
     def test_datename(self):
@@ -2090,3 +2103,27 @@ FROM OPENJSON(@json) WITH (
                 "oracle": "SELECT NEXT VALUE FOR db.schema.sequence_name",
             },
         )
+
+    # string literals in the DATETRUNC are casted as DATETIME2
+    def test_datetrunc(self):
+        self.validate_all(
+            "SELECT DATETRUNC(month, 'foo')",
+            write={
+                "duckdb": "SELECT DATE_TRUNC('MONTH', CAST('foo' AS TIMESTAMP))",
+                "tsql": "SELECT DATETRUNC(MONTH, CAST('foo' AS DATETIME2))",
+            },
+        )
+        self.validate_all(
+            "SELECT DATETRUNC(month, foo)",
+            write={
+                "duckdb": "SELECT DATE_TRUNC('MONTH', foo)",
+                "tsql": "SELECT DATETRUNC(MONTH, foo)",
+            },
+        )
+        self.validate_all(
+            "SELECT DATETRUNC(year, CAST('foo1' AS date))",
+            write={
+                "duckdb": "SELECT DATE_TRUNC('YEAR', CAST('foo1' AS DATE))",
+                "tsql": "SELECT DATETRUNC(YEAR, CAST('foo1' AS DATE))",
+            },
+        )
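The new test_datetrunc cases show string literals inside DATETRUNC being wrapped in casts. A sketch of the first case:

    import sqlglot

    print(sqlglot.transpile("SELECT DATETRUNC(month, 'foo')", read="tsql", write="duckdb")[0])
    # per the expectation above: SELECT DATE_TRUNC('MONTH', CAST('foo' AS TIMESTAMP))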
tests/fixtures/optimizer/optimizer.sql (vendored, 44 changes)
@@ -630,11 +630,11 @@ PIVOT(SUM(`u_cte`.`f`) AS `sum` FOR `u_cte`.`h` IN ('x', 'y')) AS `_q_0`;
 # dialect: snowflake
 SELECT * FROM u PIVOT (SUM(f) FOR h IN ('x', 'y'));
 SELECT
-  "_q_0"."G" AS "G",
-  "_q_0"."'x'" AS "'x'",
-  "_q_0"."'y'" AS "'y'"
+  "_Q_0"."G" AS "G",
+  "_Q_0"."'x'" AS "'x'",
+  "_Q_0"."'y'" AS "'y'"
 FROM "U" AS "U"
-PIVOT(SUM("U"."F") FOR "U"."H" IN ('x', 'y')) AS "_q_0";
+PIVOT(SUM("U"."F") FOR "U"."H" IN ('x', 'y')) AS "_Q_0";
 
 # title: selecting all columns from a pivoted source and generating spark
 # note: spark doesn't allow pivot aliases or qualified columns for the pivot's "field" (`h`)
@@ -690,14 +690,14 @@ PIVOT(MAX("SOURCE"."VALUE") FOR "SOURCE"."KEY" IN ('a', 'b', 'c')) AS "FINAL"("I
 # dialect: snowflake
 SELECT * FROM m_sales AS m_sales(empid, dept, jan, feb) UNPIVOT(sales FOR month IN (jan, feb)) ORDER BY empid;
 SELECT
-  "_q_0"."EMPID" AS "EMPID",
-  "_q_0"."DEPT" AS "DEPT",
-  "_q_0"."MONTH" AS "MONTH",
-  "_q_0"."SALES" AS "SALES"
+  "_Q_0"."EMPID" AS "EMPID",
+  "_Q_0"."DEPT" AS "DEPT",
+  "_Q_0"."MONTH" AS "MONTH",
+  "_Q_0"."SALES" AS "SALES"
 FROM "M_SALES" AS "M_SALES"("EMPID", "DEPT", "JAN", "FEB")
-UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_q_0"
+UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_Q_0"
 ORDER BY
-  "_q_0"."EMPID";
+  "_Q_0"."EMPID";
 
 # title: unpivoted table source, unpivot has column aliases
 # execute: false
@@ -722,10 +722,10 @@ UNPIVOT("sales" FOR "month" IN ("m_sales"."jan", "m_sales"."feb")) AS "unpiv"("a
 # dialect: snowflake
 SELECT * FROM (SELECT * FROM m_sales) AS m_sales(empid, dept, jan, feb) UNPIVOT(sales FOR month IN (jan, feb)) ORDER BY empid;
 SELECT
-  "_q_0"."EMPID" AS "EMPID",
-  "_q_0"."DEPT" AS "DEPT",
-  "_q_0"."MONTH" AS "MONTH",
-  "_q_0"."SALES" AS "SALES"
+  "_Q_0"."EMPID" AS "EMPID",
+  "_Q_0"."DEPT" AS "DEPT",
+  "_Q_0"."MONTH" AS "MONTH",
+  "_Q_0"."SALES" AS "SALES"
 FROM (
   SELECT
     "M_SALES"."EMPID" AS "EMPID",
@@ -734,9 +734,9 @@ FROM (
     "M_SALES"."FEB" AS "FEB"
   FROM "M_SALES" AS "M_SALES"
 ) AS "M_SALES"
-UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_q_0"
+UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_Q_0"
 ORDER BY
-  "_q_0"."EMPID";
+  "_Q_0"."EMPID";
 
 # title: unpivoted table source with a single value column, unpivot columns can be qualified
 # execute: false
@@ -832,13 +832,13 @@ WHERE
 GROUP BY `dAy`, `top_term`, rank
 ORDER BY `DaY` DESC;
 SELECT
-  `TOp_TeRmS`.`refresh_date` AS `day`,
-  `TOp_TeRmS`.`term` AS `top_term`,
-  `TOp_TeRmS`.`rank` AS `rank`
-FROM `bigquery-public-data.GooGle_tReNDs.TOp_TeRmS` AS `TOp_TeRmS`
+  `top_terms`.`refresh_date` AS `day`,
+  `top_terms`.`term` AS `top_term`,
+  `top_terms`.`rank` AS `rank`
+FROM `bigquery-public-data.GooGle_tReNDs.TOp_TeRmS` AS `top_terms`
 WHERE
-  `TOp_TeRmS`.`rank` = 1
-  AND `TOp_TeRmS`.`refresh_date` >= DATE_SUB(CURRENT_DATE, INTERVAL '2' WEEK)
+  `top_terms`.`rank` = 1
+  AND `top_terms`.`refresh_date` >= DATE_SUB(CURRENT_DATE, INTERVAL '2' WEEK)
 GROUP BY
   `day`,
   `top_term`,
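These fixture updates reflect dialect-aware case normalization: generated aliases such as "_q_0" are now uppercased to "_Q_0" for Snowflake, and case-insensitive BigQuery table aliases are lowercased. A sketch of how such output is produced; the schema below is illustrative, not the fixture's actual schema:

    import sqlglot
    from sqlglot.optimizer import optimize

    expression = sqlglot.parse_one(
        "SELECT * FROM u PIVOT (SUM(f) FOR h IN ('x', 'y'))", read="snowflake"
    )
    # hypothetical schema for the pivoted table
    schema = {"u": {"g": "int", "f": "int", "h": "text"}}
    print(optimize(expression, schema=schema, dialect="snowflake").sql("snowflake", pretty=True))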
@@ -801,6 +801,23 @@ class TestBuild(unittest.TestCase):
                 ),
                 "MERGE INTO target_table AS target USING source_table AS source ON target.id = source.id WHEN MATCHED THEN UPDATE SET target.name = source.name RETURNING target.*",
             ),
+            (
+                lambda: exp.merge(
+                    exp.When(
+                        matched=True,
+                        then=exp.Update(
+                            expressions=[
+                                exp.column("name", "target").eq(exp.column("name", "source"))
+                            ]
+                        ),
+                    ),
+                    into=exp.table_("target_table").as_("target"),
+                    using=exp.table_("source_table").as_("source"),
+                    on="target.id = source.id",
+                    returning="target.*",
+                ),
+                "MERGE INTO target_table AS target USING source_table AS source ON target.id = source.id WHEN MATCHED THEN UPDATE SET target.name = source.name RETURNING target.*",
+            ),
             (
                 lambda: exp.union("SELECT 1", "SELECT 2", "SELECT 3", "SELECT 4"),
                 "SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4",
@@ -613,10 +613,10 @@ class TestExecutor(unittest.TestCase):
             ("CONCAT('a', 'b')", "ab"),
             ("CONCAT('a', NULL)", None),
             ("CONCAT_WS('_', 'a', 'b')", "a_b"),
-            ("STR_POSITION('bar', 'foobarbar')", 4),
-            ("STR_POSITION('bar', 'foobarbar', 5)", 7),
-            ("STR_POSITION(NULL, 'foobarbar')", None),
-            ("STR_POSITION('bar', NULL)", None),
+            ("STR_POSITION('foobarbar', 'bar')", 4),
+            ("STR_POSITION('foobarbar', 'bar', 5)", 7),
+            ("STR_POSITION('foobarbar', NULL)", None),
+            ("STR_POSITION(NULL, 'bar')", None),
             ("UPPER('foo')", "FOO"),
             ("UPPER(NULL)", None),
             ("LOWER('FOO')", "foo"),
@@ -632,6 +632,12 @@ class TestExpressions(unittest.TestCase):
         self.assertTrue(all(isinstance(e, exp.Expression) for e in expression.walk()))
         self.assertTrue(all(isinstance(e, exp.Expression) for e in expression.walk(bfs=False)))
 
+    def test_str_position_order(self):
+        str_position_exp = parse_one("STR_POSITION('mytest', 'test')")
+        self.assertIsInstance(str_position_exp, exp.StrPosition)
+        self.assertEqual(str_position_exp.args.get("this").this, "mytest")
+        self.assertEqual(str_position_exp.args.get("substr").this, "test")
+
     def test_functions(self):
         self.assertIsInstance(parse_one("x LIKE ANY (y)"), exp.Like)
         self.assertIsInstance(parse_one("x ILIKE ANY (y)"), exp.ILike)
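Both the executor changes and this parser test pin the new STR_POSITION argument order: the string being searched comes first. A sketch mirroring the test:

    from sqlglot import exp, parse_one

    node = parse_one("STR_POSITION('mytest', 'test')")
    assert isinstance(node, exp.StrPosition)
    assert node.args["this"].this == "mytest"   # the string searched (haystack)
    assert node.args["substr"].this == "test"   # the substring sought (needle)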
@@ -255,6 +255,35 @@ class TestOptimizer(unittest.TestCase):
 
     @patch("sqlglot.generator.logger")
     def test_qualify_columns(self, logger):
+        self.assertEqual(
+            optimizer.qualify.qualify(
+                parse_one(
+                    """
+                    SELECT Teams.Name, count(*)
+                    FROM raw.TeamMemberships as TeamMemberships
+                    join raw.Teams
+                    on Teams.Id = TeamMemberships.TeamId
+                    GROUP BY 1
+                    """,
+                    read="bigquery",
+                ),
+                schema={
+                    "raw": {
+                        "TeamMemberships": {
+                            "Id": "INTEGER",
+                            "UserId": "INTEGER",
+                            "TeamId": "INTEGER",
+                        },
+                        "Teams": {
+                            "Id": "INTEGER",
+                            "Name": "STRING",
+                        },
+                    }
+                },
+                dialect="bigquery",
+            ).sql(dialect="bigquery"),
+            "SELECT `teams`.`name` AS `name`, count(*) AS `_col_1` FROM `raw`.`TeamMemberships` AS `teammemberships` JOIN `raw`.`Teams` AS `teams` ON `teams`.`id` = `teammemberships`.`teamid` GROUP BY `teams`.`name`",
+        )
         self.assertEqual(
             optimizer.qualify.qualify(
                 parse_one(
@@ -888,3 +888,15 @@ class TestParser(unittest.TestCase):
         ast = parse_one("ALTER TABLE tbl DROP COLUMN col")
         self.assertEqual(len(list(ast.find_all(exp.Table))), 1)
         self.assertEqual(len(list(ast.find_all(exp.Column))), 1)
+
+    def test_udf_meta(self):
+        ast = parse_one("YEAR(a) /* sqlglot.anonymous */")
+        self.assertIsInstance(ast, exp.Anonymous)
+
+        # Meta flag is case sensitive
+        ast = parse_one("YEAR(a) /* sqlglot.anONymous */")
+        self.assertIsInstance(ast, exp.Year)
+
+        # Incomplete or incorrect anonymous meta comments are not registered
+        ast = parse_one("YEAR(a) /* sqlglot.anon */")
+        self.assertIsInstance(ast, exp.Year)
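The meta comment acts as an escape hatch that keeps a known function name from being parsed into its typed node. A sketch mirroring the test, assuming sqlglot is importable:

    from sqlglot import exp, parse_one

    # exact, lowercase marker: parsed as an anonymous function
    assert isinstance(parse_one("YEAR(a) /* sqlglot.anonymous */"), exp.Anonymous)
    # wrong case or truncated markers are ignored: parsed as exp.Year
    assert isinstance(parse_one("YEAR(a) /* sqlglot.anONymous */"), exp.Year)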