Adding upstream version 25.7.1.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent f9dae8e951
commit 8356f462bb

102 changed files with 52995 additions and 52070 deletions
@@ -612,6 +612,7 @@ LANGUAGE js AS
            write={
                "bigquery": "SELECT DATETIME_ADD('2023-01-01T00:00:00', INTERVAL 1 MILLISECOND)",
                "databricks": "SELECT TIMESTAMPADD(MILLISECOND, 1, '2023-01-01T00:00:00')",
                "duckdb": "SELECT CAST('2023-01-01T00:00:00' AS DATETIME) + INTERVAL 1 MILLISECOND",
            },
        ),
    )
@@ -621,6 +622,7 @@ LANGUAGE js AS
            write={
                "bigquery": "SELECT DATETIME_SUB('2023-01-01T00:00:00', INTERVAL 1 MILLISECOND)",
                "databricks": "SELECT TIMESTAMPADD(MILLISECOND, 1 * -1, '2023-01-01T00:00:00')",
                "duckdb": "SELECT CAST('2023-01-01T00:00:00' AS DATETIME) - INTERVAL 1 MILLISECOND",
            },
        ),
    )
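For reference, the Databricks expectations in the two hunks above can be reproduced standalone with sqlglot's transpile API (a minimal sketch; the expected string is copied from the test expectation above, not independently re-verified):

import sqlglot

# BigQuery DATETIME_ADD with a MILLISECOND interval, rendered for Databricks.
sql = "SELECT DATETIME_ADD('2023-01-01T00:00:00', INTERVAL 1 MILLISECOND)"
print(sqlglot.transpile(sql, read="bigquery", write="databricks")[0])
# SELECT TIMESTAMPADD(MILLISECOND, 1, '2023-01-01T00:00:00')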
@@ -1016,7 +1018,7 @@ LANGUAGE js AS
            write={
                "bigquery": "SELECT * FROM UNNEST(['7', '14']) AS x",
                "presto": "SELECT * FROM UNNEST(ARRAY['7', '14']) AS _t0(x)",
                "spark": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS _t0(x)",
                "spark": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS _t0(x)",
            },
        )
        self.validate_all(
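The Presto expectation above is unchanged by this hunk, so it can serve as a stable standalone check (a minimal sketch; the expected string is taken from the test above):

import sqlglot

sql = "SELECT * FROM UNNEST(['7', '14']) AS x"
print(sqlglot.transpile(sql, read="bigquery", write="presto")[0])
# SELECT * FROM UNNEST(ARRAY['7', '14']) AS _t0(x)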
@@ -7,6 +7,9 @@ class TestClickhouse(Validator):
    dialect = "clickhouse"

    def test_clickhouse(self):
        self.validate_identity("SELECT toFloat(like)")
        self.validate_identity("SELECT like")

        string_types = [
            "BLOB",
            "LONGBLOB",
@@ -7,6 +7,7 @@ class TestDatabricks(Validator):
    dialect = "databricks"

    def test_databricks(self):
        self.validate_identity("ALTER TABLE labels ADD COLUMN label_score FLOAT")
        self.validate_identity("DESCRIBE HISTORY a.b")
        self.validate_identity("DESCRIBE history.tbl")
        self.validate_identity("CREATE TABLE t (a STRUCT<c: MAP<STRING, STRING>>)")
@@ -628,7 +628,7 @@ class TestDialect(Validator):
            write={
                "duckdb": "EPOCH(STRPTIME('2020-01-01', '%Y-%m-%d'))",
                "hive": "UNIX_TIMESTAMP('2020-01-01', 'yyyy-MM-dd')",
                "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('2020-01-01' AS VARCHAR), '%Y-%m-%d')), PARSE_DATETIME(CAST('2020-01-01' AS VARCHAR), 'yyyy-MM-dd')))",
                "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('2020-01-01' AS VARCHAR), '%Y-%m-%d')), PARSE_DATETIME(DATE_FORMAT(CAST('2020-01-01' AS TIMESTAMP), '%Y-%m-%d'), 'yyyy-MM-dd')))",
                "starrocks": "UNIX_TIMESTAMP('2020-01-01', '%Y-%m-%d')",
                "doris": "UNIX_TIMESTAMP('2020-01-01', '%Y-%m-%d')",
            },
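A quick way to see the same string-to-unix mapping outside the test, assuming the Hive form shown above parses back to the same expression the test generates from (the expected string is the unchanged duckdb entry above):

import sqlglot

sql = "UNIX_TIMESTAMP('2020-01-01', 'yyyy-MM-dd')"
print(sqlglot.transpile(sql, read="hive", write="duckdb")[0])
# EPOCH(STRPTIME('2020-01-01', '%Y-%m-%d'))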
@@ -308,6 +308,14 @@ class TestDuckDB(Validator):
            "SELECT JSON_EXTRACT(c, '$.k1') = 'v1'",
            "SELECT (c -> '$.k1') = 'v1'",
        )
        self.validate_identity(
            "SELECT JSON_EXTRACT(c, '$[*].id')[0:2]",
            "SELECT (c -> '$[*].id')[0 : 2]",
        )
        self.validate_identity(
            "SELECT JSON_EXTRACT_STRING(c, '$[*].id')[0:2]",
            "SELECT (c ->> '$[*].id')[0 : 2]",
        )
        self.validate_identity(
            """SELECT '{"foo": [1, 2, 3]}' -> 'foo' -> 0""",
            """SELECT '{"foo": [1, 2, 3]}' -> '$.foo' -> '$[0]'""",
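Since validate_identity(input, expected) asserts that the parsed input generates the second string, the first case above can be checked standalone (a minimal sketch, expected output copied from the test):

import sqlglot

sql = "SELECT JSON_EXTRACT(c, '$.k1') = 'v1'"
print(sqlglot.transpile(sql, read="duckdb", write="duckdb")[0])
# SELECT (c -> '$.k1') = 'v1'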
@@ -1048,7 +1056,14 @@ class TestDuckDB(Validator):
            "CAST([STRUCT_PACK(a := 1)] AS STRUCT(a BIGINT)[])",
            "CAST([ROW(1)] AS STRUCT(a BIGINT)[])",
        )

        self.validate_identity(
            "STRUCT_PACK(a := 'b')::json",
            "CAST({'a': 'b'} AS JSON)",
        )
        self.validate_identity(
            "STRUCT_PACK(a := 'b')::STRUCT(a TEXT)",
            "CAST(ROW('b') AS STRUCT(a TEXT))",
        )
        self.validate_all(
            "CAST(x AS VARCHAR(5))",
            write={
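The STRUCT_PACK cases above round-trip through the DuckDB dialect; a minimal sketch of the first one (expected output copied from the test expectation):

import sqlglot

print(sqlglot.transpile("STRUCT_PACK(a := 'b')::json", read="duckdb", write="duckdb")[0])
# CAST({'a': 'b'} AS JSON)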
@@ -372,7 +372,7 @@ class TestHive(Validator):
            "UNIX_TIMESTAMP(x)",
            write={
                "duckdb": "EPOCH(STRPTIME(x, '%Y-%m-%d %H:%M:%S'))",
                "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST(x AS VARCHAR), '%Y-%m-%d %T')), PARSE_DATETIME(CAST(x AS VARCHAR), 'yyyy-MM-dd HH:mm:ss')))",
                "presto": "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST(x AS VARCHAR), '%Y-%m-%d %T')), PARSE_DATETIME(DATE_FORMAT(x, '%Y-%m-%d %T'), 'yyyy-MM-dd HH:mm:ss')))",
                "hive": "UNIX_TIMESTAMP(x)",
                "spark": "UNIX_TIMESTAMP(x)",
                "": "STR_TO_UNIX(x, '%Y-%m-%d %H:%M:%S')",
@@ -24,6 +24,9 @@ class TestMySQL(Validator):
        self.validate_identity("ALTER TABLE t ADD INDEX `i` (`c`)")
        self.validate_identity("ALTER TABLE t ADD UNIQUE `i` (`c`)")
        self.validate_identity("ALTER TABLE test_table MODIFY COLUMN test_column LONGTEXT")
        self.validate_identity(
            "INSERT INTO things (a, b) VALUES (1, 2) AS new_data ON DUPLICATE KEY UPDATE id = LAST_INSERT_ID(id), a = new_data.a, b = new_data.b"
        )
        self.validate_identity(
            "CREATE TABLE `oauth_consumer` (`key` VARCHAR(32) NOT NULL, UNIQUE `OAUTH_CONSUMER_KEY` (`key`))"
        )
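validate_identity with a single argument asserts an unchanged round trip, so the new MySQL INSERT ... AS alias case above can be checked like this (a minimal sketch):

import sqlglot

sql = (
    "INSERT INTO things (a, b) VALUES (1, 2) AS new_data "
    "ON DUPLICATE KEY UPDATE id = LAST_INSERT_ID(id), a = new_data.a, b = new_data.b"
)
# The statement should parse and generate back byte-for-byte in the MySQL dialect.
assert sqlglot.transpile(sql, read="mysql", write="mysql")[0] == sql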
@@ -17,9 +17,6 @@ class TestPostgres(Validator):
        )

        self.validate_identity("SHA384(x)")
        self.validate_identity(
            'CREATE TABLE x (a TEXT COLLATE "de_DE")', "CREATE TABLE x (a TEXT COLLATE de_DE)"
        )
        self.validate_identity("1.x", "1. AS x")
        self.validate_identity("|/ x", "SQRT(x)")
        self.validate_identity("||/ x", "CBRT(x)")
@@ -565,15 +562,10 @@ class TestPostgres(Validator):
                "postgres": "GENERATE_SERIES(CAST('2019-01-01' AS TIMESTAMP), CURRENT_TIMESTAMP, INTERVAL '1 DAY')",
                "presto": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' DAY)",
                "trino": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP AS TIMESTAMP), INTERVAL '1' DAY)",
            },
        )
        self.validate_all(
            "GENERATE_SERIES(a, b)",
            write={
                "postgres": "GENERATE_SERIES(a, b)",
                "presto": "SEQUENCE(a, b)",
                "trino": "SEQUENCE(a, b)",
                "tsql": "GENERATE_SERIES(a, b)",
                "hive": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
                "spark2": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
                "spark": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
                "databricks": "SEQUENCE(CAST('2019-01-01' AS TIMESTAMP), CAST(CURRENT_TIMESTAMP() AS TIMESTAMP), INTERVAL '1' DAY)",
            },
        )
        self.validate_all(
@@ -583,6 +575,20 @@ class TestPostgres(Validator):
                "presto": "SEQUENCE(a, b)",
                "trino": "SEQUENCE(a, b)",
                "tsql": "GENERATE_SERIES(a, b)",
                "hive": "SEQUENCE(a, b)",
                "spark2": "SEQUENCE(a, b)",
                "spark": "SEQUENCE(a, b)",
                "databricks": "SEQUENCE(a, b)",
            },
            write={
                "postgres": "GENERATE_SERIES(a, b)",
                "presto": "SEQUENCE(a, b)",
                "trino": "SEQUENCE(a, b)",
                "tsql": "GENERATE_SERIES(a, b)",
                "hive": "SEQUENCE(a, b)",
                "spark2": "SEQUENCE(a, b)",
                "spark": "SEQUENCE(a, b)",
                "databricks": "SEQUENCE(a, b)",
            },
        )
        self.validate_all(
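The GENERATE_SERIES expectations above imply a simple Postgres-to-Presto mapping that can be tried directly (a minimal sketch; the expected string is the presto entry from the tests above):

import sqlglot

print(sqlglot.transpile("GENERATE_SERIES(a, b)", read="postgres", write="presto")[0])
# SEQUENCE(a, b)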
@@ -759,6 +765,14 @@ class TestPostgres(Validator):
            },
        )

        self.validate_all(
            "SELECT TO_DATE('01/01/2000', 'MM/DD/YYYY')",
            write={
                "duckdb": "SELECT CAST(STRPTIME('01/01/2000', '%m/%d/%Y') AS DATE)",
                "postgres": "SELECT TO_DATE('01/01/2000', 'MM/DD/YYYY')",
            },
        )

    def test_ddl(self):
        # Checks that user-defined types are parsed into DataType instead of Identifier
        self.parse_one("CREATE TABLE t (a udt)").this.expressions[0].args["kind"].assert_is(
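The new TO_DATE case above translates a Postgres format string into a DuckDB strptime format (a minimal sketch; expected output copied from the test):

import sqlglot

sql = "SELECT TO_DATE('01/01/2000', 'MM/DD/YYYY')"
print(sqlglot.transpile(sql, read="postgres", write="duckdb")[0])
# SELECT CAST(STRPTIME('01/01/2000', '%m/%d/%Y') AS DATE)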
@@ -775,6 +789,8 @@ class TestPostgres(Validator):
        cdef.args["kind"].assert_is(exp.DataType)
        self.assertEqual(expr.sql(dialect="postgres"), "CREATE TABLE t (x INTERVAL DAY)")

        self.validate_identity('CREATE TABLE x (a TEXT COLLATE "de_DE")')
        self.validate_identity('CREATE TABLE x (a TEXT COLLATE pg_catalog."default")')
        self.validate_identity("CREATE TABLE t (col INT[3][5])")
        self.validate_identity("CREATE TABLE t (col INT[3])")
        self.validate_identity("CREATE INDEX IF NOT EXISTS ON t(c)")
@@ -981,6 +997,34 @@ class TestPostgres(Validator):
        self.validate_identity("CREATE TABLE tbl (col UUID UNIQUE DEFAULT GEN_RANDOM_UUID())")
        self.validate_identity("CREATE TABLE tbl (col UUID, UNIQUE NULLS NOT DISTINCT (col))")

        self.validate_identity("CREATE INDEX CONCURRENTLY ix_table_id ON tbl USING btree(id)")
        self.validate_identity(
            "CREATE INDEX CONCURRENTLY IF NOT EXISTS ix_table_id ON tbl USING btree(id)"
        )

        self.validate_identity(
            """
            CREATE TABLE IF NOT EXISTS public.rental
            (
                inventory_id INT NOT NULL,
                CONSTRAINT rental_customer_id_fkey FOREIGN KEY (customer_id)
                    REFERENCES public.customer (customer_id) MATCH FULL
                    ON UPDATE CASCADE
                    ON DELETE RESTRICT,
                CONSTRAINT rental_inventory_id_fkey FOREIGN KEY (inventory_id)
                    REFERENCES public.inventory (inventory_id) MATCH PARTIAL
                    ON UPDATE CASCADE
                    ON DELETE RESTRICT,
                CONSTRAINT rental_staff_id_fkey FOREIGN KEY (staff_id)
                    REFERENCES public.staff (staff_id) MATCH SIMPLE
                    ON UPDATE CASCADE
                    ON DELETE RESTRICT,
                INITIALLY IMMEDIATE
            )
            """,
            "CREATE TABLE IF NOT EXISTS public.rental (inventory_id INT NOT NULL, CONSTRAINT rental_customer_id_fkey FOREIGN KEY (customer_id) REFERENCES public.customer (customer_id) MATCH FULL ON UPDATE CASCADE ON DELETE RESTRICT, CONSTRAINT rental_inventory_id_fkey FOREIGN KEY (inventory_id) REFERENCES public.inventory (inventory_id) MATCH PARTIAL ON UPDATE CASCADE ON DELETE RESTRICT, CONSTRAINT rental_staff_id_fkey FOREIGN KEY (staff_id) REFERENCES public.staff (staff_id) MATCH SIMPLE ON UPDATE CASCADE ON DELETE RESTRICT, INITIALLY IMMEDIATE)",
        )

        with self.assertRaises(ParseError):
            transpile("CREATE TABLE products (price DECIMAL CHECK price > 0)", read="postgres")
        with self.assertRaises(ParseError):
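The new CREATE INDEX CONCURRENTLY cases above are identity checks in the Postgres dialect; a minimal standalone sketch:

import sqlglot

ddl = "CREATE INDEX CONCURRENTLY IF NOT EXISTS ix_table_id ON tbl USING btree(id)"
# Should parse and regenerate unchanged for Postgres.
assert sqlglot.transpile(ddl, read="postgres", write="postgres")[0] == ddl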
@@ -413,6 +413,19 @@ class TestPresto(Validator):
            },
        )

        self.validate_identity("DATE_ADD('DAY', FLOOR(5), y)")
        self.validate_identity(
            """SELECT DATE_ADD('DAY', MOD(5, 2.5), y), DATE_ADD('DAY', CEIL(5.5), y)""",
            """SELECT DATE_ADD('DAY', CAST(5 % 2.5 AS BIGINT), y), DATE_ADD('DAY', CAST(CEIL(5.5) AS BIGINT), y)""",
        )

        self.validate_all(
            "DATE_ADD('MINUTE', CAST(FLOOR(CAST(EXTRACT(MINUTE FROM CURRENT_TIMESTAMP) AS DOUBLE) / NULLIF(30, 0)) * 30 AS BIGINT), col)",
            read={
                "spark": "TIMESTAMPADD(MINUTE, FLOOR(EXTRACT(MINUTE FROM CURRENT_TIMESTAMP)/30)*30, col)",
            },
        )

    def test_ddl(self):
        self.validate_all(
            "CREATE TABLE test WITH (FORMAT = 'PARQUET') AS SELECT 1",
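The read entry above means the Spark expression should produce the Presto SQL shown as the first argument; a minimal sketch of that direction (expected output copied from the test):

import sqlglot

spark_sql = "TIMESTAMPADD(MINUTE, FLOOR(EXTRACT(MINUTE FROM CURRENT_TIMESTAMP)/30)*30, col)"
print(sqlglot.transpile(spark_sql, read="spark", write="presto")[0])
# DATE_ADD('MINUTE', CAST(FLOOR(CAST(EXTRACT(MINUTE FROM CURRENT_TIMESTAMP) AS DOUBLE) / NULLIF(30, 0)) * 30 AS BIGINT), col)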
@@ -942,8 +955,8 @@ class TestPresto(Validator):
            write={
                "bigquery": "SELECT * FROM UNNEST(['7', '14'])",
                "presto": "SELECT * FROM UNNEST(ARRAY['7', '14']) AS x",
                "hive": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x",
                "spark": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x",
                "hive": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x",
                "spark": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x",
            },
        )
        self.validate_all(
@@ -951,8 +964,8 @@ class TestPresto(Validator):
            write={
                "bigquery": "SELECT * FROM UNNEST(['7', '14']) AS y",
                "presto": "SELECT * FROM UNNEST(ARRAY['7', '14']) AS x(y)",
                "hive": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x(y)",
                "spark": "SELECT * FROM UNNEST(ARRAY('7', '14')) AS x(y)",
                "hive": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x(y)",
                "spark": "SELECT * FROM EXPLODE(ARRAY('7', '14')) AS x(y)",
            },
        )
        self.validate_all(
@@ -11,6 +11,10 @@ class TestSnowflake(Validator):
    dialect = "snowflake"

    def test_snowflake(self):
        self.validate_identity(
            "SELECT * FROM table AT (TIMESTAMP => '2024-07-24') UNPIVOT(a FOR b IN (c)) AS pivot_table"
        )

        self.assertEqual(
            # Ensures we don't fail when generating ParseJSON with the `safe` arg set to `True`
            self.validate_identity("""SELECT TRY_PARSE_JSON('{"x: 1}')""").sql(),
@@ -827,6 +831,22 @@ WHERE
            },
        )

        self.validate_all(
            "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT('key5', 'value5'), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
            write={
                "snowflake": "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT('key5', 'value5'), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
                "duckdb": "SELECT STRUCT_INSERT(STRUCT_INSERT(STRUCT_INSERT({'key5': 'value5'}, key1 := 5), key2 := 2.2), key3 := 'value3')",
            },
        )

        self.validate_all(
            "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT(), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
            write={
                "snowflake": "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT(), 'key1', 5), 'key2', 2.2), 'key3', 'value3')",
                "duckdb": "SELECT STRUCT_INSERT(STRUCT_INSERT(STRUCT_PACK(key1 := 5), key2 := 2.2), key3 := 'value3')",
            },
        )

    def test_null_treatment(self):
        self.validate_all(
            r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",
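The new OBJECT_INSERT cases above map nested Snowflake object updates onto DuckDB STRUCT_INSERT calls; a minimal standalone sketch (expected output copied from the duckdb entry above):

import sqlglot

sql = "SELECT OBJECT_INSERT(OBJECT_INSERT(OBJECT_INSERT(OBJECT_CONSTRUCT('key5', 'value5'), 'key1', 5), 'key2', 2.2), 'key3', 'value3')"
print(sqlglot.transpile(sql, read="snowflake", write="duckdb")[0])
# SELECT STRUCT_INSERT(STRUCT_INSERT(STRUCT_INSERT({'key5': 'value5'}, key1 := 5), key2 := 2.2), key3 := 'value3')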
@@ -899,6 +919,11 @@ WHERE
            "SELECT * FROM @foo/bar (FILE_FORMAT => ds_sandbox.test.my_csv_format, PATTERN => 'test') AS bla",
        )

        self.validate_identity(
            "SELECT * FROM @test.public.thing/location/somefile.csv( FILE_FORMAT => 'fmt' )",
            "SELECT * FROM @test.public.thing/location/somefile.csv (FILE_FORMAT => 'fmt')",
        )

    def test_sample(self):
        self.validate_identity("SELECT * FROM testtable TABLESAMPLE BERNOULLI (20.3)")
        self.validate_identity("SELECT * FROM testtable TABLESAMPLE SYSTEM (3) SEED (82)")
@@ -391,6 +391,17 @@ class TestTSQL(Validator):
        self.validate_identity("HASHBYTES('MD2', 'x')")
        self.validate_identity("LOG(n, b)")

        self.validate_all(
            "STDEV(x)",
            read={
                "": "STDDEV(x)",
            },
            write={
                "": "STDDEV(x)",
                "tsql": "STDEV(x)",
            },
        )

    def test_option(self):
        possible_options = [
            "HASH GROUP",
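The STDEV test above maps between the generic dialect's STDDEV and T-SQL's STDEV in both directions; a minimal sketch of the same round trip:

import sqlglot

print(sqlglot.parse_one("STDDEV(x)").sql(dialect="tsql"))  # STDEV(x)
print(sqlglot.parse_one("STDEV(x)", read="tsql").sql())    # STDDEV(x)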
@@ -888,6 +899,14 @@ class TestTSQL(Validator):
            },
        )

        for colstore in ("NONCLUSTERED COLUMNSTORE", "CLUSTERED COLUMNSTORE"):
            self.validate_identity(f"CREATE {colstore} INDEX index_name ON foo.bar")

        self.validate_identity(
            "CREATE COLUMNSTORE INDEX index_name ON foo.bar",
            "CREATE NONCLUSTERED COLUMNSTORE INDEX index_name ON foo.bar",
        )

    def test_insert_cte(self):
        self.validate_all(
            "INSERT INTO foo.bar WITH cte AS (SELECT 1 AS one) SELECT * FROM cte",
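Per the test above, a bare COLUMNSTORE INDEX is normalized to NONCLUSTERED COLUMNSTORE on output; a minimal sketch (expected output copied from the test):

import sqlglot

ddl = "CREATE COLUMNSTORE INDEX index_name ON foo.bar"
print(sqlglot.transpile(ddl, read="tsql", write="tsql")[0])
# CREATE NONCLUSTERED COLUMNSTORE INDEX index_name ON foo.bar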
tests/fixtures/identity.sql (vendored, 2 changes)
@@ -204,6 +204,7 @@ USE ROLE x
USE WAREHOUSE x
USE DATABASE x
USE SCHEMA x.y
USE CATALOG abc
NOT 1
NOT NOT 1
SELECT * FROM test
@@ -870,3 +871,4 @@ SELECT unnest
SELECT * FROM a STRAIGHT_JOIN b
SELECT COUNT(DISTINCT "foo bar") FROM (SELECT 1 AS "foo bar") AS t
SELECT vector
WITH all AS (SELECT 1 AS count) SELECT all.count FROM all
tests/fixtures/optimizer/optimizer.sql (vendored, 44 changes)
@@ -547,7 +547,8 @@ FROM (
  "tb"."b" AS "b",
  "tb"."c" AS "c"
FROM "sc"."tb" AS "tb"
) AS "_q_0" PIVOT(SUM("_q_0"."c") FOR "_q_0"."b" IN ('x', 'y', 'z')) AS "_q_1";
) AS "_q_0"
PIVOT(SUM("_q_0"."c") FOR "_q_0"."b" IN ('x', 'y', 'z')) AS "_q_1";

# title: pivoted source with explicit selections where one of them is excluded & selected at the same time
# note: we need to respect the exclude when selecting * from pivoted source and not include the computed column twice
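The fixture changes in this file move PIVOT/UNPIVOT clauses onto their own line in pretty output. A rough way to inspect the generator's pretty formatting outside the optimizer fixtures (a sketch only; the fixtures above additionally qualify and expand columns, so exact output will differ):

import sqlglot

sql = "SELECT * FROM u PIVOT (SUM(f) FOR h IN ('x', 'y'))"
print(sqlglot.parse_one(sql, read="snowflake").sql(pretty=True))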
@@ -564,7 +565,8 @@ FROM (
  "tb"."b" AS "b",
  "tb"."c" AS "c"
FROM "sc"."tb" AS "tb"
) AS "_q_0" PIVOT(SUM("_q_0"."c") FOR "_q_0"."b" IN ('x', 'y', 'z')) AS "_q_1";
) AS "_q_0"
PIVOT(SUM("_q_0"."c") FOR "_q_0"."b" IN ('x', 'y', 'z')) AS "_q_1";

# title: pivoted source with implicit selections
# execute: false
@@ -579,7 +581,8 @@ FROM (
  "u"."g" AS "g",
  "u"."h" AS "h"
FROM "u" AS "u"
) AS "_q_0" PIVOT(SUM("_q_0"."f") FOR "_q_0"."h" IN ('x', 'y')) AS "_q_1";
) AS "_q_0"
PIVOT(SUM("_q_0"."f") FOR "_q_0"."h" IN ('x', 'y')) AS "_q_1";

# title: selecting explicit qualified columns from pivoted source with explicit selections
# execute: false
@@ -592,7 +595,8 @@ FROM (
  "u"."f" AS "f",
  "u"."h" AS "h"
FROM "u" AS "u"
) AS "_q_0" PIVOT(SUM("_q_0"."f") FOR "_q_0"."h" IN ('x', 'y')) AS "piv";
) AS "_q_0"
PIVOT(SUM("_q_0"."f") FOR "_q_0"."h" IN ('x', 'y')) AS "piv";

# title: selecting explicit unqualified columns from pivoted source with implicit selections
# execute: false
@@ -600,7 +604,8 @@ SELECT x, y FROM u PIVOT (SUM(f) FOR h IN ('x', 'y'));
SELECT
  "_q_0"."x" AS "x",
  "_q_0"."y" AS "y"
FROM "u" AS "u" PIVOT(SUM("u"."f") FOR "u"."h" IN ('x', 'y')) AS "_q_0";
FROM "u" AS "u"
PIVOT(SUM("u"."f") FOR "u"."h" IN ('x', 'y')) AS "_q_0";

# title: selecting all columns from a pivoted CTE source, using alias for the aggregation and generating bigquery
# execute: false
@@ -617,7 +622,8 @@ SELECT
  `_q_0`.`g` AS `g`,
  `_q_0`.`sum_x` AS `sum_x`,
  `_q_0`.`sum_y` AS `sum_y`
FROM `u_cte` AS `u_cte` PIVOT(SUM(`u_cte`.`f`) AS `sum` FOR `u_cte`.`h` IN ('x', 'y')) AS `_q_0`;
FROM `u_cte` AS `u_cte`
PIVOT(SUM(`u_cte`.`f`) AS `sum` FOR `u_cte`.`h` IN ('x', 'y')) AS `_q_0`;

# title: selecting all columns from a pivoted source and generating snowflake
# execute: false
@@ -627,7 +633,8 @@ SELECT
  "_q_0"."G" AS "G",
  "_q_0"."'x'" AS "'x'",
  "_q_0"."'y'" AS "'y'"
FROM "U" AS "U" PIVOT(SUM("U"."F") FOR "U"."H" IN ('x', 'y')) AS "_q_0";
FROM "U" AS "U"
PIVOT(SUM("U"."F") FOR "U"."H" IN ('x', 'y')) AS "_q_0";

# title: selecting all columns from a pivoted source and generating spark
# note: spark doesn't allow pivot aliases or qualified columns for the pivot's "field" (`h`)
@@ -641,7 +648,8 @@ SELECT
FROM (
  SELECT
    *
  FROM `u` AS `u` PIVOT(SUM(`u`.`f`) FOR `h` IN ('x', 'y'))
  FROM `u` AS `u`
  PIVOT(SUM(`u`.`f`) FOR `h` IN ('x', 'y'))
) AS `_q_0`;

# title: selecting all columns from a pivoted source, pivot has column aliases
@@ -674,7 +682,8 @@ WITH "SOURCE" AS (
SELECT
  "FINAL"."ID" AS "ID",
  "FINAL"."TIMESTAMP_1" AS "TIMESTAMP_1"
FROM "SOURCE" AS "SOURCE" PIVOT(MAX("SOURCE"."VALUE") FOR "SOURCE"."KEY" IN ('a', 'b', 'c')) AS "FINAL"("ID", "TIMESTAMP_1", "TIMESTAMP_2", "COL_1", "COL_2", "COL_3");
FROM "SOURCE" AS "SOURCE"
PIVOT(MAX("SOURCE"."VALUE") FOR "SOURCE"."KEY" IN ('a', 'b', 'c')) AS "FINAL"("ID", "TIMESTAMP_1", "TIMESTAMP_2", "COL_1", "COL_2", "COL_3");

# title: unpivoted table source with a single value column, unpivot columns can't be qualified
# execute: false
@@ -685,7 +694,8 @@ SELECT
  "_q_0"."DEPT" AS "DEPT",
  "_q_0"."MONTH" AS "MONTH",
  "_q_0"."SALES" AS "SALES"
FROM "M_SALES" AS "M_SALES"("EMPID", "DEPT", "JAN", "FEB") UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_q_0"
FROM "M_SALES" AS "M_SALES"("EMPID", "DEPT", "JAN", "FEB")
UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_q_0"
ORDER BY
  "_q_0"."EMPID";
@@ -704,7 +714,8 @@ FROM (
  "m_sales"."jan" AS "jan",
  "m_sales"."feb" AS "feb"
FROM "m_sales" AS "m_sales"
) AS "m_sales" UNPIVOT("sales" FOR "month" IN ("m_sales"."jan", "m_sales"."feb")) AS "unpiv"("a", "b", "c", "d");
) AS "m_sales"
UNPIVOT("sales" FOR "month" IN ("m_sales"."jan", "m_sales"."feb")) AS "unpiv"("a", "b", "c", "d");

# title: unpivoted derived table source with a single value column
# execute: false
@@ -722,20 +733,22 @@ FROM (
  "M_SALES"."JAN" AS "JAN",
  "M_SALES"."FEB" AS "FEB"
FROM "M_SALES" AS "M_SALES"
) AS "M_SALES" UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_q_0"
) AS "M_SALES"
UNPIVOT("SALES" FOR "MONTH" IN ("JAN", "FEB")) AS "_q_0"
ORDER BY
  "_q_0"."EMPID";

# title: unpivoted table source with a single value column, unpivot columns can be qualified
# execute: false
# dialect: bigquery
# note: the named columns aren't supported by BQ but we add them here to avoid defining a schema
# note: the named columns are not supported by BQ but we add them here to avoid defining a schema
SELECT * FROM produce AS produce(product, q1, q2, q3, q4) UNPIVOT(sales FOR quarter IN (q1, q2, q3, q4));
SELECT
  `_q_0`.`product` AS `product`,
  `_q_0`.`quarter` AS `quarter`,
  `_q_0`.`sales` AS `sales`
FROM `produce` AS `produce` UNPIVOT(`sales` FOR `quarter` IN (`produce`.`q1`, `produce`.`q2`, `produce`.`q3`, `produce`.`q4`)) AS `_q_0`;
FROM `produce` AS `produce`
UNPIVOT(`sales` FOR `quarter` IN (`produce`.`q1`, `produce`.`q2`, `produce`.`q3`, `produce`.`q4`)) AS `_q_0`;

# title: unpivoted table source with multiple value columns
# execute: false
@@ -746,7 +759,8 @@ SELECT
  `_q_0`.`semesters` AS `semesters`,
  `_q_0`.`first_half_sales` AS `first_half_sales`,
  `_q_0`.`second_half_sales` AS `second_half_sales`
FROM `produce` AS `produce` UNPIVOT((`first_half_sales`, `second_half_sales`) FOR `semesters` IN ((`produce`.`q1`, `produce`.`q2`) AS 'semester_1', (`produce`.`q3`, `produce`.`q4`) AS 'semester_2')) AS `_q_0`;
FROM `produce` AS `produce`
UNPIVOT((`first_half_sales`, `second_half_sales`) FOR `semesters` IN ((`produce`.`q1`, `produce`.`q2`) AS 'semester_1', (`produce`.`q3`, `produce`.`q4`) AS 'semester_2')) AS `_q_0`;

# title: quoting is preserved
# dialect: snowflake
@@ -1120,3 +1120,6 @@ FROM foo""",
            AssertionError, "x is not <class 'sqlglot.expressions.Identifier'>\\."
        ):
            parse_one("x").assert_is(exp.Identifier)

    def test_parse_identifier(self):
        self.assertEqual(exp.parse_identifier("a ' b"), exp.to_identifier("a ' b"))
@@ -561,6 +561,10 @@ FROM x""",
            AND Z""",
            """SELECT X FROM catalog.db.table WHERE Y AND Z""",
        )
        self.validate(
            """with a as /* comment */ ( select * from b) select * from a""",
            """WITH a AS (SELECT * FROM b) /* comment */ SELECT * FROM a""",
        )

    def test_types(self):
        self.validate("INT 1", "CAST(1 AS INT)")
@@ -760,7 +764,7 @@ FROM x""",
        self.validate("STR_TO_TIME('x', 'y')", "DATE_PARSE('x', 'y')", write="presto")
        self.validate(
            "STR_TO_UNIX('x', 'y')",
            "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('x' AS VARCHAR), 'y')), PARSE_DATETIME(CAST('x' AS VARCHAR), 'y')))",
            "TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('x' AS VARCHAR), 'y')), PARSE_DATETIME(DATE_FORMAT(CAST('x' AS TIMESTAMP), 'y'), 'y')))",
            write="presto",
        )
        self.validate("TIME_TO_STR(x, 'y')", "DATE_FORMAT(x, 'y')", write="presto")
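A standalone sketch of the STR_TO_UNIX mapping exercised above; which of the two expected strings in the hunk you get depends on the sqlglot version installed, so the output comment below mirrors the second expectation from the diff rather than a re-verified result:

import sqlglot

print(sqlglot.transpile("STR_TO_UNIX('x', 'y')", write="presto")[0])
# TO_UNIXTIME(COALESCE(TRY(DATE_PARSE(CAST('x' AS VARCHAR), 'y')), PARSE_DATETIME(DATE_FORMAT(CAST('x' AS TIMESTAMP), 'y'), 'y')))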