
Adding upstream version 26.6.0.

Signed-off-by: Daniel Baumann <mail@daniel-baumann.ch>
This commit is contained in:
Daniel Baumann 2025-02-13 22:07:36 +01:00
parent cfc058b43a
commit 4b797b16f0
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
99 changed files with 40433 additions and 38803 deletions

View file

@@ -234,6 +234,10 @@ LANGUAGE js AS
self.validate_identity(
"CREATE OR REPLACE VIEW test (tenant_id OPTIONS (description='Test description on table creation')) AS SELECT 1 AS tenant_id, 1 AS customer_id",
)
self.validate_identity(
"ARRAY(SELECT AS STRUCT e.x AS y, e.z AS bla FROM UNNEST(bob))::ARRAY<STRUCT<y STRING, bro NUMERIC>>",
"CAST(ARRAY(SELECT AS STRUCT e.x AS y, e.z AS bla FROM UNNEST(bob)) AS ARRAY<STRUCT<y STRING, bro NUMERIC>>)",
)
self.validate_identity(
"SELECT * FROM `proj.dataset.INFORMATION_SCHEMA.SOME_VIEW`",
"SELECT * FROM `proj.dataset.INFORMATION_SCHEMA.SOME_VIEW` AS `proj.dataset.INFORMATION_SCHEMA.SOME_VIEW`",
@@ -1661,6 +1665,13 @@ WHERE
)
self.validate_identity("SELECT * FROM ML.FEATURES_AT_TIME((SELECT 1), num_rows => 1)")
self.validate_identity(
"EXPORT DATA OPTIONS (URI='gs://path*.csv.gz', FORMAT='CSV') AS SELECT * FROM all_rows"
)
self.validate_identity(
"EXPORT DATA WITH CONNECTION myproject.us.myconnection OPTIONS (URI='gs://path*.csv.gz', FORMAT='CSV') AS SELECT * FROM all_rows"
)
def test_errors(self):
with self.assertRaises(TokenError):
transpile("'\\'", read="bigquery")
@@ -2035,10 +2046,10 @@ OPTIONS (
)
self.validate_all(
"SELECT ARRAY<INT>[1, 2, 3]",
"SELECT ARRAY<FLOAT64>[1, 2, 3]",
write={
"bigquery": "SELECT CAST([1, 2, 3] AS ARRAY<INT64>)",
"duckdb": "SELECT CAST([1, 2, 3] AS INT[])",
"bigquery": "SELECT ARRAY<FLOAT64>[1, 2, 3]",
"duckdb": "SELECT CAST([1, 2, 3] AS DOUBLE[])",
},
)
self.validate_all(
@@ -2051,14 +2062,14 @@ OPTIONS (
self.validate_all(
"SELECT * FROM UNNEST(ARRAY<STRUCT<x INT64>>[])",
write={
"bigquery": "SELECT * FROM UNNEST(CAST([] AS ARRAY<STRUCT<x INT64>>))",
"bigquery": "SELECT * FROM UNNEST(ARRAY<STRUCT<x INT64>>[])",
"duckdb": "SELECT * FROM (SELECT UNNEST(CAST([] AS STRUCT(x BIGINT)[]), max_depth => 2))",
},
)
self.validate_all(
"SELECT * FROM UNNEST(ARRAY<STRUCT<device_id INT64, time DATETIME, signal INT64, state STRING>>[STRUCT(1, DATETIME '2023-11-01 09:34:01', 74, 'INACTIVE'),STRUCT(4, DATETIME '2023-11-01 09:38:01', 80, 'ACTIVE')])",
write={
"bigquery": "SELECT * FROM UNNEST(CAST([STRUCT(1, CAST('2023-11-01 09:34:01' AS DATETIME), 74, 'INACTIVE'), STRUCT(4, CAST('2023-11-01 09:38:01' AS DATETIME), 80, 'ACTIVE')] AS ARRAY<STRUCT<device_id INT64, time DATETIME, signal INT64, state STRING>>))",
"bigquery": "SELECT * FROM UNNEST(ARRAY<STRUCT<device_id INT64, time DATETIME, signal INT64, state STRING>>[STRUCT(1, CAST('2023-11-01 09:34:01' AS DATETIME), 74, 'INACTIVE'), STRUCT(4, CAST('2023-11-01 09:38:01' AS DATETIME), 80, 'ACTIVE')])",
"duckdb": "SELECT * FROM (SELECT UNNEST(CAST([ROW(1, CAST('2023-11-01 09:34:01' AS TIMESTAMP), 74, 'INACTIVE'), ROW(4, CAST('2023-11-01 09:38:01' AS TIMESTAMP), 80, 'ACTIVE')] AS STRUCT(device_id BIGINT, time TIMESTAMP, signal BIGINT, state TEXT)[]), max_depth => 2))",
},
)
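The updated BigQuery expectations above keep the dialect's typed array literal syntax intact on a BigQuery round-trip instead of rewriting it to a CAST. A minimal sketch of observing the same behavior through sqlglot's public API (assuming sqlglot 26.6.0; expected outputs copied from the test expectations above):

import sqlglot

sql = "SELECT ARRAY<FLOAT64>[1, 2, 3]"
# BigQuery -> BigQuery now preserves the typed array literal
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
# SELECT ARRAY<FLOAT64>[1, 2, 3]
# BigQuery -> DuckDB still produces an explicit cast of the plain list
print(sqlglot.transpile(sql, read="bigquery", write="duckdb")[0])
# SELECT CAST([1, 2, 3] AS DOUBLE[])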

View file

@@ -98,6 +98,9 @@ class TestClickhouse(Validator):
self.validate_identity("TRUNCATE TABLE t1 ON CLUSTER test_cluster")
self.validate_identity("TRUNCATE DATABASE db")
self.validate_identity("TRUNCATE DATABASE db ON CLUSTER test_cluster")
self.validate_identity(
"SELECT DATE_BIN(toDateTime('2023-01-01 14:45:00'), INTERVAL '1' MINUTE, toDateTime('2023-01-01 14:35:30'), 'UTC')",
)
self.validate_identity(
"SELECT CAST(1730098800 AS DateTime64) AS DATETIME, 'test' AS interp ORDER BY DATETIME WITH FILL FROM toDateTime64(1730098800, 3) - INTERVAL '7' HOUR TO toDateTime64(1730185140, 3) - INTERVAL '7' HOUR STEP toIntervalSecond(900) INTERPOLATE (interp)"
)
@@ -551,6 +554,9 @@ class TestClickhouse(Validator):
self.validate_identity("SELECT 1_2_3_4_5", "SELECT 12345")
self.validate_identity("SELECT 1_b", "SELECT 1_b")
self.validate_identity(
"SELECT COUNT(1) FROM table SETTINGS additional_table_filters = {'a': 'b', 'c': 'd'}"
)
def test_clickhouse_values(self):
values = exp.select("*").from_(

View file

@@ -32,10 +32,6 @@ class TestDatabricks(Validator):
self.validate_identity(
"CREATE TABLE IF NOT EXISTS db.table (a TIMESTAMP, b BOOLEAN GENERATED ALWAYS AS (NOT a IS NULL)) USING DELTA"
)
self.validate_identity(
"SELECT DATE_FORMAT(CAST(FROM_UTC_TIMESTAMP(foo, 'America/Los_Angeles') AS TIMESTAMP), 'yyyy-MM-dd HH:mm:ss') AS foo FROM t",
"SELECT DATE_FORMAT(CAST(FROM_UTC_TIMESTAMP(CAST(foo AS TIMESTAMP), 'America/Los_Angeles') AS TIMESTAMP), 'yyyy-MM-dd HH:mm:ss') AS foo FROM t",
)
self.validate_identity(
"SELECT * FROM sales UNPIVOT INCLUDE NULLS (sales FOR quarter IN (q1 AS `Jan-Mar`))"
)
@@ -54,6 +50,10 @@ class TestDatabricks(Validator):
self.validate_identity(
"COPY INTO target FROM `s3://link` FILEFORMAT = AVRO VALIDATE = ALL FILES = ('file1', 'file2') FORMAT_OPTIONS ('opt1'='true', 'opt2'='test') COPY_OPTIONS ('mergeSchema'='true')"
)
self.validate_identity(
"SELECT DATE_FORMAT(CAST(FROM_UTC_TIMESTAMP(foo, 'America/Los_Angeles') AS TIMESTAMP), 'yyyy-MM-dd HH:mm:ss') AS foo FROM t",
"SELECT DATE_FORMAT(CAST(FROM_UTC_TIMESTAMP(CAST(foo AS TIMESTAMP), 'America/Los_Angeles') AS TIMESTAMP), 'yyyy-MM-dd HH:mm:ss') AS foo FROM t",
)
self.validate_identity(
"DATE_DIFF(day, created_at, current_date())",
"DATEDIFF(DAY, created_at, CURRENT_DATE)",
@@ -62,6 +62,10 @@ class TestDatabricks(Validator):
r'SELECT r"\\foo.bar\"',
r"SELECT '\\\\foo.bar\\'",
)
self.validate_identity(
"FROM_UTC_TIMESTAMP(x::TIMESTAMP, tz)",
"FROM_UTC_TIMESTAMP(CAST(x AS TIMESTAMP), tz)",
)
self.validate_all(
"CREATE TABLE foo (x INT GENERATED ALWAYS AS (YEAR(y)))",

View file

@@ -83,11 +83,20 @@ class TestDialect(Validator):
maxDiff = None
def test_enum(self):
dialect_by_key = Dialect.classes
for dialect in Dialects:
self.assertIsNotNone(Dialect[dialect])
self.assertIsNotNone(Dialect.get(dialect))
self.assertIsNotNone(Dialect.get_or_raise(dialect))
self.assertIsNotNone(Dialect[dialect.value])
self.assertIn(dialect, dialect_by_key)
def test_lazy_load(self):
import subprocess
code = "import sqlglot; assert len(sqlglot.Dialect._classes) == 1; print('Success')"
result = subprocess.run(["python", "-c", code], capture_output=True, text=True)
assert "Success" in result.stdout
def test_get_or_raise(self):
self.assertIsInstance(Dialect.get_or_raise(Hive), Hive)
@@ -2624,6 +2633,8 @@ SELECT
"snowflake": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
"sqlite": "SELECT SUM(IIF(col % 2 = 0, 1, 0)) FROM foo",
"tsql": "SELECT COUNT_IF(col % 2 = 0) FROM foo",
"postgres": "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FROM foo",
"redshift": "SELECT SUM(CASE WHEN col % 2 = 0 THEN 1 ELSE 0 END) FROM foo",
},
)
self.validate_all(
@@ -3173,6 +3184,7 @@ FROM subquery2""",
"postgres": "GEN_RANDOM_UUID()",
"bigquery": "GENERATE_UUID()",
"snowflake": "UUID_STRING()",
"tsql": "NEWID()",
},
write={
"hive": "UUID()",
@@ -3186,6 +3198,7 @@ FROM subquery2""",
"postgres": "GEN_RANDOM_UUID()",
"bigquery": "GENERATE_UUID()",
"snowflake": "UUID_STRING()",
"tsql": "NEWID()",
},
)
@@ -3306,3 +3319,19 @@ FROM subquery2""",
"postgres": f"PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY x){suffix}",
},
)
def test_current_schema(self):
self.validate_all(
"CURRENT_SCHEMA()",
read={
"mysql": "SCHEMA()",
"postgres": "CURRENT_SCHEMA()",
"tsql": "SCHEMA_NAME()",
},
write={
"sqlite": "'main'",
"mysql": "SCHEMA()",
"postgres": "CURRENT_SCHEMA",
"tsql": "SCHEMA_NAME()",
},
)
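The new test_current_schema case maps the schema-introspection builtins across dialects. A minimal sketch of the same mapping via sqlglot.transpile (expected outputs taken from the write dictionary above; assumes the default read dialect parses CURRENT_SCHEMA() as these tests do):

import sqlglot

for dialect in ("mysql", "postgres", "tsql"):
    print(dialect, sqlglot.transpile("CURRENT_SCHEMA()", write=dialect)[0])
# mysql SCHEMA()
# postgres CURRENT_SCHEMA
# tsql SCHEMA_NAME()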

View file

@@ -256,6 +256,7 @@ class TestDuckDB(Validator):
parse_one("a // b", read="duckdb").assert_is(exp.IntDiv).sql(dialect="duckdb"), "a // b"
)
self.validate_identity("CAST(x AS FOO)")
self.validate_identity("SELECT UNNEST([1, 2])").selects[0].assert_is(exp.UDTF)
self.validate_identity("'red' IN flags").args["field"].assert_is(exp.Column)
self.validate_identity("'red' IN tbl.flags")

View file

@@ -0,0 +1,23 @@
from tests.dialects.test_dialect import Validator
class TestDune(Validator):
dialect = "dune"
def test_dune(self):
self.validate_identity("CAST(x AS INT256)")
self.validate_identity("CAST(x AS UINT256)")
self.validate_all(
"SELECT 0xdeadbeef",
read={
"dune": "SELECT X'deadbeef'",
"postgres": "SELECT x'deadbeef'",
"trino": "SELECT X'deadbeef'",
},
write={
"dune": "SELECT 0xdeadbeef",
"postgres": "SELECT x'deadbeef'",
"trino": "SELECT X'deadbeef'",
},
)
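This is a new test module for the added "dune" dialect; the visible difference here is the 0x... hex literal in place of the X'...' blob literal used by Trino and Postgres. A minimal usage sketch (outputs mirror the read/write dictionaries above):

import sqlglot

# Trino-style blob literals become Dune hex literals...
print(sqlglot.transpile("SELECT X'deadbeef'", read="trino", write="dune")[0])
# SELECT 0xdeadbeef
# ...and go back to X'...' when leaving the dune dialect
print(sqlglot.transpile("SELECT 0xdeadbeef", read="dune", write="trino")[0])
# SELECT X'deadbeef'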

View file

@@ -336,8 +336,8 @@ class TestHive(Validator):
"bigquery": "FORMAT_DATE('%Y-%m-%d %H:%M:%S', CAST('2020-01-01' AS DATETIME))",
"duckdb": "STRFTIME(CAST('2020-01-01' AS TIMESTAMP), '%Y-%m-%d %H:%M:%S')",
"presto": "DATE_FORMAT(CAST('2020-01-01' AS TIMESTAMP), '%Y-%m-%d %T')",
"hive": "DATE_FORMAT(CAST('2020-01-01' AS TIMESTAMP), 'yyyy-MM-dd HH:mm:ss')",
"spark": "DATE_FORMAT(CAST('2020-01-01' AS TIMESTAMP), 'yyyy-MM-dd HH:mm:ss')",
"hive": "DATE_FORMAT('2020-01-01', 'yyyy-MM-dd HH:mm:ss')",
"spark": "DATE_FORMAT('2020-01-01', 'yyyy-MM-dd HH:mm:ss')",
},
)
self.validate_all(
@@ -758,7 +758,7 @@ class TestHive(Validator):
self.validate_all(
"SELECT a, SUM(c) FROM t GROUP BY a, DATE_FORMAT(b, 'yyyy'), GROUPING SETS ((a, DATE_FORMAT(b, 'yyyy')), a)",
write={
"hive": "SELECT a, SUM(c) FROM t GROUP BY a, DATE_FORMAT(CAST(b AS TIMESTAMP), 'yyyy'), GROUPING SETS ((a, DATE_FORMAT(CAST(b AS TIMESTAMP), 'yyyy')), a)",
"hive": "SELECT a, SUM(c) FROM t GROUP BY a, DATE_FORMAT(b, 'yyyy'), GROUPING SETS ((a, DATE_FORMAT(b, 'yyyy')), a)",
},
)
self.validate_all(

View file

@@ -253,8 +253,7 @@ class TestMySQL(Validator):
self.validate_identity("SET GLOBAL TRANSACTION ISOLATION LEVEL SERIALIZABLE")
self.validate_identity("SET TRANSACTION READ ONLY")
self.validate_identity("SET GLOBAL TRANSACTION ISOLATION LEVEL REPEATABLE READ, READ WRITE")
self.validate_identity("SELECT SCHEMA()")
self.validate_identity("SELECT DATABASE()")
self.validate_identity("DATABASE()", "SCHEMA()")
self.validate_identity(
"SET GLOBAL sort_buffer_size = 1000000, SESSION sort_buffer_size = 1000000"
)

View file

@@ -16,6 +16,7 @@ class TestOracle(Validator):
)
self.parse_one("ALTER TABLE tbl_name DROP FOREIGN KEY fk_symbol").assert_is(exp.Alter)
self.validate_identity("CAST(value AS NUMBER DEFAULT 0 ON CONVERSION ERROR)")
self.validate_identity("SYSDATE")
self.validate_identity("CREATE GLOBAL TEMPORARY TABLE t AS SELECT * FROM orders")
self.validate_identity("CREATE PRIVATE TEMPORARY TABLE t AS SELECT * FROM orders")
@@ -79,6 +80,10 @@ class TestOracle(Validator):
self.validate_identity(
"SELECT MIN(column_name) KEEP (DENSE_RANK FIRST ORDER BY column_name DESC) FROM table_name"
)
self.validate_identity(
"SELECT CAST('January 15, 1989, 11:00 A.M.' AS DATE DEFAULT NULL ON CONVERSION ERROR, 'Month dd, YYYY, HH:MI A.M.') FROM DUAL",
"SELECT TO_DATE('January 15, 1989, 11:00 A.M.', 'Month dd, YYYY, HH12:MI P.M.') FROM DUAL",
)
self.validate_identity(
"SELECT TRUNC(SYSDATE)",
"SELECT TRUNC(SYSDATE, 'DD')",
@@ -300,6 +305,14 @@ class TestOracle(Validator):
"SELECT /*+ ORDERED */ * /* test */ FROM tbl",
)
self.validate_all(
"SELECT * FROM t FETCH FIRST 10 ROWS ONLY",
write={
"oracle": "SELECT * FROM t FETCH FIRST 10 ROWS ONLY",
"tsql": "SELECT * FROM t ORDER BY (SELECT NULL) OFFSET 0 ROWS FETCH FIRST 10 ROWS ONLY",
},
)
def test_join_marker(self):
self.validate_identity("SELECT e1.x, e2.x FROM e e1, e e2 WHERE e1.y (+) = e2.y")
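The new FETCH FIRST case above exercises Oracle-to-T-SQL limit handling, where T-SQL needs a dummy ORDER BY to host the OFFSET/FETCH clause. A minimal sketch (expected output copied from the write dictionary in that test):

import sqlglot

print(sqlglot.transpile("SELECT * FROM t FETCH FIRST 10 ROWS ONLY", read="oracle", write="tsql")[0])
# SELECT * FROM t ORDER BY (SELECT NULL) OFFSET 0 ROWS FETCH FIRST 10 ROWS ONLY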

View file

@@ -73,6 +73,7 @@ class TestPostgres(Validator):
self.validate_identity("SELECT * FROM r CROSS JOIN LATERAL UNNEST(ARRAY[1]) AS s(location)")
self.validate_identity("CAST(1 AS DECIMAL) / CAST(2 AS DECIMAL) * -100")
self.validate_identity("EXEC AS myfunc @id = 123", check_command_warning=True)
self.validate_identity("SELECT CURRENT_SCHEMA")
self.validate_identity("SELECT CURRENT_USER")
self.validate_identity("SELECT * FROM ONLY t1")
self.validate_identity(
@@ -144,10 +145,6 @@ class TestPostgres(Validator):
"SELECT ARRAY[1, 2, 3] <@ ARRAY[1, 2]",
"SELECT ARRAY[1, 2] @> ARRAY[1, 2, 3]",
)
self.validate_identity(
"SELECT ARRAY[]::INT[] AS foo",
"SELECT CAST(ARRAY[] AS INT[]) AS foo",
)
self.validate_identity(
"SELECT DATE_PART('isodow'::varchar(6), current_date)",
"SELECT EXTRACT(CAST('isodow' AS VARCHAR(6)) FROM CURRENT_DATE)",
@@ -349,6 +346,13 @@ class TestPostgres(Validator):
"CAST(x AS BIGINT)",
)
self.validate_all(
"SELECT ARRAY[]::INT[] AS foo",
write={
"postgres": "SELECT CAST(ARRAY[] AS INT[]) AS foo",
"duckdb": "SELECT CAST([] AS INT[]) AS foo",
},
)
self.validate_all(
"STRING_TO_ARRAY('xx~^~yy~^~zz', '~^~', 'yy')",
read={
@@ -837,6 +841,30 @@ class TestPostgres(Validator):
"SELECT PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY a) FILTER(WHERE CAST(b AS BOOLEAN)) AS mean_value FROM (VALUES (0, 't')) AS fake_data(a, b)"
)
self.validate_all(
"SELECT JSON_OBJECT_AGG(k, v) FROM t",
write={
"postgres": "SELECT JSON_OBJECT_AGG(k, v) FROM t",
"duckdb": "SELECT JSON_GROUP_OBJECT(k, v) FROM t",
},
)
self.validate_all(
"SELECT JSONB_OBJECT_AGG(k, v) FROM t",
write={
"postgres": "SELECT JSONB_OBJECT_AGG(k, v) FROM t",
"duckdb": "SELECT JSON_GROUP_OBJECT(k, v) FROM t",
},
)
self.validate_all(
"SELECT DATE_BIN('30 days', timestamp_col, (SELECT MIN(TIMESTAMP) from table)) FROM table",
write={
"postgres": "SELECT DATE_BIN('30 days', timestamp_col, (SELECT MIN(TIMESTAMP) FROM table)) FROM table",
"duckdb": 'SELECT TIME_BUCKET(\'30 days\', timestamp_col, (SELECT MIN(TIMESTAMP) FROM "table")) FROM "table"',
},
)
def test_ddl(self):
# Checks that user-defined types are parsed into DataType instead of Identifier
self.parse_one("CREATE TABLE t (a udt)").this.expressions[0].args["kind"].assert_is(
@@ -1060,7 +1088,8 @@
"duckdb": "CREATE TABLE x (a UUID, b BLOB)",
"presto": "CREATE TABLE x (a UUID, b VARBINARY)",
"hive": "CREATE TABLE x (a UUID, b BINARY)",
"spark": "CREATE TABLE x (a UUID, b BINARY)",
"spark": "CREATE TABLE x (a STRING, b BINARY)",
"tsql": "CREATE TABLE x (a UNIQUEIDENTIFIER, b VARBINARY)",
},
)
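The Postgres additions above cover the JSON aggregate and DATE_BIN translations to DuckDB, plus the empty typed array literal now checked via validate_all. A minimal sketch (outputs copied from the write dictionaries in those tests):

import sqlglot

print(sqlglot.transpile("SELECT JSON_OBJECT_AGG(k, v) FROM t", read="postgres", write="duckdb")[0])
# SELECT JSON_GROUP_OBJECT(k, v) FROM t
print(sqlglot.transpile("SELECT ARRAY[]::INT[] AS foo", read="postgres", write="duckdb")[0])
# SELECT CAST([] AS INT[]) AS foo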

View file

@@ -104,7 +104,7 @@ class TestPresto(Validator):
self.validate_all(
"CAST(ARRAY[1, 2] AS ARRAY(BIGINT))",
write={
"bigquery": "CAST([1, 2] AS ARRAY<INT64>)",
"bigquery": "ARRAY<INT64>[1, 2]",
"duckdb": "CAST([1, 2] AS BIGINT[])",
"presto": "CAST(ARRAY[1, 2] AS ARRAY(BIGINT))",
"spark": "CAST(ARRAY(1, 2) AS ARRAY<BIGINT>)",
@@ -406,7 +406,7 @@ class TestPresto(Validator):
},
)
self.validate_all(
"SELECT AT_TIMEZONE(CAST(CAST('2012-10-31 00:00' AS TIMESTAMP WITH TIME ZONE) AS TIMESTAMP), 'America/Sao_Paulo')",
"SELECT AT_TIMEZONE(CAST('2012-10-31 00:00' AS TIMESTAMP WITH TIME ZONE), 'America/Sao_Paulo')",
read={
"spark": "SELECT FROM_UTC_TIMESTAMP(TIMESTAMP '2012-10-31 00:00', 'America/Sao_Paulo')",
},

View file

@@ -320,6 +320,7 @@ class TestRedshift(Validator):
)
def test_identity(self):
self.validate_identity("ALTER TABLE table_name ALTER COLUMN bla TYPE VARCHAR")
self.validate_identity("SELECT CAST(value AS FLOAT(8))")
self.validate_identity("1 div", "1 AS div")
self.validate_identity("LISTAGG(DISTINCT foo, ', ')")

View file

@@ -979,6 +979,8 @@ class TestSnowflake(Validator):
self.validate_identity("SELECT BIT_SHIFTLEFT(a, 1)", "SELECT BITSHIFTLEFT(a, 1)")
self.validate_identity("SELECT BIT_SHIFTRIGHT(a, 1)", "SELECT BITSHIFTRIGHT(a, 1)")
self.validate_identity("CREATE TABLE t (id INT PRIMARY KEY AUTOINCREMENT)")
def test_null_treatment(self):
self.validate_all(
r"SELECT FIRST_VALUE(TABLE1.COLUMN1) OVER (PARTITION BY RANDOM_COLUMN1, RANDOM_COLUMN2 ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS MY_ALIAS FROM TABLE1",

View file

@@ -150,6 +150,8 @@ TBLPROPERTIES (
"hive": "TO_DATE(x)",
"presto": "CAST(CAST(x AS TIMESTAMP) AS DATE)",
"spark": "TO_DATE(x)",
"snowflake": "TRY_TO_DATE(x, 'yyyy-mm-DD')",
"databricks": "TO_DATE(x)",
},
)
self.validate_all(
@@ -159,6 +161,8 @@ TBLPROPERTIES (
"hive": "TO_DATE(x, 'yyyy')",
"presto": "CAST(DATE_PARSE(x, '%Y') AS DATE)",
"spark": "TO_DATE(x, 'yyyy')",
"snowflake": "TRY_TO_DATE(x, 'yyyy')",
"databricks": "TO_DATE(x, 'yyyy')",
},
)
@@ -342,7 +346,7 @@ TBLPROPERTIES (
"SELECT DATE_FORMAT(DATE '2020-01-01', 'EEEE') AS weekday",
write={
"presto": "SELECT DATE_FORMAT(CAST(CAST('2020-01-01' AS DATE) AS TIMESTAMP), '%W') AS weekday",
"spark": "SELECT DATE_FORMAT(CAST(CAST('2020-01-01' AS DATE) AS TIMESTAMP), 'EEEE') AS weekday",
"spark": "SELECT DATE_FORMAT(CAST('2020-01-01' AS DATE), 'EEEE') AS weekday",
},
)
self.validate_all(

View file

@@ -7,6 +7,8 @@ class TestTrino(Validator):
def test_trino(self):
self.validate_identity("JSON_EXTRACT(content, json_path)")
self.validate_identity("JSON_QUERY(content, 'lax $.HY.*')")
self.validate_identity("JSON_QUERY(content, 'strict $.HY.*' WITH WRAPPER)")
self.validate_identity("JSON_QUERY(content, 'strict $.HY.*' WITH ARRAY WRAPPER)")
self.validate_identity("JSON_QUERY(content, 'strict $.HY.*' WITH UNCONDITIONAL WRAPPER)")
self.validate_identity("JSON_QUERY(content, 'strict $.HY.*' WITHOUT CONDITIONAL WRAPPER)")
self.validate_identity("JSON_QUERY(description, 'strict $.comment' KEEP QUOTES)")

View file

@@ -443,6 +443,32 @@ class TestTSQL(Validator):
"CREATE TABLE db.t1 (a INTEGER, b INTEGER, CONSTRAINT c PRIMARY KEY (a DESC, b))"
)
self.validate_all(
"SCHEMA_NAME(id)",
write={
"sqlite": "'main'",
"mysql": "SCHEMA()",
"postgres": "CURRENT_SCHEMA",
"tsql": "SCHEMA_NAME(id)",
},
)
with self.assertRaises(ParseError):
parse_one("SELECT begin", read="tsql")
self.validate_identity("CREATE PROCEDURE test(@v1 INTEGER = 1, @v2 CHAR(1) = 'c')")
self.validate_identity("DECLARE @v1 AS INTEGER = 1, @v2 AS CHAR(1) = 'c'")
for output in ("OUT", "OUTPUT", "READ_ONLY"):
self.validate_identity(
f"CREATE PROCEDURE test(@v1 INTEGER = 1 {output}, @v2 CHAR(1) {output})"
)
self.validate_identity(
"CREATE PROCEDURE test(@v1 AS INTEGER = 1, @v2 AS CHAR(1) = 'c')",
"CREATE PROCEDURE test(@v1 INTEGER = 1, @v2 CHAR(1) = 'c')",
)
def test_option(self):
possible_options = [
"HASH GROUP",
@@ -900,6 +926,16 @@ class TestTSQL(Validator):
},
write={
"databricks": "CREATE TABLE tbl (id BIGINT NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 10 INCREMENT BY 1) PRIMARY KEY)",
"postgres": "CREATE TABLE tbl (id INT NOT NULL GENERATED BY DEFAULT AS IDENTITY (START WITH 10 INCREMENT BY 1) PRIMARY KEY)",
},
)
self.validate_all(
"CREATE TABLE x (a UNIQUEIDENTIFIER, b VARBINARY)",
write={
"duckdb": "CREATE TABLE x (a UUID, b BLOB)",
"presto": "CREATE TABLE x (a UUID, b VARBINARY)",
"spark": "CREATE TABLE x (a STRING, b BINARY)",
"postgres": "CREATE TABLE x (a UUID, b BYTEA)",
},
)
self.validate_all(

View file

@@ -885,3 +885,7 @@ SELECT attach
SELECT detach
SELECT 1 OFFSET 1
SELECT 1 LIMIT 1
CAST(x AS INT128)
CAST(x AS UINT128)
CAST(x AS UINT256)
SELECT export

View file

@@ -254,6 +254,11 @@ START WITH (t1.id IS NOT NULL)
CONNECT BY PRIOR t1.id = t2.id;
WITH T1 AS (SELECT 1 AS C1, 1 AS C2, 'Y' AS TOP_PARENT_INDICATOR, 1 AS ID FROM DUAL DUAL), T2 AS (SELECT 1 AS C2, 2 AS ID FROM DUAL DUAL) SELECT T1.C1 AS C1 FROM T1 T1 LEFT JOIN T2 T2 ON T1.C2 = T2.C2 WHERE (T1.TOP_PARENT_INDICATOR = 'Y' OR LEVEL = 1) START WITH (NOT T1.ID IS NULL) CONNECT BY PRIOR T1.ID = T2.ID;
# execute: false
# dialect: postgres
SELECT * FROM ROWS FROM (GENERATE_SERIES(1, 3), GENERATE_SERIES(10, 12)) AS t(a, b);
SELECT t.a AS a, t.b AS b FROM ROWS FROM (GENERATE_SERIES(1, 3), GENERATE_SERIES(10, 12)) AS t(a, b);
--------------------------------------
-- Derived tables
--------------------------------------

View file

@@ -556,10 +556,10 @@ TPCDS_SCHEMA = {
def rewrite_fixtures(in_path, out_path, schema, num, kind):
with open(out_path, "w", encoding="UTF-8") as fixture:
with open(out_path, "w", encoding="utf-8") as fixture:
for i in range(num):
i = i + 1
with open(in_path.format(i=i), encoding="UTF-8") as file:
with open(in_path.format(i=i), encoding="utf-8") as file:
original = "\n".join(
line.rstrip()
for line in file.read().split(";")[0].split("\n")

View file

@@ -55,7 +55,6 @@ class TestExpressions(unittest.TestCase):
parse_one("ROW() OVER(Partition by y)"),
parse_one("ROW() OVER (partition BY y)"),
)
self.assertEqual(parse_one("TO_DATE(x)", read="hive"), parse_one("ts_or_ds_to_date(x)"))
self.assertEqual(exp.Table(pivots=[]), exp.Table())
self.assertNotEqual(exp.Table(pivots=[None]), exp.Table())
self.assertEqual(

View file

@@ -40,7 +40,7 @@ class TestJsonpath(unittest.TestCase):
self.assertEqual(parse(selector).sql(), f"'{expected}'")
def test_cts_file(self):
with open(os.path.join(FIXTURES_DIR, "jsonpath", "cts.json")) as file:
with open(os.path.join(FIXTURES_DIR, "jsonpath", "cts.json"), encoding="utf-8") as file:
tests = json.load(file)["tests"]
# sqlglot json path generator rewrites to a normal form

View file

@@ -174,6 +174,13 @@ class TestTransforms(unittest.TestCase):
def test_eliminate_join_marks(self):
for dialect in ("oracle", "redshift"):
# No join marks => query remains unaffected
self.validate(
eliminate_join_marks,
"SELECT a.f1, b.f2 FROM a JOIN b ON a.id = b.id WHERE a.blabla = 'a'",
"SELECT a.f1, b.f2 FROM a JOIN b ON a.id = b.id WHERE a.blabla = 'a'",
dialect,
)
self.validate(
eliminate_join_marks,
"SELECT T1.d, T2.c FROM T1, T2 WHERE T1.x = T2.x (+) and T2.y (+) > 5",

View file

@@ -610,7 +610,7 @@ FROM tbl1""",
self.validate("CAST(x AS INT)::BOOLEAN", "CAST(CAST(x AS INT) AS BOOLEAN)")
with self.assertRaises(ParseError):
transpile("x::z", read="duckdb")
transpile("x::z", read="clickhouse")
def test_not_range(self):
self.validate("a NOT LIKE b", "NOT a LIKE b")