Adding upstream version 17.3.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-13 20:43:42 +01:00
parent fbce006e29
commit d28371fcc4
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
43 changed files with 5488 additions and 5047 deletions
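Most of the test churn below pins down two behavior changes in this release: Spark's legacy integer type names (BYTE, SHORT, LONG) are now generated as the standard TINYINT, SMALLINT, and BIGINT, and MD5 transpilation between BigQuery, DuckDB, Hive, and Spark was added. A minimal sketch of the type-name change, assuming sqlglot 17.3.0 is installed:

    import sqlglot

    # Earlier releases emitted "CAST(a AS LONG)" for the Spark target;
    # 17.3.0 uses the standard type name, as the updated tests below expect.
    print(sqlglot.transpile("CAST(a AS INT64)", read="bigquery", write="spark"))
    # ['CAST(a AS BIGINT)']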

View file

@@ -107,6 +107,27 @@ class TestBigQuery(Validator):
         self.validate_all("CAST(x AS NVARCHAR)", write={"bigquery": "CAST(x AS STRING)"})
         self.validate_all("CAST(x AS TIMESTAMPTZ)", write={"bigquery": "CAST(x AS TIMESTAMP)"})
         self.validate_all("CAST(x AS RECORD)", write={"bigquery": "CAST(x AS STRUCT)"})
+        self.validate_all(
+            "MD5(x)",
+            write={
+                "": "MD5_DIGEST(x)",
+                "bigquery": "MD5(x)",
+                "hive": "UNHEX(MD5(x))",
+                "spark": "UNHEX(MD5(x))",
+            },
+        )
+        self.validate_all(
+            "SELECT TO_HEX(MD5(some_string))",
+            read={
+                "duckdb": "SELECT MD5(some_string)",
+                "spark": "SELECT MD5(some_string)",
+            },
+            write={
+                "": "SELECT MD5(some_string)",
+                "bigquery": "SELECT TO_HEX(MD5(some_string))",
+                "duckdb": "SELECT MD5(some_string)",
+            },
+        )
         self.validate_all(
             "SELECT CAST('20201225' AS TIMESTAMP FORMAT 'YYYYMMDD' AT TIME ZONE 'America/New_York')",
             write={"bigquery": "SELECT PARSE_TIMESTAMP('%Y%m%d', '20201225', 'America/New_York')"},
@@ -263,7 +284,7 @@ class TestBigQuery(Validator):
                 "duckdb": "CAST(a AS BIGINT)",
                 "presto": "CAST(a AS BIGINT)",
                 "hive": "CAST(a AS BIGINT)",
-                "spark": "CAST(a AS LONG)",
+                "spark": "CAST(a AS BIGINT)",
             },
         )
         self.validate_all(
@@ -413,7 +434,7 @@ class TestBigQuery(Validator):
                 "duckdb": "CREATE TABLE db.example_table (col_a STRUCT(struct_col_a BIGINT, struct_col_b STRUCT(nested_col_a TEXT, nested_col_b TEXT)))",
                 "presto": "CREATE TABLE db.example_table (col_a ROW(struct_col_a BIGINT, struct_col_b ROW(nested_col_a VARCHAR, nested_col_b VARCHAR)))",
                 "hive": "CREATE TABLE db.example_table (col_a STRUCT<struct_col_a BIGINT, struct_col_b STRUCT<nested_col_a STRING, nested_col_b STRING>>)",
-                "spark": "CREATE TABLE db.example_table (col_a STRUCT<struct_col_a: LONG, struct_col_b: STRUCT<nested_col_a: STRING, nested_col_b: STRING>>)",
+                "spark": "CREATE TABLE db.example_table (col_a STRUCT<struct_col_a: BIGINT, struct_col_b: STRUCT<nested_col_a: STRING, nested_col_b: STRING>>)",
             },
         )
         self.validate_all(
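The new MD5 tests in this file encode the fact that BigQuery's MD5 returns raw bytes while DuckDB's and Spark's return a hex string, so the hex-producing dialects round-trip through TO_HEX. A sketch of the mapping the first hunk asserts:

    import sqlglot

    # DuckDB's MD5 yields a hex string, so BigQuery wraps its byte digest in TO_HEX.
    print(sqlglot.transpile("SELECT MD5(some_string)", read="duckdb", write="bigquery"))
    # ['SELECT TO_HEX(MD5(some_string))']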

View file

@@ -218,7 +218,7 @@ class TestDialect(Validator):
                 "presto": "CAST(a AS SMALLINT)",
                 "redshift": "CAST(a AS SMALLINT)",
                 "snowflake": "CAST(a AS SMALLINT)",
-                "spark": "CAST(a AS SHORT)",
+                "spark": "CAST(a AS SMALLINT)",
                 "sqlite": "CAST(a AS INTEGER)",
                 "starrocks": "CAST(a AS SMALLINT)",
             },

View file

@@ -532,7 +532,7 @@ class TestDuckDB(Validator):
                 "duckdb": "CAST(COL AS BIGINT[])",
                 "presto": "CAST(COL AS ARRAY(BIGINT))",
                 "hive": "CAST(COL AS ARRAY<BIGINT>)",
-                "spark": "CAST(COL AS ARRAY<LONG>)",
+                "spark": "CAST(COL AS ARRAY<BIGINT>)",
                 "postgres": "CAST(COL AS BIGINT[])",
                 "snowflake": "CAST(COL AS ARRAY)",
             },

View file

@@ -73,7 +73,7 @@ class TestHive(Validator):
                 "duckdb": "TRY_CAST(1 AS SMALLINT)",
                 "presto": "TRY_CAST(1 AS SMALLINT)",
                 "hive": "CAST(1 AS SMALLINT)",
-                "spark": "CAST(1 AS SHORT)",
+                "spark": "CAST(1 AS SMALLINT)",
             },
         )
         self.validate_all(
@@ -82,7 +82,7 @@ class TestHive(Validator):
                 "duckdb": "TRY_CAST(1 AS SMALLINT)",
                 "presto": "TRY_CAST(1 AS SMALLINT)",
                 "hive": "CAST(1 AS SMALLINT)",
-                "spark": "CAST(1 AS SHORT)",
+                "spark": "CAST(1 AS SMALLINT)",
             },
         )
         self.validate_all(
@@ -91,7 +91,7 @@ class TestHive(Validator):
                 "duckdb": "TRY_CAST(1 AS TINYINT)",
                 "presto": "TRY_CAST(1 AS TINYINT)",
                 "hive": "CAST(1 AS TINYINT)",
-                "spark": "CAST(1 AS BYTE)",
+                "spark": "CAST(1 AS TINYINT)",
             },
         )
         self.validate_all(
@@ -100,7 +100,7 @@ class TestHive(Validator):
                 "duckdb": "TRY_CAST(1 AS BIGINT)",
                 "presto": "TRY_CAST(1 AS BIGINT)",
                 "hive": "CAST(1 AS BIGINT)",
-                "spark": "CAST(1 AS LONG)",
+                "spark": "CAST(1 AS BIGINT)",
             },
         )
         self.validate_all(

View file

@@ -121,6 +121,7 @@ class TestPostgres(Validator):
         )
 
     def test_postgres(self):
+        self.validate_identity("CAST(x AS MONEY)")
         self.validate_identity("CAST(x AS INT4RANGE)")
         self.validate_identity("CAST(x AS INT4MULTIRANGE)")
         self.validate_identity("CAST(x AS INT8RANGE)")
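validate_identity asserts that a statement parses and regenerates unchanged in the dialect under test; the added line extends that guarantee to Postgres MONEY casts. The equivalent check, sketched against the public API:

    import sqlglot

    # Round trip: parse in the postgres dialect and generate postgres SQL again.
    print(sqlglot.parse_one("CAST(x AS MONEY)", read="postgres").sql(dialect="postgres"))
    # CAST(x AS MONEY)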

View file

@@ -65,7 +65,7 @@ class TestPresto(Validator):
                 "bigquery": "CAST([1, 2] AS ARRAY<INT64>)",
                 "duckdb": "CAST(LIST_VALUE(1, 2) AS BIGINT[])",
                 "presto": "CAST(ARRAY[1, 2] AS ARRAY(BIGINT))",
-                "spark": "CAST(ARRAY(1, 2) AS ARRAY<LONG>)",
+                "spark": "CAST(ARRAY(1, 2) AS ARRAY<BIGINT>)",
                 "snowflake": "CAST([1, 2] AS ARRAY)",
             },
         )
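As this Presto test shows, the Spark rename also reaches inside parameterized types such as arrays. A quick sketch:

    import sqlglot

    # The element type is renamed along with scalar casts: LONG -> BIGINT.
    print(sqlglot.transpile("CAST(ARRAY[1, 2] AS ARRAY(BIGINT))", read="presto", write="spark"))
    # ['CAST(ARRAY(1, 2) AS ARRAY<BIGINT>)']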

View file

@@ -232,6 +232,13 @@ TBLPROPERTIES (
         self.validate_identity("TRIM(TRAILING 'SL' FROM 'SSparkSQLS')")
         self.validate_identity("SPLIT(str, pattern, lim)")
+        self.validate_all(
+            "UNHEX(MD5(x))",
+            write={
+                "bigquery": "FROM_HEX(TO_HEX(MD5(x)))",
+                "spark": "UNHEX(MD5(x))",
+            },
+        )
         self.validate_all(
             "SELECT * FROM ((VALUES 1))", write={"spark": "SELECT * FROM (VALUES (1))"}
         )
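Going the other direction, Spark's UNHEX(MD5(x)) produces a binary digest; the new test maps it to BigQuery as FROM_HEX(TO_HEX(MD5(x))), a literal but type-correct translation since BigQuery's MD5 already returns bytes:

    import sqlglot

    print(sqlglot.transpile("UNHEX(MD5(x))", read="spark", write="bigquery"))
    # ['FROM_HEX(TO_HEX(MD5(x)))']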

View file

@@ -95,7 +95,7 @@ class TestSQLite(Validator):
             "SELECT CAST([a].[b] AS SMALLINT) FROM foo",
             write={
                 "sqlite": 'SELECT CAST("a"."b" AS INTEGER) FROM foo',
-                "spark": "SELECT CAST(`a`.`b` AS SHORT) FROM foo",
+                "spark": "SELECT CAST(`a`.`b` AS SMALLINT) FROM foo",
             },
         )
         self.validate_all(

View file

@@ -43,7 +43,7 @@ class TestTSQL(Validator):
             "SELECT CAST([a].[b] AS SMALLINT) FROM foo",
             write={
                 "tsql": 'SELECT CAST("a"."b" AS SMALLINT) FROM foo',
-                "spark": "SELECT CAST(`a`.`b` AS SHORT) FROM foo",
+                "spark": "SELECT CAST(`a`.`b` AS SMALLINT) FROM foo",
             },
         )
         self.validate_all(
@@ -84,7 +84,7 @@ class TestTSQL(Validator):
             "SELECT CAST([a].[b] AS SMALLINT) FROM foo",
             write={
                 "tsql": 'SELECT CAST("a"."b" AS SMALLINT) FROM foo',
-                "spark": "SELECT CAST(`a`.`b` AS SHORT) FROM foo",
+                "spark": "SELECT CAST(`a`.`b` AS SMALLINT) FROM foo",
             },
         )
         self.validate_all(
@@ -155,6 +155,211 @@ class TestTSQL(Validator):
             },
         )
+
+    def test__types_ints(self):
+        self.validate_all(
+            "CAST(X AS INT)",
+            write={
+                "hive": "CAST(X AS INT)",
+                "spark2": "CAST(X AS INT)",
+                "spark": "CAST(X AS INT)",
+                "tsql": "CAST(X AS INTEGER)",
+            },
+        )
+        self.validate_all(
+            "CAST(X AS BIGINT)",
+            write={
+                "hive": "CAST(X AS BIGINT)",
+                "spark2": "CAST(X AS BIGINT)",
+                "spark": "CAST(X AS BIGINT)",
+                "tsql": "CAST(X AS BIGINT)",
+            },
+        )
+        self.validate_all(
+            "CAST(X AS SMALLINT)",
+            write={
+                "hive": "CAST(X AS SMALLINT)",
+                "spark2": "CAST(X AS SMALLINT)",
+                "spark": "CAST(X AS SMALLINT)",
+                "tsql": "CAST(X AS SMALLINT)",
+            },
+        )
+        self.validate_all(
+            "CAST(X AS TINYINT)",
+            write={
+                "hive": "CAST(X AS TINYINT)",
+                "spark2": "CAST(X AS TINYINT)",
+                "spark": "CAST(X AS TINYINT)",
+                "tsql": "CAST(X AS TINYINT)",
+            },
+        )
+
+    def test_types_decimals(self):
+        self.validate_all(
+            "CAST(x as FLOAT)",
+            write={
+                "spark": "CAST(x AS FLOAT)",
+                "tsql": "CAST(x AS FLOAT)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as DOUBLE)",
+            write={
+                "spark": "CAST(x AS DOUBLE)",
+                "tsql": "CAST(x AS DOUBLE)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as DECIMAL(15, 4))",
+            write={
+                "spark": "CAST(x AS DECIMAL(15, 4))",
+                "tsql": "CAST(x AS NUMERIC(15, 4))",
+            },
+        )
+        self.validate_all(
+            "CAST(x as NUMERIC(13,3))",
+            write={
+                "spark": "CAST(x AS DECIMAL(13, 3))",
+                "tsql": "CAST(x AS NUMERIC(13, 3))",
+            },
+        )
+        self.validate_all(
+            "CAST(x as MONEY)",
+            write={
+                "spark": "CAST(x AS DECIMAL(15, 4))",
+                "tsql": "CAST(x AS MONEY)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as SMALLMONEY)",
+            write={
+                "spark": "CAST(x AS DECIMAL(6, 4))",
+                "tsql": "CAST(x AS SMALLMONEY)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as REAL)",
+            write={
+                "spark": "CAST(x AS FLOAT)",
+                "tsql": "CAST(x AS FLOAT)",
+            },
+        )
+
+    def test_types_string(self):
+        self.validate_all(
+            "CAST(x as CHAR(1))",
+            write={
+                "spark": "CAST(x AS CHAR(1))",
+                "tsql": "CAST(x AS CHAR(1))",
+            },
+        )
+        self.validate_all(
+            "CAST(x as VARCHAR(2))",
+            write={
+                "spark": "CAST(x AS VARCHAR(2))",
+                "tsql": "CAST(x AS VARCHAR(2))",
+            },
+        )
+        self.validate_all(
+            "CAST(x as NCHAR(1))",
+            write={
+                "spark": "CAST(x AS CHAR(1))",
+                "tsql": "CAST(x AS CHAR(1))",
+            },
+        )
+        self.validate_all(
+            "CAST(x as NVARCHAR(2))",
+            write={
+                "spark": "CAST(x AS VARCHAR(2))",
+                "tsql": "CAST(x AS VARCHAR(2))",
+            },
+        )
+
+    def test_types_date(self):
+        self.validate_all(
+            "CAST(x as DATE)",
+            write={
+                "spark": "CAST(x AS DATE)",
+                "tsql": "CAST(x AS DATE)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as DATE)",
+            write={
+                "spark": "CAST(x AS DATE)",
+                "tsql": "CAST(x AS DATE)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as TIME(4))",
+            write={
+                "spark": "CAST(x AS TIMESTAMP)",
+                "tsql": "CAST(x AS TIMESTAMP(4))",
+            },
+        )
+        self.validate_all(
+            "CAST(x as DATETIME2)",
+            write={
+                "spark": "CAST(x AS TIMESTAMP)",
+                "tsql": "CAST(x AS DATETIME2)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as DATETIMEOFFSET)",
+            write={
+                "spark": "CAST(x AS TIMESTAMP)",
+                "tsql": "CAST(x AS TIMESTAMPTZ)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as SMALLDATETIME)",
+            write={
+                "spark": "CAST(x AS TIMESTAMP)",
+                "tsql": "CAST(x AS DATETIME2)",
+            },
+        )
+
+    def test_types_bin(self):
+        self.validate_all(
+            "CAST(x as BIT)",
+            write={
+                "spark": "CAST(x AS BOOLEAN)",
+                "tsql": "CAST(x AS BIT)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as UNIQUEIDENTIFIER)",
+            write={
+                "spark": "CAST(x AS STRING)",
+                "tsql": "CAST(x AS UNIQUEIDENTIFIER)",
+            },
+        )
+        self.validate_all(
+            "CAST(x as VARBINARY)",
+            write={
+                "spark": "CAST(x AS BINARY)",
+                "tsql": "CAST(x AS VARBINARY)",
+            },
+        )
 
     def test_udf(self):
         self.validate_identity(
             "CREATE PROCEDURE foo @a INTEGER, @b INTEGER AS SELECT @a = SUM(bla) FROM baz AS bar"