Adding upstream version 26.16.2.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent: 4bfa0e7e53
commit: 6e767a6f98
110 changed files with 62370 additions and 61414 deletions
@@ -276,15 +276,17 @@ class TestAthena(Validator):
                     exp.FileFormatProperty(this=exp.Literal.string("parquet")),
                     exp.LocationProperty(this=exp.Literal.string("s3://foo")),
                     exp.PartitionedByProperty(
-                        this=exp.Schema(expressions=[exp.to_column("partition_col")])
+                        this=exp.Schema(expressions=[exp.to_column("partition_col", quoted=True)])
                     ),
                 ]
             ),
             expression=exp.select("1"),
         )
+
+        # Even if identify=True, the column names should not be quoted within the string literals in the partitioned_by ARRAY[]
         self.assertEqual(
             ctas_hive.sql(dialect=self.dialect, identify=True),
-            "CREATE TABLE \"foo\".\"bar\" WITH (format='parquet', external_location='s3://foo', partitioned_by=ARRAY['\"partition_col\"']) AS SELECT 1",
+            "CREATE TABLE \"foo\".\"bar\" WITH (format='parquet', external_location='s3://foo', partitioned_by=ARRAY['partition_col']) AS SELECT 1",
         )
         self.assertEqual(
             ctas_hive.sql(dialect=self.dialect, identify=False),
@@ -303,7 +305,8 @@ class TestAthena(Validator):
                             expressions=[
                                 exp.to_column("partition_col"),
                                 exp.PartitionedByBucket(
-                                    this=exp.to_column("a"), expression=exp.Literal.number(4)
+                                    this=exp.to_column("a", quoted=True),
+                                    expression=exp.Literal.number(4),
                                 ),
                             ]
                         )
@@ -312,11 +315,25 @@ class TestAthena(Validator):
             ),
             expression=exp.select("1"),
         )
+        # Even if identify=True, the column names should not be quoted within the string literals in the partitioning ARRAY[]
+        # Technically Trino's Iceberg connector does support quoted column names in the string literals but its undocumented
+        # so we dont do it to keep consistency with the Hive connector
         self.assertEqual(
             ctas_iceberg.sql(dialect=self.dialect, identify=True),
-            "CREATE TABLE \"foo\".\"bar\" WITH (table_type='iceberg', location='s3://foo', partitioning=ARRAY['\"partition_col\"', 'BUCKET(\"a\", 4)']) AS SELECT 1",
+            "CREATE TABLE \"foo\".\"bar\" WITH (table_type='iceberg', location='s3://foo', partitioning=ARRAY['partition_col', 'BUCKET(a, 4)']) AS SELECT 1",
         )
         self.assertEqual(
             ctas_iceberg.sql(dialect=self.dialect, identify=False),
             "CREATE TABLE foo.bar WITH (table_type='iceberg', location='s3://foo', partitioning=ARRAY['partition_col', 'BUCKET(a, 4)']) AS SELECT 1",
         )
+
+    def test_parse_partitioned_by_returns_iceberg_transforms(self):
+        # check that parse_into works for PartitionedByProperty and also that correct AST nodes are emitted for Iceberg transforms
+        parsed = self.parse_one(
+            "(a, bucket(4, b), truncate(3, c), month(d))", into=exp.PartitionedByProperty
+        )
+
+        assert isinstance(parsed, exp.PartitionedByProperty)
+        assert isinstance(parsed.this, exp.Schema)
+        assert next(n for n in parsed.this.expressions if isinstance(n, exp.PartitionedByBucket))
+        assert next(n for n in parsed.this.expressions if isinstance(n, exp.PartitionByTruncate))
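For reference, a minimal standalone sketch of the generation behaviour these assertions pin down. It assumes the dialect is "athena" (what TestAthena's self.dialect is taken to be here) and that the test's ctas_hive expression is built with kind="TABLE"; the constructor is not fully visible in the excerpt, and the variable name ctas is illustrative.

# Sketch only, not part of the diff. Assumes sqlglot with the Athena dialect;
# `ctas` mirrors the ctas_hive expression built in the test above.
from sqlglot import exp

ctas = exp.Create(
    this=exp.to_table("foo.bar"),
    kind="TABLE",  # assumed; the excerpt does not show the full constructor
    properties=exp.Properties(
        expressions=[
            exp.FileFormatProperty(this=exp.Literal.string("parquet")),
            exp.LocationProperty(this=exp.Literal.string("s3://foo")),
            exp.PartitionedByProperty(
                this=exp.Schema(expressions=[exp.to_column("partition_col", quoted=True)])
            ),
        ]
    ),
    expression=exp.select("1"),
)

# Per the assertion above, identify=True quotes the table identifiers but leaves
# the column name unquoted inside the partitioned_by ARRAY[...] string literal:
# CREATE TABLE "foo"."bar" WITH (format='parquet', external_location='s3://foo', partitioned_by=ARRAY['partition_col']) AS SELECT 1
print(ctas.sql(dialect="athena", identify=True))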
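A similar sketch of the parse path exercised by the new test, using the module-level sqlglot.parse_one with read="athena" in place of the Validator helper (assuming that is what self.parse_one delegates to):

import sqlglot
from sqlglot import exp

# Parse a bare partition spec directly into a PartitionedByProperty node.
parsed = sqlglot.parse_one(
    "(a, bucket(4, b), truncate(3, c), month(d))",
    read="athena",
    into=exp.PartitionedByProperty,
)

assert isinstance(parsed, exp.PartitionedByProperty)
assert isinstance(parsed.this, exp.Schema)
# bucket(...) and truncate(...) are parsed into dedicated Iceberg transform nodes.
assert any(isinstance(n, exp.PartitionedByBucket) for n in parsed.this.expressions)
assert any(isinstance(n, exp.PartitionByTruncate) for n in parsed.this.expressions)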