Merging upstream version 25.20.1.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
d9afe6504e
commit
f9e1084cc5
119 changed files with 78094 additions and 71498 deletions
|
@ -1345,3 +1345,26 @@ FROM READ_CSV('tests/fixtures/optimizer/tpc-h/nation.csv.gz', 'delimiter', '|')
|
|||
self.assertEqual(4, normalization_distance(gen_expr(2), max_=100))
|
||||
self.assertEqual(18, normalization_distance(gen_expr(3), max_=100))
|
||||
self.assertEqual(110, normalization_distance(gen_expr(10), max_=100))
|
||||
|
||||
def test_custom_annotators(self):
    """SUBSTRING result-type annotation follows the input expression's type
    in the Spark dialect hierarchy (spark2 / spark / databricks).

    For each (input expression, expected type) pair, the optimizer is run
    against a one-column schema and the annotated type of the projected
    SUBSTRING call must round-trip to the same SQL type string.
    """
    # In Spark hierarchy, SUBSTRING result type is dependent on input expr type
    for dialect in ("spark2", "spark", "databricks"):
        for expr_type_pair in (
            ("col", "STRING"),
            ("col", "BINARY"),
            ("'str_literal'", "STRING"),
            ("CAST('str_literal' AS BINARY)", "BINARY"),
        ):
            with self.subTest(
                f"Testing {dialect}'s SUBSTRING() result type for {expr_type_pair}"
            ):
                # Renamed from `type` to `expr_type` to avoid shadowing the builtin.
                expr, expr_type = expr_type_pair
                ast = parse_one(f"SELECT substring({expr}, 2, 3) AS x FROM tbl", read=dialect)

                # The optimizer annotates types; grab the type of the first
                # (only) projection in the optimized SELECT.
                subst_type = (
                    optimizer.optimize(ast, schema={"tbl": {"col": expr_type}}, dialect=dialect)
                    .expressions[0]
                    .type
                )

                # Compare via each type's SQL rendering for the dialect, so
                # equivalent DataType instances compare equal.
                self.assertEqual(subst_type.sql(dialect), exp.DataType.build(expr_type).sql(dialect))
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue