Adding upstream version 16.4.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
parent d61627452f
commit cac8fd11fe
90 changed files with 35638 additions and 33343 deletions
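The test hunks shown below all make the same adjustment: mock tables are now registered with an explicit dialect="spark" argument to sqlglot.schema.add_table, and quoted identifiers move from ANSI double quotes to Spark-style backticks. A minimal sketch of the pattern the updated tests exercise (it mirrors test_sql_select_only below and uses only the names and calls that appear in this diff; treat it as an illustration, not part of the commit):

import sqlglot
from sqlglot.dataframe.sql.session import SparkSession

# Register the table in the global schema, parsing names with the Spark
# dialect so backtick-quoted identifiers are understood.
sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")

# Build a DataFrame from a SQL string and emit Spark SQL for it.
spark = SparkSession()
df = spark.sql("SELECT cola, colb FROM table")
print(df.sql(dialect="spark")[0])
# Per the updated expectation in test_sql_select_only:
# SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`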
@@ -30,7 +30,7 @@ class TestDataFrameWriter(DataFrameSQLValidator):
     @mock.patch("sqlglot.schema", MappingSchema())
     def test_insertInto_byName(self):
-        sqlglot.schema.add_table("table_name", {"employee_id": "INT"})
+        sqlglot.schema.add_table("table_name", {"employee_id": "INT"}, dialect="spark")
         df = self.df_employee.write.byName.insertInto("table_name")
         expected = "INSERT INTO table_name SELECT `a1`.`employee_id` AS `employee_id` FROM VALUES (1, 'Jack', 'Shephard', 37, 1), (2, 'John', 'Locke', 65, 1), (3, 'Kate', 'Austen', 37, 2), (4, 'Claire', 'Littleton', 27, 2), (5, 'Hugo', 'Reyes', 29, 100) AS `a1`(`employee_id`, `fname`, `lname`, `age`, `store_id`)"
         self.compare_sql(df, expected)
 
@@ -88,8 +88,8 @@ class TestDataFrameWriter(DataFrameSQLValidator):
         self.compare_sql(df, expected_statements)
 
     def test_quotes(self):
-        sqlglot.schema.add_table('"Test"', {'"ID"': "STRING"})
-        df = self.spark.table('"Test"')
+        sqlglot.schema.add_table("`Test`", {"`ID`": "STRING"}, dialect="spark")
+        df = self.spark.table("`Test`")
         self.compare_sql(
-            df.select(df['"ID"']), ["SELECT `Test`.`ID` AS `ID` FROM `Test` AS `Test`"]
+            df.select(df["`ID`"]), ["SELECT `test`.`id` AS `id` FROM `test` AS `test`"]
         )

@@ -71,7 +71,7 @@ class TestDataframeSession(DataFrameSQLValidator):
     @mock.patch("sqlglot.schema", MappingSchema())
     def test_sql_select_only(self):
         query = "SELECT cola, colb FROM table"
-        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"})
+        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
         df = self.spark.sql(query)
         self.assertEqual(
             "SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`",
@@ -80,17 +80,17 @@ class TestDataframeSession(DataFrameSQLValidator):
 
     @mock.patch("sqlglot.schema", MappingSchema())
     def test_select_quoted(self):
-        sqlglot.schema.add_table('"TEST"', {"name": "string"})
+        sqlglot.schema.add_table("`TEST`", {"name": "string"}, dialect="spark")
 
         self.assertEqual(
-            SparkSession().table('"TEST"').select(F.col("name")).sql(dialect="snowflake")[0],
-            '''SELECT "TEST"."name" AS "name" FROM "TEST" AS "TEST"''',
+            SparkSession().table("`TEST`").select(F.col("name")).sql(dialect="snowflake")[0],
+            '''SELECT "test"."name" AS "name" FROM "test" AS "test"''',
         )
 
     @mock.patch("sqlglot.schema", MappingSchema())
     def test_sql_with_aggs(self):
         query = "SELECT cola, colb FROM table"
-        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"})
+        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
         df = self.spark.sql(query).groupBy(F.col("cola")).agg(F.sum("colb"))
         self.assertEqual(
             "WITH t38189 AS (SELECT cola, colb FROM table), t42330 AS (SELECT cola, colb FROM t38189) SELECT cola, SUM(colb) FROM t42330 GROUP BY cola",
@@ -100,7 +100,7 @@ class TestDataframeSession(DataFrameSQLValidator):
     @mock.patch("sqlglot.schema", MappingSchema())
     def test_sql_create(self):
         query = "CREATE TABLE new_table AS WITH t1 AS (SELECT cola, colb FROM table) SELECT cola, colb, FROM t1"
-        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"})
+        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
         df = self.spark.sql(query)
         expected = "CREATE TABLE new_table AS SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`"
         self.compare_sql(df, expected)
@@ -108,7 +108,7 @@ class TestDataframeSession(DataFrameSQLValidator):
     @mock.patch("sqlglot.schema", MappingSchema())
     def test_sql_insert(self):
         query = "WITH t1 AS (SELECT cola, colb FROM table) INSERT INTO new_table SELECT cola, colb FROM t1"
-        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"})
+        sqlglot.schema.add_table("table", {"cola": "string", "colb": "string"}, dialect="spark")
         df = self.spark.sql(query)
         expected = "INSERT INTO new_table SELECT `table`.`cola` AS `cola`, `table`.`colb` AS `colb` FROM `table` AS `table`"
         self.compare_sql(df, expected)
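For the quoting changes in test_quotes and test_select_quoted above, the behavior being pinned down is that an identifier registered with Spark backticks gets re-quoted for the output dialect, and the updated expectations show it normalized to lower case. A short sketch of that round trip, using the same names and calls as test_select_quoted:

import sqlglot
from sqlglot.dataframe.sql import functions as F
from sqlglot.dataframe.sql.session import SparkSession

# The table is registered with Spark's backtick quoting ...
sqlglot.schema.add_table("`TEST`", {"name": "string"}, dialect="spark")

# ... but SQL can be generated for another dialect; Snowflake output uses
# double quotes, and the updated test expects the identifier lower-cased.
query = SparkSession().table("`TEST`").select(F.col("name")).sql(dialect="snowflake")[0]
print(query)  # SELECT "test"."name" AS "name" FROM "test" AS "test"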