Merging upstream version 20.1.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
  parent d4fe7bdb16
  commit 90988d8258

127 changed files with 73384 additions and 73067 deletions
tests/dialects/test_bigquery.py
@@ -1,6 +1,14 @@
 from unittest import mock
 
-from sqlglot import ErrorLevel, ParseError, TokenError, UnsupportedError, transpile
+from sqlglot import (
+    ErrorLevel,
+    ParseError,
+    TokenError,
+    UnsupportedError,
+    parse,
+    transpile,
+)
+from sqlglot.helper import logger as helper_logger
 from tests.dialects.test_dialect import Validator
 
 
@@ -9,6 +17,28 @@ class TestBigQuery(Validator):
     maxDiff = None
 
     def test_bigquery(self):
+        with self.assertLogs(helper_logger) as cm:
+            self.validate_all(
+                "SELECT a[1], b[OFFSET(1)], c[ORDINAL(1)], d[SAFE_OFFSET(1)], e[SAFE_ORDINAL(1)]",
+                write={
+                    "duckdb": "SELECT a[2], b[2], c[1], d[2], e[1]",
+                    "bigquery": "SELECT a[1], b[OFFSET(1)], c[ORDINAL(1)], d[SAFE_OFFSET(1)], e[SAFE_ORDINAL(1)]",
+                    "presto": "SELECT a[2], b[2], c[1], ELEMENT_AT(d, 2), ELEMENT_AT(e, 1)",
+                },
+            )
+
+            self.validate_all(
+                "a[0]",
+                read={
+                    "duckdb": "a[1]",
+                    "presto": "a[1]",
+                },
+            )
+
+        self.validate_identity(
+            "select array_contains([1, 2, 3], 1)",
+            "SELECT EXISTS(SELECT 1 FROM UNNEST([1, 2, 3]) AS _col WHERE _col = 1)",
+        )
         self.validate_identity("CREATE SCHEMA x DEFAULT COLLATE 'en'")
         self.validate_identity("CREATE TABLE x (y INT64) DEFAULT COLLATE 'en'")
         self.validate_identity("PARSE_JSON('{}', wide_number_mode => 'exact')")
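The duckdb and presto expectations above encode the off-by-one shift between BigQuery's 0-based array indexing and their 1-based indexing. As a standalone illustration (not part of this commit), the same expectation can be reproduced with sqlglot's public transpile() API; a minimal sketch, assuming sqlglot >= 20.1.0 is installed:

    # Minimal sketch: reproduce the array-index expectation from the test above.
    from sqlglot import transpile

    # BigQuery array access is 0-based, DuckDB's is 1-based, so the index shifts by one.
    print(transpile("SELECT a[1]", read="bigquery", write="duckdb")[0])
    # -> SELECT a[2]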
@@ -37,6 +67,15 @@ class TestBigQuery(Validator):
         with self.assertRaises(ParseError):
             transpile("DATE_ADD(x, day)", read="bigquery")
 
+        for_in_stmts = parse(
+            "FOR record IN (SELECT word FROM shakespeare) DO SELECT record.word; END FOR;",
+            read="bigquery",
+        )
+        self.assertEqual(
+            [s.sql(dialect="bigquery") for s in for_in_stmts],
+            ["FOR record IN (SELECT word FROM shakespeare) DO SELECT record.word", "END FOR"],
+        )
+
         self.validate_identity("SELECT test.Unknown FROM test")
         self.validate_identity(r"SELECT '\n\r\a\v\f\t'")
         self.validate_identity("SELECT * FROM tbl FOR SYSTEM_TIME AS OF z")
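This assertion is where the newly imported `parse` is used: unlike `transpile`, which returns SQL strings, `parse` returns one parsed expression per statement, so the BigQuery FOR ... END FOR script splits into two statements. A standalone sketch of the same call, assuming sqlglot >= 20.1.0:

    from sqlglot import parse

    stmts = parse(
        "FOR record IN (SELECT word FROM shakespeare) DO SELECT record.word; END FOR;",
        read="bigquery",
    )
    # Round-trip each statement back to BigQuery SQL, as the test does.
    print([s.sql(dialect="bigquery") for s in stmts])
    # -> ['FOR record IN (SELECT word FROM shakespeare) DO SELECT record.word', 'END FOR']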
@@ -89,6 +128,11 @@ class TestBigQuery(Validator):
         self.validate_identity("ROLLBACK TRANSACTION")
         self.validate_identity("CAST(x AS BIGNUMERIC)")
         self.validate_identity("SELECT y + 1 FROM x GROUP BY y + 1 ORDER BY 1")
+        self.validate_identity("SELECT TIMESTAMP_SECONDS(2) AS t")
+        self.validate_identity("SELECT TIMESTAMP_MILLIS(2) AS t")
+        self.validate_identity(
+            "FOR record IN (SELECT word, word_count FROM bigquery-public-data.samples.shakespeare LIMIT 5) DO SELECT record.word, record.word_count"
+        )
         self.validate_identity(
             "DATE(CAST('2016-12-25 05:30:00+07' AS DATETIME), 'America/Los_Angeles')"
         )
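For context, `validate_identity` is essentially a round-trip check: parse the SQL in the BigQuery dialect and regenerate it, expecting the input back (or the second argument, when one is given). A minimal sketch of the TIMESTAMP_SECONDS case, assuming sqlglot >= 20.1.0:

    from sqlglot import parse_one

    sql = "SELECT TIMESTAMP_SECONDS(2) AS t"
    # Parse as BigQuery and generate BigQuery again; the text should survive unchanged.
    assert parse_one(sql, read="bigquery").sql(dialect="bigquery") == sql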
@@ -142,6 +186,19 @@ class TestBigQuery(Validator):
         self.validate_all('x <> """"""', write={"bigquery": "x <> ''"})
         self.validate_all("x <> ''''''", write={"bigquery": "x <> ''"})
         self.validate_all("CAST(x AS DATETIME)", read={"": "x::timestamp"})
+        self.validate_all(
+            "SELECT TIMESTAMP_MICROS(x)",
+            read={
+                "duckdb": "SELECT MAKE_TIMESTAMP(x)",
+                "spark": "SELECT TIMESTAMP_MICROS(x)",
+            },
+            write={
+                "bigquery": "SELECT TIMESTAMP_MICROS(x)",
+                "duckdb": "SELECT MAKE_TIMESTAMP(x)",
+                "snowflake": "SELECT TO_TIMESTAMP(x / 1000, 3)",
+                "spark": "SELECT TIMESTAMP_MICROS(x)",
+            },
+        )
         self.validate_all(
             "SELECT * FROM t WHERE EXISTS(SELECT * FROM unnest(nums) AS x WHERE x > 1)",
             write={
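As with the earlier array example, the TIMESTAMP_MICROS expectations above can be checked outside the test harness. A sketch using transpile(), assuming sqlglot >= 20.1.0 (the expected strings are taken verbatim from the test):

    from sqlglot import transpile

    src = "SELECT TIMESTAMP_MICROS(x)"
    for dialect in ("duckdb", "snowflake", "spark"):
        print(dialect, transpile(src, read="bigquery", write=dialect)[0])
    # duckdb    SELECT MAKE_TIMESTAMP(x)
    # snowflake SELECT TO_TIMESTAMP(x / 1000, 3)
    # spark     SELECT TIMESTAMP_MICROS(x)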