Merging upstream version 10.2.6.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent 40155883c5
commit 17f6b2c749

36 changed files with 1281 additions and 493 deletions
sqlglot/dialects/bigquery.py

@@ -110,17 +110,17 @@ class BigQuery(Dialect):
         KEYWORDS = {
             **tokens.Tokenizer.KEYWORDS,
+            "BEGIN": TokenType.COMMAND,
+            "BEGIN TRANSACTION": TokenType.BEGIN,
             "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
             "CURRENT_TIME": TokenType.CURRENT_TIME,
             "GEOGRAPHY": TokenType.GEOGRAPHY,
-            "INT64": TokenType.BIGINT,
             "FLOAT64": TokenType.DOUBLE,
+            "INT64": TokenType.BIGINT,
+            "NOT DETERMINISTIC": TokenType.VOLATILE,
             "QUALIFY": TokenType.QUALIFY,
             "UNKNOWN": TokenType.NULL,
             "WINDOW": TokenType.WINDOW,
-            "NOT DETERMINISTIC": TokenType.VOLATILE,
-            "BEGIN": TokenType.COMMAND,
-            "BEGIN TRANSACTION": TokenType.BEGIN,
         }
         KEYWORDS.pop("DIV")
@@ -131,6 +131,7 @@ class BigQuery(Dialect):
             "DATE_ADD": _date_add(exp.DateAdd),
             "DATETIME_ADD": _date_add(exp.DatetimeAdd),
+            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
             "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
             "TIME_ADD": _date_add(exp.TimeAdd),
             "TIMESTAMP_ADD": _date_add(exp.TimestampAdd),
             "DATE_SUB": _date_add(exp.DateSub),
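The new DIV entry turns BigQuery's integer-division function into a first-class exp.IntDiv node; together with KEYWORDS.pop("DIV") in the previous hunk, DIV no longer tokenizes as an operator keyword and can be parsed as a function call. A minimal check via sqlglot's public parse_one API (illustrative only; behaviour inferred from the hunk above):

    from sqlglot import exp, parse_one

    # DIV(10, 3) should now parse into exp.IntDiv rather than an
    # anonymous function node.
    tree = parse_one("SELECT DIV(10, 3)", read="bigquery")
    assert tree.find(exp.IntDiv) is not None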
@@ -144,6 +145,7 @@ class BigQuery(Dialect):

         FUNCTION_PARSERS = {
             **parser.Parser.FUNCTION_PARSERS,
+            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
         }
         FUNCTION_PARSERS.pop("TRIM")
@@ -161,7 +163,6 @@ class BigQuery(Dialect):
     class Generator(generator.Generator):
         TRANSFORMS = {
             **generator.Generator.TRANSFORMS,
-            exp.Array: inline_array_sql,
             exp.ArraySize: rename_func("ARRAY_LENGTH"),
             exp.DateAdd: _date_add_sql("DATE", "ADD"),
             exp.DateSub: _date_add_sql("DATE", "SUB"),
@@ -183,6 +184,7 @@ class BigQuery(Dialect):
             exp.VolatilityProperty: lambda self, e: f"DETERMINISTIC"
             if e.name == "IMMUTABLE"
             else "NOT DETERMINISTIC",
+            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
         }

         TYPE_MAPPING = {
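With exp.RegexpLike in TRANSFORMS, regex predicates parsed from other dialects should render as REGEXP_CONTAINS when targeting BigQuery. A hedged sketch (it assumes RLIKE parses to exp.RegexpLike, as in the base parser):

    from sqlglot import transpile

    # Expected output: SELECT REGEXP_CONTAINS(x, 'a.*') FROM t
    print(transpile("SELECT x RLIKE 'a.*' FROM t", read="hive", write="bigquery"))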
@@ -210,24 +212,31 @@ class BigQuery(Dialect):

         EXPLICIT_UNION = True

-        def transaction_sql(self, *_):
+        def array_sql(self, expression: exp.Array) -> str:
+            first_arg = seq_get(expression.expressions, 0)
+            if isinstance(first_arg, exp.Subqueryable):
+                return f"ARRAY{self.wrap(self.sql(first_arg))}"
+
+            return inline_array_sql(self, expression)
+
+        def transaction_sql(self, *_) -> str:
             return "BEGIN TRANSACTION"

-        def commit_sql(self, *_):
+        def commit_sql(self, *_) -> str:
             return "COMMIT TRANSACTION"

-        def rollback_sql(self, *_):
+        def rollback_sql(self, *_) -> str:
             return "ROLLBACK TRANSACTION"

-        def in_unnest_op(self, unnest):
-            return self.sql(unnest)
+        def in_unnest_op(self, expression: exp.Unnest) -> str:
+            return self.sql(expression)

-        def except_op(self, expression):
+        def except_op(self, expression: exp.Except) -> str:
             if not expression.args.get("distinct", False):
                 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
             return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

-        def intersect_op(self, expression):
+        def intersect_op(self, expression: exp.Intersect) -> str:
             if not expression.args.get("distinct", False):
                 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
             return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
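Two behaviours from this hunk, exercised via parse_one as an illustrative sketch (the expected output shapes are inferred from the code, not shown in the diff):

    from sqlglot import parse_one

    # array_sql wraps a subquery argument in parentheses instead of
    # rendering an inline [...] array literal.
    sql = "SELECT ARRAY(SELECT 1 UNION ALL SELECT 2)"
    print(parse_one(sql, read="bigquery").sql(dialect="bigquery"))

    # except_op keeps the qualifier explicit; a bare EXCEPT would hit
    # the unsupported() warning above.
    print(parse_one("SELECT 1 EXCEPT DISTINCT SELECT 2", read="bigquery").sql(dialect="bigquery"))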
sqlglot/dialects/hive.py

@@ -190,6 +190,7 @@ class Hive(Dialect):
             "ADD FILES": TokenType.COMMAND,
             "ADD JAR": TokenType.COMMAND,
             "ADD JARS": TokenType.COMMAND,
+            "WITH SERDEPROPERTIES": TokenType.SERDE_PROPERTIES,
         }

     class Parser(parser.Parser):
@@ -238,6 +239,13 @@ class Hive(Dialect):
             "YEAR": lambda args: exp.Year(this=exp.TsOrDsToDate.from_arg_list(args)),
         }

+        PROPERTY_PARSERS = {
+            **parser.Parser.PROPERTY_PARSERS,
+            TokenType.SERDE_PROPERTIES: lambda self: exp.SerdeProperties(
+                expressions=self._parse_wrapped_csv(self._parse_property)
+            ),
+        }
+
     class Generator(generator.Generator):
         TYPE_MAPPING = {
             **generator.Generator.TYPE_MAPPING,
@@ -297,6 +305,8 @@ class Hive(Dialect):
             exp.UnixToTime: rename_func("FROM_UNIXTIME"),
             exp.UnixToTimeStr: rename_func("FROM_UNIXTIME"),
             exp.PartitionedByProperty: lambda self, e: f"PARTITIONED BY {self.sql(e, 'this')}",
+            exp.RowFormatSerdeProperty: lambda self, e: f"ROW FORMAT SERDE {self.sql(e, 'this')}",
+            exp.SerdeProperties: lambda self, e: self.properties(e, prefix="WITH SERDEPROPERTIES"),
             exp.NumberToStr: rename_func("FORMAT_NUMBER"),
         }
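A hedged round-trip of the SERDE support added across the three Hive hunks above, assuming the ROW FORMAT SERDE parser side also landed in this release:

    from sqlglot import parse_one

    ddl = (
        "CREATE TABLE t (c STRING) "
        "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde' "
        "WITH SERDEPROPERTIES ('separatorChar'=',')"
    )
    # The new PROPERTY_PARSERS entry reads the WITH SERDEPROPERTIES block,
    # and the two generator transforms print both properties back out.
    print(parse_one(ddl, read="hive").sql(dialect="hive"))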
@@ -308,12 +318,15 @@ class Hive(Dialect):
             exp.SchemaCommentProperty,
             exp.LocationProperty,
             exp.TableFormatProperty,
+            exp.RowFormatDelimitedProperty,
+            exp.RowFormatSerdeProperty,
+            exp.SerdeProperties,
         }

         def with_properties(self, properties):
             return self.properties(
                 properties,
-                prefix="TBLPROPERTIES",
+                prefix=self.seg("TBLPROPERTIES"),
             )

         def datatype_sql(self, expression):
sqlglot/dialects/oracle.py

@@ -98,6 +98,7 @@ class Oracle(Dialect):
     class Tokenizer(tokens.Tokenizer):
         KEYWORDS = {
             **tokens.Tokenizer.KEYWORDS,
             "MINUS": TokenType.EXCEPT,
+            "START": TokenType.BEGIN,
             "TOP": TokenType.TOP,
             "VARCHAR2": TokenType.VARCHAR,
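Only the tokenizer changes here, but the visible MINUS mapping is easy to demonstrate; an illustrative transpile in which Oracle's MINUS becomes EXCEPT for a dialect that lacks it:

    from sqlglot import transpile

    # Expected output: SELECT 1 FROM DUAL EXCEPT SELECT 2 FROM DUAL
    print(transpile("SELECT 1 FROM DUAL MINUS SELECT 2 FROM DUAL", read="oracle", write="postgres"))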
sqlglot/dialects/redshift.py

@@ -1,6 +1,7 @@
+from __future__ import annotations

 from sqlglot import exp, transforms
 from sqlglot.dialects.dialect import rename_func
 from sqlglot.dialects.postgres import Postgres
 from sqlglot.tokens import TokenType
@@ -13,12 +14,20 @@ class Redshift(Postgres):
         "HH": "%H",
     }

+    class Parser(Postgres.Parser):
+        FUNCTIONS = {
+            **Postgres.Parser.FUNCTIONS,  # type: ignore
+            "DECODE": exp.Matches.from_arg_list,
+            "NVL": exp.Coalesce.from_arg_list,
+        }
+
     class Tokenizer(Postgres.Tokenizer):
         ESCAPES = ["\\"]

         KEYWORDS = {
             **Postgres.Tokenizer.KEYWORDS,  # type: ignore
             "COPY": TokenType.COMMAND,
             "ENCODE": TokenType.ENCODE,
             "GEOMETRY": TokenType.GEOMETRY,
             "GEOGRAPHY": TokenType.GEOGRAPHY,
             "HLLSKETCH": TokenType.HLLSKETCH,
@@ -50,4 +59,5 @@ class Redshift(Postgres):
             exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
             exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
             exp.DistStyleProperty: lambda self, e: self.naked_property(e),
+            exp.Matches: rename_func("DECODE"),
         }
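The DECODE wiring is symmetric: the parser maps it to exp.Matches and this generator transform renames it back. A sketch against sqlglot's public API (exp.Matches is the node name in this version):

    from sqlglot import exp, parse_one

    tree = parse_one("SELECT DECODE(x, 1, 'one', 'other') FROM t", read="redshift")
    assert tree.find(exp.Matches) is not None
    # Renders back as DECODE(...) via the rename_func transform above.
    print(tree.sql(dialect="redshift"))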
sqlglot/dialects/snowflake.py

@@ -198,6 +198,7 @@ class Snowflake(Dialect):
             "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
             "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
             "TIMESTAMPNTZ": TokenType.TIMESTAMP,
+            "MINUS": TokenType.EXCEPT,
             "SAMPLE": TokenType.TABLE_SAMPLE,
         }
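As with Oracle, MINUS now tokenizes as EXCEPT, so Snowflake set differences survive transpilation; an illustrative sketch:

    from sqlglot import transpile

    # Expected output: SELECT 1 EXCEPT SELECT 2
    print(transpile("SELECT 1 MINUS SELECT 2", read="snowflake", write="duckdb"))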