
Merging upstream version 25.6.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-02-13 21:43:00 +01:00
parent 78f79d1d22
commit 4a7feb3eaa
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
69 changed files with 46817 additions and 45778 deletions

View file

@@ -735,6 +735,13 @@ class BigQuery(Dialect):
exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
# WINDOW comes after QUALIFY
# https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
AFTER_HAVING_MODIFIER_TRANSFORMS = {
"qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
"windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
}
# from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
RESERVED_KEYWORDS = {
"all",

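The BigQuery generator now emits the named WINDOW clause after QUALIFY, matching the order the dialect requires. A minimal, illustrative check (not part of the commit; table and column names are made up, and the exact output depends on the sqlglot version):

```python
import sqlglot

sql = """
SELECT x, ROW_NUMBER() OVER w AS rn
FROM t
QUALIFY rn = 1
WINDOW w AS (PARTITION BY y ORDER BY z)
"""

# With the reordered AFTER_HAVING_MODIFIER_TRANSFORMS, QUALIFY should come
# before WINDOW in the generated BigQuery SQL.
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
```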
View file

@@ -2,7 +2,7 @@ from __future__ import annotations
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot import exp, generator, parser, tokens
from sqlglot.dialects.dialect import (
Dialect,
arg_max_or_min_no_count,
@@ -332,6 +332,8 @@ class ClickHouse(Dialect):
TokenType.SET,
}
RESERVED_TOKENS = parser.Parser.RESERVED_TOKENS - {TokenType.SELECT}
AGG_FUNC_MAPPING = (
lambda functions, suffixes: {
f"{f}{sfx}": (f, sfx) for sfx in (suffixes + [""]) for f in functions
@@ -789,7 +791,6 @@ class ClickHouse(Dialect):
exp.Quantile: _quantile_sql,
exp.RegexpLike: lambda self, e: self.func("match", e.this, e.expression),
exp.Rand: rename_func("randCanonical"),
exp.Select: transforms.preprocess([transforms.eliminate_qualify]),
exp.StartsWith: rename_func("startsWith"),
exp.StrPosition: lambda self, e: self.func(
"position", e.this, e.args.get("substr"), e.args.get("position")

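Dropping the eliminate_qualify preprocessing means QUALIFY is now passed through to ClickHouse, which supports it natively, instead of being rewritten into a subquery. A rough sketch (illustrative names; output approximate):

```python
import sqlglot

sql = "SELECT x, ROW_NUMBER() OVER (PARTITION BY y) AS rn FROM t QUALIFY rn = 1"

# QUALIFY should now survive transpilation to ClickHouse unchanged.
print(sqlglot.transpile(sql, read="duckdb", write="clickhouse")[0])
```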
View file

@@ -356,6 +356,11 @@ class Dialect(metaclass=_Dialect):
EXPAND_ALIAS_REFS_EARLY_ONLY_IN_GROUP_BY = False
"""Whether alias reference expansion before qualification should only happen for the GROUP BY clause."""
SUPPORTS_ORDER_BY_ALL = False
"""
Whether ORDER BY ALL is supported (expands to all the selected columns) as in DuckDB, Spark3/Databricks
"""
# --- Autofilled ---
tokenizer_class = Tokenizer

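Dialects that set SUPPORTS_ORDER_BY_ALL (DuckDB and Spark 3/Databricks in this commit) keep ORDER BY ALL as the keyword instead of treating ALL as a column reference. A small sketch with a made-up table:

```python
import sqlglot

expr = sqlglot.parse_one("SELECT a, b FROM t ORDER BY ALL", read="duckdb")

# The ordering key should be preserved as the ALL keyword, not a column named "all".
print(expr.sql(dialect="duckdb"))  # SELECT a, b FROM t ORDER BY ALL
print(expr.sql(dialect="spark"))   # Spark 3 also supports ORDER BY ALL
```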
View file

@@ -116,17 +116,24 @@ def _build_make_timestamp(args: t.List) -> exp.Expression:
def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
args: t.List[str] = []
# BigQuery allows inline construction such as "STRUCT<a STRING, b INTEGER>('str', 1)" which is
# canonicalized to "ROW('str', 1) AS STRUCT(a TEXT, b INT)" in DuckDB
is_struct_cast = expression.find_ancestor(exp.Cast)
for i, expr in enumerate(expression.expressions):
if isinstance(expr, exp.PropertyEQ):
key = expr.name
value = expr.expression
is_property_eq = isinstance(expr, exp.PropertyEQ)
value = expr.expression if is_property_eq else expr
if is_struct_cast:
args.append(self.sql(value))
else:
key = f"_{i}"
value = expr
key = expr.name if is_property_eq else f"_{i}"
args.append(f"{self.sql(exp.Literal.string(key))}: {self.sql(value)}")
args.append(f"{self.sql(exp.Literal.string(key))}: {self.sql(value)}")
csv_args = ", ".join(args)
return f"{{{', '.join(args)}}}"
return f"ROW({csv_args})" if is_struct_cast else f"{{{csv_args}}}"
def _datatype_sql(self: DuckDB.Generator, expression: exp.DataType) -> str:
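With this change, BigQuery's typed inline struct constructor is rendered in DuckDB as a CAST of ROW(...) rather than a struct literal. Illustrative usage (output is approximate and version-dependent):

```python
import sqlglot

# Mirrors the example in the comment above: STRUCT<a STRING, b INTEGER>('str', 1)
# should become something like CAST(ROW('str', 1) AS STRUCT(a TEXT, b INT)) in DuckDB.
print(
    sqlglot.transpile(
        "SELECT STRUCT<a STRING, b INTEGER>('str', 1)",
        read="bigquery",
        write="duckdb",
    )[0]
)
```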
@@ -172,6 +179,7 @@ class DuckDB(Dialect):
SAFE_DIVISION = True
INDEX_OFFSET = 1
CONCAT_COALESCE = True
SUPPORTS_ORDER_BY_ALL = True
# https://duckdb.org/docs/sql/introduction.html#creating-a-new-table
NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
@@ -381,6 +389,7 @@ class DuckDB(Dialect):
SUPPORTS_TO_NUMBER = False
COPY_HAS_INTO_KEYWORD = False
STAR_EXCEPT = "EXCLUDE"
PAD_FILL_PATTERN_IS_REQUIRED = True
TRANSFORMS = {
**generator.Generator.TRANSFORMS,
@@ -448,6 +457,8 @@ class DuckDB(Dialect):
),
exp.RegexpLike: rename_func("REGEXP_MATCHES"),
exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
exp.Return: lambda self, e: self.sql(e, "this"),
exp.ReturnsProperty: lambda self, e: "TABLE" if isinstance(e.this, exp.Schema) else "",
exp.Rand: rename_func("RANDOM"),
exp.SafeDivide: no_safe_divide_sql,
exp.Split: rename_func("STR_SPLIT"),
@@ -609,6 +620,7 @@ class DuckDB(Dialect):
# can be transpiled to DuckDB, so we explicitly override them accordingly
PROPERTIES_LOCATION[exp.LikeProperty] = exp.Properties.Location.POST_SCHEMA
PROPERTIES_LOCATION[exp.TemporaryProperty] = exp.Properties.Location.POST_CREATE
PROPERTIES_LOCATION[exp.ReturnsProperty] = exp.Properties.Location.POST_ALIAS
def strtotime_sql(self, expression: exp.StrToTime) -> str:
if expression.args.get("safe"):

View file

@@ -447,6 +447,7 @@ class Hive(Dialect):
SUPPORTS_TO_NUMBER = False
WITH_PROPERTIES_PREFIX = "TBLPROPERTIES"
PARSE_JSON_NAME = None
PAD_FILL_PATTERN_IS_REQUIRED = True
EXPRESSIONS_WITHOUT_NESTED_CTES = {
exp.Insert,

View file

@@ -690,6 +690,7 @@ class MySQL(Dialect):
JSON_KEY_VALUE_PAIR_SEP = ","
SUPPORTS_TO_NUMBER = False
PARSE_JSON_NAME = None
PAD_FILL_PATTERN_IS_REQUIRED = True
TRANSFORMS = {
**generator.Generator.TRANSFORMS,

View file

@@ -447,6 +447,9 @@ class Postgres(Dialect):
return self.expression(exp.Extract, this=part, expression=value)
def _parse_unique_key(self) -> t.Optional[exp.Expression]:
return None
class Generator(generator.Generator):
SINGLE_STRING_INTERVAL = True
RENAME_TABLE_WITH_DB = False

View file

@@ -35,6 +35,8 @@ from sqlglot.helper import apply_index_offset, seq_get
from sqlglot.tokens import TokenType
from sqlglot.transforms import unqualify_columns
DATE_ADD_OR_SUB = t.Union[exp.DateAdd, exp.TimestampAdd, exp.DateSub]
def _explode_to_unnest_sql(self: Presto.Generator, expression: exp.Lateral) -> str:
if isinstance(expression.this, exp.Explode):
@@ -223,6 +225,21 @@ def _build_to_char(args: t.List) -> exp.TimeToStr:
return build_formatted_time(exp.TimeToStr, "teradata")(args)
def _date_delta_sql(
name: str, negate_interval: bool = False
) -> t.Callable[[Presto.Generator, DATE_ADD_OR_SUB], str]:
def _delta_sql(self: Presto.Generator, expression: DATE_ADD_OR_SUB) -> str:
interval = _to_int(expression.expression)
return self.func(
name,
unit_to_str(expression),
interval * (-1) if negate_interval else interval,
expression.this,
)
return _delta_sql
class Presto(Dialect):
INDEX_OFFSET = 1
NULL_ORDERING = "nulls_are_last"
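The new _date_delta_sql factory deduplicates the DATE_ADD/DATE_SUB/TIMESTAMP_ADD handling; DATE_SUB is emitted as DATE_ADD with the interval negated. A rough check (made-up column name, approximate output):

```python
import sqlglot

# DateSub should come out as DATE_ADD with a negated interval in Presto,
# e.g. DATE_ADD('DAY', 2 * -1, d) or an equivalent form.
print(sqlglot.transpile("SELECT DATE_SUB(d, INTERVAL 2 DAY)", read="mysql", write="presto")[0])
```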
@@ -335,6 +352,7 @@ class Presto(Dialect):
SUPPORTS_TO_NUMBER = False
HEX_FUNC = "TO_HEX"
PARSE_JSON_NAME = "JSON_PARSE"
PAD_FILL_PATTERN_IS_REQUIRED = True
PROPERTIES_LOCATION = {
**generator.Generator.PROPERTIES_LOCATION,
@@ -385,24 +403,14 @@ class Presto(Dialect):
exp.BitwiseXor: lambda self, e: self.func("BITWISE_XOR", e.this, e.expression),
exp.Cast: transforms.preprocess([transforms.epoch_cast_to_ts]),
exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
exp.DateAdd: lambda self, e: self.func(
"DATE_ADD",
unit_to_str(e),
_to_int(e.expression),
e.this,
),
exp.DateAdd: _date_delta_sql("DATE_ADD"),
exp.DateDiff: lambda self, e: self.func(
"DATE_DIFF", unit_to_str(e), e.expression, e.this
),
exp.DateStrToDate: datestrtodate_sql,
exp.DateToDi: lambda self,
e: f"CAST(DATE_FORMAT({self.sql(e, 'this')}, {Presto.DATEINT_FORMAT}) AS INT)",
exp.DateSub: lambda self, e: self.func(
"DATE_ADD",
unit_to_str(e),
_to_int(e.expression * -1),
e.this,
),
exp.DateSub: _date_delta_sql("DATE_ADD", negate_interval=True),
exp.Decode: lambda self, e: encode_decode_sql(self, e, "FROM_UTF8"),
exp.DiToDate: lambda self,
e: f"CAST(DATE_PARSE(CAST({self.sql(e, 'this')} AS VARCHAR), {Presto.DATEINT_FORMAT}) AS DATE)",
@@ -451,6 +459,7 @@ class Presto(Dialect):
exp.StructExtract: struct_extract_sql,
exp.Table: transforms.preprocess([_unnest_sequence]),
exp.Timestamp: no_timestamp_sql,
exp.TimestampAdd: _date_delta_sql("DATE_ADD"),
exp.TimestampTrunc: timestamptrunc_sql(),
exp.TimeStrToDate: timestrtotime_sql,
exp.TimeStrToTime: timestrtotime_sql,

View file

@@ -90,6 +90,8 @@ def _dateadd_sql(self: Spark.Generator, expression: exp.TsOrDsAdd | exp.Timestam
class Spark(Spark2):
SUPPORTS_ORDER_BY_ALL = True
class Tokenizer(Spark2.Tokenizer):
STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = False
@@ -129,6 +131,7 @@ class Spark(Spark2):
class Generator(Spark2.Generator):
SUPPORTS_TO_NUMBER = True
PAD_FILL_PATTERN_IS_REQUIRED = False
TYPE_MAPPING = {
**Spark2.Generator.TYPE_MAPPING,

View file

@@ -1869,7 +1869,7 @@ class TitleColumnConstraint(ColumnConstraintKind):
class UniqueColumnConstraint(ColumnConstraintKind):
arg_types = {"this": False, "index_type": False, "on_conflict": False}
arg_types = {"this": False, "index_type": False, "on_conflict": False, "nulls": False}
class UppercaseColumnConstraint(ColumnConstraintKind):
@@ -2015,7 +2015,7 @@ class CopyParameter(Expression):
arg_types = {"this": True, "expression": False, "expressions": False}
class Copy(Expression):
class Copy(DML):
arg_types = {
"this": True,
"kind": True,
@@ -3043,6 +3043,7 @@ class Table(Expression):
"only": False,
"partition": False,
"changes": False,
"rows_from": False,
}
@property
@@ -4797,6 +4798,11 @@ class List(Func):
is_var_len_args = True
# String pad, kind True -> LPAD, False -> RPAD
class Pad(Func):
arg_types = {"this": True, "expression": True, "fill_pattern": False, "is_left": True}
# https://docs.snowflake.com/en/sql-reference/functions/to_char
# https://docs.oracle.com/en/database/oracle/oracle-database/23/sqlrf/TO_CHAR-number.html
class ToChar(Func):

View file

@@ -375,6 +375,9 @@ class Generator(metaclass=_Generator):
# Whether to quote the generated expression of exp.JsonPath
QUOTE_JSON_PATH = True
# Whether the text pattern/fill (3rd) parameter of RPAD()/LPAD() is optional (defaults to space)
PAD_FILL_PATTERN_IS_REQUIRED = False
# The name to generate for the JSONPath expression. If `None`, only `this` will be generated
PARSE_JSON_NAME: t.Optional[str] = "PARSE_JSON"
@@ -406,13 +409,13 @@ class Generator(metaclass=_Generator):
AFTER_HAVING_MODIFIER_TRANSFORMS = {
"cluster": lambda self, e: self.sql(e, "cluster"),
"distribute": lambda self, e: self.sql(e, "distribute"),
"qualify": lambda self, e: self.sql(e, "qualify"),
"sort": lambda self, e: self.sql(e, "sort"),
"windows": lambda self, e: (
self.seg("WINDOW ") + self.expressions(e, key="windows", flat=True)
if e.args.get("windows")
else ""
),
"qualify": lambda self, e: self.sql(e, "qualify"),
}
TOKEN_MAPPING: t.Dict[TokenType, str] = {}
@@ -512,6 +515,7 @@ class Generator(metaclass=_Generator):
# Expressions whose comments are separated from them for better formatting
WITH_SEPARATED_COMMENTS: t.Tuple[t.Type[exp.Expression], ...] = (
exp.Command,
exp.Create,
exp.Delete,
exp.Drop,
@@ -957,7 +961,8 @@ class Generator(metaclass=_Generator):
index_type = f" USING {index_type}" if index_type else ""
on_conflict = self.sql(expression, "on_conflict")
on_conflict = f" {on_conflict}" if on_conflict else ""
return f"UNIQUE{this}{index_type}{on_conflict}"
nulls_sql = " NULLS NOT DISTINCT" if expression.args.get("nulls") else ""
return f"UNIQUE{nulls_sql}{this}{index_type}{on_conflict}"
def createable_sql(self, expression: exp.Create, locations: t.DefaultDict) -> str:
return self.sql(expression, "this")
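Together with the parser change further down, this lets Postgres-style UNIQUE NULLS NOT DISTINCT constraints round-trip instead of losing the NULLS clause. Illustrative only:

```python
import sqlglot

ddl = "CREATE TABLE t (a INT UNIQUE NULLS NOT DISTINCT)"

# The NULLS NOT DISTINCT modifier should be preserved on the way back out.
print(sqlglot.transpile(ddl, read="postgres", write="postgres")[0])
```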
@@ -996,6 +1001,7 @@ class Generator(metaclass=_Generator):
expression_sql = f"{begin}{self.sep()}{expression_sql}{end}"
if self.CREATE_FUNCTION_RETURN_AS or not isinstance(expression.expression, exp.Return):
postalias_props_sql = ""
if properties_locs.get(exp.Properties.Location.POST_ALIAS):
postalias_props_sql = self.properties(
exp.Properties(
@@ -1003,9 +1009,8 @@ class Generator(metaclass=_Generator):
),
wrapped=False,
)
expression_sql = f" AS {postalias_props_sql}{expression_sql}"
else:
expression_sql = f" AS{expression_sql}"
postalias_props_sql = f" {postalias_props_sql}" if postalias_props_sql else ""
expression_sql = f" AS{postalias_props_sql}{expression_sql}"
postindex_props_sql = ""
if properties_locs.get(exp.Properties.Location.POST_INDEX):
@@ -1754,6 +1759,10 @@ class Generator(metaclass=_Generator):
changes = self.sql(expression, "changes")
changes = f" {changes}" if changes else ""
rows_from = self.expressions(expression, key="rows_from")
if rows_from:
table = f"ROWS FROM {self.wrap(rows_from)}"
return f"{only}{table}{changes}{partition}{version}{file_format}{alias}{hints}{pivots}{joins}{laterals}{ordinality}"
def tablesample_sql(
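The new rows_from argument on exp.Table backs Postgres' ROWS FROM (...) syntax for combining set-returning functions. A minimal sketch (function choice is arbitrary; output approximate):

```python
import sqlglot

sql = "SELECT * FROM ROWS FROM (generate_series(1, 3), generate_series(4, 6))"

# The ROWS FROM wrapper should be parsed into Table(rows_from=[...]) and rendered back.
print(sqlglot.parse_one(sql, read="postgres").sql(dialect="postgres"))
```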
@@ -4043,3 +4052,12 @@ class Generator(metaclass=_Generator):
end = f"{self.seg('')}{end}" if end else ""
return f"CHANGES ({information}){at_before}{end}"
def pad_sql(self, expression: exp.Pad) -> str:
prefix = "L" if expression.args.get("is_left") else "R"
fill_pattern = self.sql(expression, "fill_pattern") or None
if not fill_pattern and self.PAD_FILL_PATTERN_IS_REQUIRED:
fill_pattern = "' '"
return self.func(f"{prefix}PAD", expression.this, expression.expression, fill_pattern)

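pad_sql, together with the new exp.Pad expression and the LPAD/RPAD builders in the parser, centralizes padding: dialects that set PAD_FILL_PATTERN_IS_REQUIRED get an explicit ' ' fill pattern when the source omitted it. A quick sketch (made-up column, approximate output):

```python
import sqlglot

# Hive requires the third argument, so it should be filled in with ' '.
print(sqlglot.transpile("SELECT LPAD(x, 5)", read="spark", write="hive")[0])
# e.g. SELECT LPAD(x, 5, ' ')

# Spark 3 leaves the pattern optional, so nothing is added.
print(sqlglot.transpile("SELECT LPAD(x, 5)", read="spark", write="spark")[0])
# e.g. SELECT LPAD(x, 5)
```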
View file

@@ -184,7 +184,6 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
exp.Ceil,
exp.DatetimeDiff,
exp.DateDiff,
exp.Extract,
exp.TimestampDiff,
exp.TimeDiff,
exp.DateToDi,
@@ -268,6 +267,7 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
exp.Div: lambda self, e: self._annotate_div(e),
exp.Dot: lambda self, e: self._annotate_dot(e),
exp.Explode: lambda self, e: self._annotate_explode(e),
exp.Extract: lambda self, e: self._annotate_extract(e),
exp.Filter: lambda self, e: self._annotate_by_args(e, "this"),
exp.GenerateDateArray: lambda self, e: self._annotate_with_type(
e, exp.DataType.build("ARRAY<DATE>")
@@ -680,3 +680,14 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
self._set_type(expression, map_type)
return expression
def _annotate_extract(self, expression: exp.Extract) -> exp.Extract:
self._annotate_args(expression)
part = expression.name
if part == "TIME":
self._set_type(expression, exp.DataType.Type.TIME)
elif part == "DATE":
self._set_type(expression, exp.DataType.Type.DATE)
else:
self._set_type(expression, exp.DataType.Type.INT)
return expression

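EXTRACT results are now typed per extracted part: DATE and TIME parts map to those types, everything else (YEAR, MONTH, ...) to INT. An illustrative use of the type annotator (made-up query, no schema):

```python
from sqlglot import parse_one
from sqlglot.optimizer.annotate_types import annotate_types

expr = annotate_types(parse_one("SELECT EXTRACT(YEAR FROM CAST(x AS DATE)) AS y FROM t"))

# The projection should be annotated as an integer type.
print(expr.selects[0].type)
```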
View file

@@ -513,7 +513,9 @@ def _expand_stars(
new_selections = []
except_columns: t.Dict[int, t.Set[str]] = {}
replace_columns: t.Dict[int, t.Dict[str, str]] = {}
replace_columns: t.Dict[int, t.Dict[str, exp.Alias]] = {}
rename_columns: t.Dict[int, t.Dict[str, str]] = {}
coalesced_columns = set()
dialect = resolver.schema.dialect
@@ -548,11 +550,13 @@ def _expand_stars(
tables.extend(scope.selected_sources)
_add_except_columns(expression, tables, except_columns)
_add_replace_columns(expression, tables, replace_columns)
_add_rename_columns(expression, tables, rename_columns)
elif expression.is_star:
if not isinstance(expression, exp.Dot):
tables.append(expression.table)
_add_except_columns(expression.this, tables, except_columns)
_add_replace_columns(expression.this, tables, replace_columns)
_add_rename_columns(expression.this, tables, rename_columns)
elif is_bigquery:
struct_fields = _expand_struct_stars(expression)
if struct_fields:
@@ -578,6 +582,8 @@ def _expand_stars(
table_id = id(table)
columns_to_exclude = except_columns.get(table_id) or set()
renamed_columns = rename_columns.get(table_id, {})
replaced_columns = replace_columns.get(table_id, {})
if pivot:
if pivot_output_columns and pivot_exclude_columns:
@@ -606,10 +612,12 @@ def _expand_stars(
alias(exp.func("coalesce", *coalesce_args), alias=name, copy=False)
)
else:
alias_ = replace_columns.get(table_id, {}).get(name, name)
column = exp.column(name, table=table)
alias_ = renamed_columns.get(name, name)
selection_expr = replaced_columns.get(name) or exp.column(name, table=table)
new_selections.append(
alias(column, alias_, copy=False) if alias_ != name else column
alias(selection_expr, alias_, copy=False)
if alias_ != name
else selection_expr
)
# Ensures we don't overwrite the initial selections with an empty list
@@ -631,15 +639,29 @@ def _add_except_columns(
except_columns[id(table)] = columns
def _add_rename_columns(
expression: exp.Expression, tables, rename_columns: t.Dict[int, t.Dict[str, str]]
) -> None:
rename = expression.args.get("rename")
if not rename:
return
columns = {e.this.name: e.alias for e in rename}
for table in tables:
rename_columns[id(table)] = columns
def _add_replace_columns(
expression: exp.Expression, tables, replace_columns: t.Dict[int, t.Dict[str, str]]
expression: exp.Expression, tables, replace_columns: t.Dict[int, t.Dict[str, exp.Alias]]
) -> None:
replace = expression.args.get("replace")
if not replace:
return
columns = {e.this.name: e.alias for e in replace}
columns = {e.alias: e for e in replace}
for table in tables:
replace_columns[id(table)] = columns

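Star expansion now keeps the whole REPLACE expression (not just its alias) and also understands BigQuery's SELECT * RENAME. A sketch of the effect via the qualify optimizer (schema and names are made up; exact qualification may differ):

```python
from sqlglot import parse_one
from sqlglot.optimizer.qualify import qualify

sql = "SELECT * REPLACE (a + 1 AS a) FROM t"

qualified = qualify(
    parse_one(sql, read="bigquery"),
    schema={"t": {"a": "INT", "b": "INT"}},
    dialect="bigquery",
)

# Expected shape: SELECT a + 1 AS a, t.b AS b FROM t AS t
print(qualified.sql(dialect="bigquery"))
```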
View file

@@ -284,6 +284,7 @@ class Scope:
or column.name not in named_selects
)
)
or (isinstance(ancestor, exp.Star) and not column.arg_key == "except")
):
self._columns.append(column)

View file

@@ -108,6 +108,15 @@ def build_mod(args: t.List) -> exp.Mod:
return exp.Mod(this=this, expression=expression)
def build_pad(args: t.List, is_left: bool = True):
return exp.Pad(
this=seq_get(args, 0),
expression=seq_get(args, 1),
fill_pattern=seq_get(args, 2),
is_left=is_left,
)
class _Parser(type):
def __new__(cls, clsname, bases, attrs):
klass = super().__new__(cls, clsname, bases, attrs)
@@ -159,7 +168,11 @@ class Parser(metaclass=_Parser):
"LOG2": lambda args: exp.Log(this=exp.Literal.number(2), expression=seq_get(args, 0)),
"LOG10": lambda args: exp.Log(this=exp.Literal.number(10), expression=seq_get(args, 0)),
"LOWER": build_lower,
"LPAD": lambda args: build_pad(args),
"LEFTPAD": lambda args: build_pad(args),
"MOD": build_mod,
"RPAD": lambda args: build_pad(args, is_left=False),
"RIGHTPAD": lambda args: build_pad(args, is_left=False),
"SCOPE_RESOLUTION": lambda args: exp.ScopeResolution(expression=seq_get(args, 0))
if len(args) != 2
else exp.ScopeResolution(this=seq_get(args, 0), expression=seq_get(args, 1)),
@@ -777,7 +790,7 @@ class Parser(metaclass=_Parser):
TokenType.PARAMETER: lambda self: self._parse_parameter(),
TokenType.COLON: lambda self: (
self.expression(exp.Placeholder, this=self._prev.text)
if self._match(TokenType.NUMBER) or self._match_set(self.ID_VAR_TOKENS)
if self._match_set(self.ID_VAR_TOKENS)
else None
),
}
@@ -1478,7 +1491,10 @@ class Parser(metaclass=_Parser):
def _parse_command(self) -> exp.Command:
self._warn_unsupported()
return self.expression(
exp.Command, this=self._prev.text.upper(), expression=self._parse_string()
exp.Command,
comments=self._prev_comments,
this=self._prev.text.upper(),
expression=self._parse_string(),
)
def _try_parse(self, parse_method: t.Callable[[], T], retreat: bool = False) -> t.Optional[T]:
@@ -3375,11 +3391,17 @@ class Parser(metaclass=_Parser):
bracket = parse_bracket and self._parse_bracket(None)
bracket = self.expression(exp.Table, this=bracket) if bracket else None
rows_from = self._match_text_seq("ROWS", "FROM") and self._parse_wrapped_csv(
self._parse_table
)
rows_from = self.expression(exp.Table, rows_from=rows_from) if rows_from else None
only = self._match(TokenType.ONLY)
this = t.cast(
exp.Expression,
bracket
or rows_from
or self._parse_bracket(
self._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
),
@@ -3842,6 +3864,9 @@ class Parser(metaclass=_Parser):
if not this:
return None
if this.name.upper() == "ALL" and self.dialect.SUPPORTS_ORDER_BY_ALL:
this = exp.var("ALL")
asc = self._match(TokenType.ASC)
desc = self._match(TokenType.DESC) or (asc and False)
@@ -4252,6 +4277,13 @@ class Parser(metaclass=_Parser):
index = self._index
data_type = self._parse_types(check_func=True, allow_identifiers=False)
# parse_types() returns a Cast if we parsed BQ's inline constructor <type>(<values>) e.g.
# STRUCT<a INT, b STRING>(1, 'foo'), which is canonicalized to CAST(<values> AS <type>)
if isinstance(data_type, exp.Cast):
# This constructor can contain ops directly after it, for instance struct unnesting:
# STRUCT<a INT, b STRING>(1, 'foo').* --> CAST(STRUCT(1, 'foo') AS STRUCT<a INT, b STRING>).*
return self._parse_column_ops(data_type)
if data_type:
index2 = self._index
this = self._parse_primary()
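Because the typed constructor is canonicalized to a CAST, trailing column ops such as field access can now follow it directly. Illustrative (approximate output):

```python
import sqlglot

sql = "SELECT STRUCT<a INT64, b STRING>(1, 'foo').a"

# e.g. SELECT CAST(STRUCT(1, 'foo') AS STRUCT<a INT64, b STRING>).a
print(sqlglot.transpile(sql, read="bigquery", write="bigquery")[0])
```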
@@ -4471,9 +4503,14 @@ class Parser(metaclass=_Parser):
this=exp.DataType.Type[type_token.value],
expressions=expressions,
nested=nested,
values=values,
prefix=prefix,
)
# Empty arrays/structs are allowed
if values is not None:
cls = exp.Struct if is_struct else exp.Array
this = exp.cast(cls(expressions=values), this, copy=False)
elif expressions:
this.set("expressions", expressions)
@@ -5142,11 +5179,15 @@ class Parser(metaclass=_Parser):
return self.CONSTRAINT_PARSERS[constraint](self)
def _parse_unique_key(self) -> t.Optional[exp.Expression]:
return self._parse_id_var(any_token=False)
def _parse_unique(self) -> exp.UniqueColumnConstraint:
self._match_text_seq("KEY")
return self.expression(
exp.UniqueColumnConstraint,
this=self._parse_schema(self._parse_id_var(any_token=False)),
nulls=self._match_text_seq("NULLS", "NOT", "DISTINCT"),
this=self._parse_schema(self._parse_unique_key()),
index_type=self._match(TokenType.USING) and self._advance_any() and self._prev.text,
on_conflict=self._parse_on_conflict(),
)