Merging upstream version 22.2.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent b13ba670fd
commit 2c28c49d7e

148 changed files with 68457 additions and 63176 deletions
sqlglot/parser.py

@@ -17,6 +17,8 @@ if t.TYPE_CHECKING:
 
 logger = logging.getLogger("sqlglot")
 
+OPTIONS_TYPE = t.Dict[str, t.Sequence[t.Union[t.Sequence[str], str]]]
+
 
 def build_var_map(args: t.List) -> exp.StarMap | exp.VarMap:
     if len(args) == 1 and args[0].is_star:
@@ -367,6 +369,7 @@ class Parser(metaclass=_Parser):
         TokenType.TEMPORARY,
         TokenType.TOP,
         TokenType.TRUE,
+        TokenType.TRUNCATE,
         TokenType.UNIQUE,
         TokenType.UNPIVOT,
         TokenType.UPDATE,
@@ -435,6 +438,7 @@ class Parser(metaclass=_Parser):
         TokenType.TABLE,
         TokenType.TIMESTAMP,
         TokenType.TIMESTAMPTZ,
+        TokenType.TRUNCATE,
         TokenType.WINDOW,
         TokenType.XOR,
         *TYPE_TOKENS,
@@ -578,7 +582,7 @@ class Parser(metaclass=_Parser):
         exp.Column: lambda self: self._parse_column(),
         exp.Condition: lambda self: self._parse_conjunction(),
         exp.DataType: lambda self: self._parse_types(allow_identifiers=False),
-        exp.Expression: lambda self: self._parse_statement(),
+        exp.Expression: lambda self: self._parse_expression(),
         exp.From: lambda self: self._parse_from(),
         exp.Group: lambda self: self._parse_group(),
         exp.Having: lambda self: self._parse_having(),
@@ -625,10 +629,10 @@ class Parser(metaclass=_Parser):
         TokenType.SET: lambda self: self._parse_set(),
         TokenType.UNCACHE: lambda self: self._parse_uncache(),
         TokenType.UPDATE: lambda self: self._parse_update(),
+        TokenType.TRUNCATE: lambda self: self._parse_truncate_table(),
         TokenType.USE: lambda self: self.expression(
             exp.Use,
-            kind=self._match_texts(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"))
-            and exp.var(self._prev.text),
+            kind=self._parse_var_from_options(self.USABLES, raise_unmatched=False),
             this=self._parse_table(schema=False),
         ),
     }
@@ -642,36 +646,44 @@ class Parser(metaclass=_Parser):
         TokenType.DPIPE_SLASH: lambda self: self.expression(exp.Cbrt, this=self._parse_unary()),
     }
 
-    PRIMARY_PARSERS = {
-        TokenType.STRING: lambda self, token: self.expression(
-            exp.Literal, this=token.text, is_string=True
+    STRING_PARSERS = {
+        TokenType.HEREDOC_STRING: lambda self, token: self.expression(
+            exp.RawString, this=token.text
         ),
-        TokenType.NUMBER: lambda self, token: self.expression(
-            exp.Literal, this=token.text, is_string=False
-        ),
-        TokenType.STAR: lambda self, _: self.expression(
-            exp.Star, **{"except": self._parse_except(), "replace": self._parse_replace()}
-        ),
-        TokenType.NULL: lambda self, _: self.expression(exp.Null),
-        TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
-        TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
-        TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
-        TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
-        TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
-        TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
         TokenType.NATIONAL_STRING: lambda self, token: self.expression(
             exp.National, this=token.text
         ),
         TokenType.RAW_STRING: lambda self, token: self.expression(exp.RawString, this=token.text),
-        TokenType.HEREDOC_STRING: lambda self, token: self.expression(
-            exp.RawString, this=token.text
+        TokenType.STRING: lambda self, token: self.expression(
+            exp.Literal, this=token.text, is_string=True
         ),
         TokenType.UNICODE_STRING: lambda self, token: self.expression(
             exp.UnicodeString,
             this=token.text,
             escape=self._match_text_seq("UESCAPE") and self._parse_string(),
         ),
+    }
+
+    NUMERIC_PARSERS = {
+        TokenType.BIT_STRING: lambda self, token: self.expression(exp.BitString, this=token.text),
+        TokenType.BYTE_STRING: lambda self, token: self.expression(exp.ByteString, this=token.text),
+        TokenType.HEX_STRING: lambda self, token: self.expression(exp.HexString, this=token.text),
+        TokenType.NUMBER: lambda self, token: self.expression(
+            exp.Literal, this=token.text, is_string=False
+        ),
+    }
+
+    PRIMARY_PARSERS = {
+        **STRING_PARSERS,
+        **NUMERIC_PARSERS,
+        TokenType.INTRODUCER: lambda self, token: self._parse_introducer(token),
+        TokenType.NULL: lambda self, _: self.expression(exp.Null),
+        TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
+        TokenType.FALSE: lambda self, _: self.expression(exp.Boolean, this=False),
         TokenType.SESSION_PARAMETER: lambda self, _: self._parse_session_parameter(),
+        TokenType.STAR: lambda self, _: self.expression(
+            exp.Star, **{"except": self._parse_except(), "replace": self._parse_replace()}
+        ),
     }
 
     PLACEHOLDER_PARSERS = {
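The refactor above splits the old monolithic PRIMARY_PARSERS into STRING_PARSERS and NUMERIC_PARSERS so that _parse_string and _parse_number (updated further down in this diff) can dispatch against just the relevant subset. A minimal standalone sketch of the dict-merge dispatch pattern, with toy token types and parser callables that are illustrative only, not sqlglot's:

from enum import Enum, auto

class Tok(Enum):
    STRING = auto()
    NUMBER = auto()
    NULL = auto()

# Subset tables remain usable on their own ("parse only a string here")...
STRING_PARSERS = {Tok.STRING: lambda text: ("Literal", text, True)}
NUMERIC_PARSERS = {Tok.NUMBER: lambda text: ("Literal", text, False)}

# ...and are merged into the general-purpose table, as PRIMARY_PARSERS does.
PRIMARY_PARSERS = {
    **STRING_PARSERS,
    **NUMERIC_PARSERS,
    Tok.NULL: lambda text: ("Null",),
}

def parse_primary(tok, text):
    # Dispatch on token type exactly like the parser tables above.
    return PRIMARY_PARSERS[tok](text)

assert parse_primary(Tok.NUMBER, "42") == ("Literal", "42", False)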
@@ -799,7 +811,9 @@ class Parser(metaclass=_Parser):
             exp.CharacterSetColumnConstraint, this=self._parse_var_or_string()
         ),
         "CHECK": lambda self: self.expression(
-            exp.CheckColumnConstraint, this=self._parse_wrapped(self._parse_conjunction)
+            exp.CheckColumnConstraint,
+            this=self._parse_wrapped(self._parse_conjunction),
+            enforced=self._match_text_seq("ENFORCED"),
         ),
         "COLLATE": lambda self: self.expression(
             exp.CollateColumnConstraint, this=self._parse_var()
@@ -873,6 +887,8 @@ class Parser(metaclass=_Parser):
 
     FUNCTIONS_WITH_ALIASED_ARGS = {"STRUCT"}
 
+    KEY_VALUE_DEFINITIONS = (exp.Alias, exp.EQ, exp.PropertyEQ, exp.Slice)
+
     FUNCTION_PARSERS = {
         "CAST": lambda self: self._parse_cast(self.STRICT_CAST),
         "CONVERT": lambda self: self._parse_convert(self.STRICT_CAST),
@@ -895,6 +911,7 @@ class Parser(metaclass=_Parser):
 
     QUERY_MODIFIER_PARSERS = {
         TokenType.MATCH_RECOGNIZE: lambda self: ("match", self._parse_match_recognize()),
+        TokenType.PREWHERE: lambda self: ("prewhere", self._parse_prewhere()),
         TokenType.WHERE: lambda self: ("where", self._parse_where()),
         TokenType.GROUP_BY: lambda self: ("group", self._parse_group()),
         TokenType.HAVING: lambda self: ("having", self._parse_having()),
@@ -934,22 +951,23 @@ class Parser(metaclass=_Parser):
         exp.DataType.Type.JSON: lambda self, this, _: self.expression(exp.ParseJSON, this=this),
     }
 
-    MODIFIABLES = (exp.Subquery, exp.Subqueryable, exp.Table)
-
     DDL_SELECT_TOKENS = {TokenType.SELECT, TokenType.WITH, TokenType.L_PAREN}
 
     PRE_VOLATILE_TOKENS = {TokenType.CREATE, TokenType.REPLACE, TokenType.UNIQUE}
 
     TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}
-    TRANSACTION_CHARACTERISTICS = {
-        "ISOLATION LEVEL REPEATABLE READ",
-        "ISOLATION LEVEL READ COMMITTED",
-        "ISOLATION LEVEL READ UNCOMMITTED",
-        "ISOLATION LEVEL SERIALIZABLE",
-        "READ WRITE",
-        "READ ONLY",
+    TRANSACTION_CHARACTERISTICS: OPTIONS_TYPE = {
+        "ISOLATION": (
+            ("LEVEL", "REPEATABLE", "READ"),
+            ("LEVEL", "READ", "COMMITTED"),
+            ("LEVEL", "READ", "UNCOMITTED"),
+            ("LEVEL", "SERIALIZABLE"),
+        ),
+        "READ": ("WRITE", "ONLY"),
     }
 
+    USABLES: OPTIONS_TYPE = dict.fromkeys(("ROLE", "WAREHOUSE", "DATABASE", "SCHEMA"), tuple())
+
     INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
 
     CLONE_KEYWORDS = {"CLONE", "COPY"}
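In the new OPTIONS_TYPE shape introduced above, each option maps a leading keyword to the word sequences that may follow it: "ISOLATION" with ("LEVEL", "READ", "COMMITTED") encodes the phrase ISOLATION LEVEL READ COMMITTED, a plain string such as "WRITE" is a one-word continuation, and an empty tuple (as USABLES uses) marks a bare keyword. A rough illustration of what such a mapping accepts, using a hypothetical helper that is not part of this diff:

OPTIONS = {
    "ISOLATION": (
        ("LEVEL", "REPEATABLE", "READ"),
        ("LEVEL", "READ", "COMMITTED"),
    ),
    "READ": ("WRITE", "ONLY"),  # plain strings are single-word continuations
    "ROLE": (),                 # empty tuple: the keyword stands alone
}

def phrases(options):
    # Expand an OPTIONS_TYPE-style mapping into the phrases it accepts.
    for head, continuations in options.items():
        if not continuations:
            yield head
        for cont in continuations:
            words = (cont,) if isinstance(cont, str) else cont
            yield " ".join((head, *words))

print(sorted(phrases(OPTIONS)))
# ['ISOLATION LEVEL READ COMMITTED', 'ISOLATION LEVEL REPEATABLE READ',
#  'READ ONLY', 'READ WRITE', 'ROLE']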
@@ -1012,6 +1030,9 @@ class Parser(metaclass=_Parser):
     # If this is True and '(' is not found, the keyword will be treated as an identifier
     VALUES_FOLLOWED_BY_PAREN = True
 
+    # Whether implicit unnesting is supported, e.g. SELECT 1 FROM y.z AS z, z.a (Redshift)
+    SUPPORTS_IMPLICIT_UNNEST = False
+
     __slots__ = (
         "error_level",
         "error_message_context",
@@ -2450,10 +2471,37 @@ class Parser(metaclass=_Parser):
             alias=self._parse_table_alias() if parse_alias else None,
         )
 
+    def _implicit_unnests_to_explicit(self, this: E) -> E:
+        from sqlglot.optimizer.normalize_identifiers import normalize_identifiers as _norm
+
+        refs = {_norm(this.args["from"].this.copy(), dialect=self.dialect).alias_or_name}
+        for i, join in enumerate(this.args.get("joins") or []):
+            table = join.this
+            normalized_table = table.copy()
+            normalized_table.meta["maybe_column"] = True
+            normalized_table = _norm(normalized_table, dialect=self.dialect)
+
+            if isinstance(table, exp.Table) and not join.args.get("on"):
+                if normalized_table.parts[0].name in refs:
+                    table_as_column = table.to_column()
+                    unnest = exp.Unnest(expressions=[table_as_column])
+
+                    # Table.to_column creates a parent Alias node that we want to convert to
+                    # a TableAlias and attach to the Unnest, so it matches the parser's output
+                    if isinstance(table.args.get("alias"), exp.TableAlias):
+                        table_as_column.replace(table_as_column.this)
+                        exp.alias_(unnest, None, table=[table.args["alias"].this], copy=False)
+
+                    table.replace(unnest)
+
+            refs.add(normalized_table.alias_or_name)
+
+        return this
+
     def _parse_query_modifiers(
         self, this: t.Optional[exp.Expression]
     ) -> t.Optional[exp.Expression]:
-        if isinstance(this, self.MODIFIABLES):
+        if isinstance(this, (exp.Query, exp.Table)):
             for join in iter(self._parse_join, None):
                 this.append("joins", join)
             for lateral in iter(self._parse_lateral, None):
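_implicit_unnests_to_explicit, added above, walks the ON-less joins left to right while tracking which names are already in scope (the refs set seeded from the FROM clause); a joined table like z.a whose first part names an in-scope alias is actually a column reference and is rewritten into an explicit UNNEST. A standalone sketch of that scope-tracking idea on plain dotted names (toy data model, not sqlglot's AST):

def rewrite_implicit_unnests(from_alias, join_tables):
    # Names that a later, comma-joined item may legally refer to,
    # mirroring how the parser seeds refs from the FROM clause.
    refs = {from_alias}
    out = []
    for name in join_tables:
        first, _, rest = name.partition(".")
        if rest and first in refs:
            # e.g. FROM y.z AS z, z.a  ->  z.a is an implicit unnest
            out.append(f"UNNEST({name})")
        else:
            out.append(name)
        refs.add(name.rsplit(".", 1)[-1])  # crude alias_or_name stand-in
    return out

# Redshift-style: SELECT 1 FROM y.z AS z, z.a
print(rewrite_implicit_unnests("z", ["z.a"]))  # ['UNNEST(z.a)']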
@@ -2478,6 +2526,10 @@ class Parser(metaclass=_Parser):
                                 offset.set("expressions", limit_by_expressions)
                     continue
             break
+
+        if self.SUPPORTS_IMPLICIT_UNNEST and this and "from" in this.args:
+            this = self._implicit_unnests_to_explicit(this)
+
         return this
 
     def _parse_hint(self) -> t.Optional[exp.Hint]:
@@ -2803,7 +2855,9 @@ class Parser(metaclass=_Parser):
             or self._parse_placeholder()
         )
 
-    def _parse_table_parts(self, schema: bool = False, is_db_reference: bool = False) -> exp.Table:
+    def _parse_table_parts(
+        self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
+    ) -> exp.Table:
         catalog = None
         db = None
         table: t.Optional[exp.Expression | str] = self._parse_table_part(schema=schema)
@@ -2817,8 +2871,20 @@ class Parser(metaclass=_Parser):
             else:
                 catalog = db
                 db = table
                 # "" used for tsql FROM a..b case
                 table = self._parse_table_part(schema=schema) or ""
+
+        if (
+            wildcard
+            and self._is_connected()
+            and (isinstance(table, exp.Identifier) or not table)
+            and self._match(TokenType.STAR)
+        ):
+            if isinstance(table, exp.Identifier):
+                table.args["this"] += "*"
+            else:
+                table = exp.Identifier(this="*")
+
         if is_db_reference:
             catalog = db
             db = table
@@ -2861,6 +2927,9 @@ class Parser(metaclass=_Parser):
 
         bracket = parse_bracket and self._parse_bracket(None)
         bracket = self.expression(exp.Table, this=bracket) if bracket else None
+
+        only = self._match(TokenType.ONLY)
+
         this = t.cast(
             exp.Expression,
             bracket
@@ -2869,6 +2938,12 @@ class Parser(metaclass=_Parser):
             ),
         )
 
+        if only:
+            this.set("only", only)
+
+        # Postgres supports a wildcard (table) suffix operator, which is a no-op in this context
+        self._match_text_seq("*")
+
         if schema:
             return self._parse_schema(this=this)
 
@@ -3161,6 +3236,14 @@ class Parser(metaclass=_Parser):
     def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
         return [agg.alias for agg in aggregations]
 
+    def _parse_prewhere(self, skip_where_token: bool = False) -> t.Optional[exp.PreWhere]:
+        if not skip_where_token and not self._match(TokenType.PREWHERE):
+            return None
+
+        return self.expression(
+            exp.PreWhere, comments=self._prev_comments, this=self._parse_conjunction()
+        )
+
     def _parse_where(self, skip_where_token: bool = False) -> t.Optional[exp.Where]:
         if not skip_where_token and not self._match(TokenType.WHERE):
             return None
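PREWHERE is ClickHouse's pre-filtering clause; with _parse_prewhere wired into QUERY_MODIFIER_PARSERS earlier in this diff, a select can now carry both a prewhere and a where modifier. A quick usage sketch against the ClickHouse dialect (the printed shapes are what this diff suggests, not verified output):

import sqlglot

ast = sqlglot.parse_one(
    "SELECT a FROM t PREWHERE b = 1 WHERE c = 2",
    read="clickhouse",
)

# The new modifier should sit alongside the ordinary WHERE.
print(ast.args.get("prewhere"))  # expecting an exp.PreWhere node
print(ast.sql("clickhouse"))     # round-trips the PREWHERE clause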
@@ -3291,8 +3374,12 @@ class Parser(metaclass=_Parser):
             return None
         return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))
 
-    def _parse_ordered(self, parse_method: t.Optional[t.Callable] = None) -> exp.Ordered:
+    def _parse_ordered(
+        self, parse_method: t.Optional[t.Callable] = None
+    ) -> t.Optional[exp.Ordered]:
         this = parse_method() if parse_method else self._parse_conjunction()
+        if not this:
+            return None
 
         asc = self._match(TokenType.ASC)
         desc = self._match(TokenType.DESC) or (asc and False)
@@ -3510,7 +3597,7 @@ class Parser(metaclass=_Parser):
 
         if self._match_text_seq("DISTINCT", "FROM"):
             klass = exp.NullSafeEQ if negate else exp.NullSafeNEQ
-            return self.expression(klass, this=this, expression=self._parse_conjunction())
+            return self.expression(klass, this=this, expression=self._parse_bitwise())
 
         expression = self._parse_null() or self._parse_boolean()
         if not expression:
@@ -3528,7 +3615,7 @@ class Parser(metaclass=_Parser):
             matched_l_paren = self._prev.token_type == TokenType.L_PAREN
             expressions = self._parse_csv(lambda: self._parse_select_or_expression(alias=alias))
 
-            if len(expressions) == 1 and isinstance(expressions[0], exp.Subqueryable):
+            if len(expressions) == 1 and isinstance(expressions[0], exp.Query):
                 this = self.expression(exp.In, this=this, query=expressions[0])
             else:
                 this = self.expression(exp.In, this=this, expressions=expressions)
@@ -3959,7 +4046,7 @@ class Parser(metaclass=_Parser):
 
             this = self._parse_query_modifiers(seq_get(expressions, 0))
 
-            if isinstance(this, exp.Subqueryable):
+            if isinstance(this, exp.UNWRAPPED_QUERIES):
                 this = self._parse_set_operations(
                     self._parse_subquery(this=this, parse_alias=False)
                 )
@@ -4064,6 +4151,9 @@ class Parser(metaclass=_Parser):
             alias = upper in self.FUNCTIONS_WITH_ALIASED_ARGS
             args = self._parse_csv(lambda: self._parse_lambda(alias=alias))
 
+            if alias:
+                args = self._kv_to_prop_eq(args)
+
             if function and not anonymous:
                 if "dialect" in function.__code__.co_varnames:
                     func = function(args, dialect=self.dialect)
@@ -4076,6 +4166,8 @@ class Parser(metaclass=_Parser):
 
                 this = func
             else:
+                if token_type == TokenType.IDENTIFIER:
+                    this = exp.Identifier(this=this, quoted=True)
                 this = self.expression(exp.Anonymous, this=this, expressions=args)
 
         if isinstance(this, exp.Expression):
@@ -4084,6 +4176,26 @@ class Parser(metaclass=_Parser):
         self._match_r_paren(this)
         return self._parse_window(this)
 
+    def _kv_to_prop_eq(self, expressions: t.List[exp.Expression]) -> t.List[exp.Expression]:
+        transformed = []
+
+        for e in expressions:
+            if isinstance(e, self.KEY_VALUE_DEFINITIONS):
+                if isinstance(e, exp.Alias):
+                    e = self.expression(exp.PropertyEQ, this=e.args.get("alias"), expression=e.this)
+
+                if not isinstance(e, exp.PropertyEQ):
+                    e = self.expression(
+                        exp.PropertyEQ, this=exp.to_identifier(e.name), expression=e.expression
+                    )
+
+                if isinstance(e.this, exp.Column):
+                    e.this.replace(e.this.this)
+
+            transformed.append(e)
+
+        return transformed
+
     def _parse_function_parameter(self) -> t.Optional[exp.Expression]:
         return self._parse_column_def(self._parse_id_var())
 
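_kv_to_prop_eq normalizes the key/value argument spellings listed in KEY_VALUE_DEFINITIONS (aliases, equalities, slices) into a single PropertyEQ shape, which is why the DuckDB brace-struct branch and aliased function arguments can share one code path. A toy version of the same normalization over (kind, key, value) tuples, illustrative only:

def kv_to_prop_eq(args):
    out = []
    for kind, key, value in args:
        # "value AS key" and "key = value" collapse into one canonical form,
        # just as Alias and EQ nodes both become PropertyEQ.
        if kind in {"alias", "eq", "slice"}:
            out.append(("prop_eq", key, value))
        else:
            out.append((kind, key, value))
    return out

print(kv_to_prop_eq([("alias", "a", 1), ("eq", "b", 2), ("star", None, None)]))
# [('prop_eq', 'a', 1), ('prop_eq', 'b', 2), ('star', None, None)]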
@@ -4496,7 +4608,7 @@ class Parser(metaclass=_Parser):
 
         # https://duckdb.org/docs/sql/data_types/struct.html#creating-structs
         if bracket_kind == TokenType.L_BRACE:
-            this = self.expression(exp.Struct, expressions=expressions)
+            this = self.expression(exp.Struct, expressions=self._kv_to_prop_eq(expressions))
         elif not this or this.name.upper() == "ARRAY":
             this = self.expression(exp.Array, expressions=expressions)
         else:
@@ -4747,12 +4859,10 @@ class Parser(metaclass=_Parser):
         return None
 
     @t.overload
-    def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject:
-        ...
+    def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...
 
     @t.overload
-    def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg:
-        ...
+    def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...
 
     def _parse_json_object(self, agg=False):
         star = self._parse_star()
@@ -5140,16 +5250,16 @@ class Parser(metaclass=_Parser):
         return None
 
     def _parse_string(self) -> t.Optional[exp.Expression]:
-        if self._match_set((TokenType.STRING, TokenType.RAW_STRING)):
-            return self.PRIMARY_PARSERS[self._prev.token_type](self, self._prev)
+        if self._match_set(self.STRING_PARSERS):
+            return self.STRING_PARSERS[self._prev.token_type](self, self._prev)
         return self._parse_placeholder()
 
     def _parse_string_as_identifier(self) -> t.Optional[exp.Identifier]:
         return exp.to_identifier(self._match(TokenType.STRING) and self._prev.text, quoted=True)
 
     def _parse_number(self) -> t.Optional[exp.Expression]:
-        if self._match(TokenType.NUMBER):
-            return self.PRIMARY_PARSERS[TokenType.NUMBER](self, self._prev)
+        if self._match_set(self.NUMERIC_PARSERS):
+            return self.NUMERIC_PARSERS[self._prev.token_type](self, self._prev)
         return self._parse_placeholder()
 
     def _parse_identifier(self) -> t.Optional[exp.Expression]:
@@ -5182,6 +5292,9 @@ class Parser(metaclass=_Parser):
     def _parse_var_or_string(self) -> t.Optional[exp.Expression]:
         return self._parse_var() or self._parse_string()
 
+    def _parse_primary_or_var(self) -> t.Optional[exp.Expression]:
+        return self._parse_primary() or self._parse_var(any_token=True)
+
     def _parse_null(self) -> t.Optional[exp.Expression]:
         if self._match_set(self.NULL_TOKENS):
             return self.PRIMARY_PARSERS[TokenType.NULL](self, self._prev)
@@ -5200,16 +5313,12 @@ class Parser(metaclass=_Parser):
         return self._parse_placeholder()
 
     def _parse_parameter(self) -> exp.Parameter:
-        def _parse_parameter_part() -> t.Optional[exp.Expression]:
-            return (
-                self._parse_identifier() or self._parse_primary() or self._parse_var(any_token=True)
-            )
-
-        self._match(TokenType.L_BRACE)
-        this = _parse_parameter_part()
-        expression = self._match(TokenType.COLON) and _parse_parameter_part()
+        this = self._parse_identifier() or self._parse_primary_or_var()
+        expression = self._match(TokenType.COLON) and (
+            self._parse_identifier() or self._parse_primary_or_var()
+        )
         self._match(TokenType.R_BRACE)
 
         return self.expression(exp.Parameter, this=this, expression=expression)
 
     def _parse_placeholder(self) -> t.Optional[exp.Expression]:
@@ -5376,35 +5485,15 @@ class Parser(metaclass=_Parser):
             exp.DropPartition, expressions=self._parse_csv(self._parse_partition), exists=exists
         )
 
-    def _parse_add_constraint(self) -> exp.AddConstraint:
-        this = None
-        kind = self._prev.token_type
-
-        if kind == TokenType.CONSTRAINT:
-            this = self._parse_id_var()
-
-            if self._match_text_seq("CHECK"):
-                expression = self._parse_wrapped(self._parse_conjunction)
-                enforced = self._match_text_seq("ENFORCED") or False
-
-                return self.expression(
-                    exp.AddConstraint, this=this, expression=expression, enforced=enforced
-                )
-
-        if kind == TokenType.FOREIGN_KEY or self._match(TokenType.FOREIGN_KEY):
-            expression = self._parse_foreign_key()
-        elif kind == TokenType.PRIMARY_KEY or self._match(TokenType.PRIMARY_KEY):
-            expression = self._parse_primary_key()
-        else:
-            expression = None
-
-        return self.expression(exp.AddConstraint, this=this, expression=expression)
-
     def _parse_alter_table_add(self) -> t.List[exp.Expression]:
         index = self._index - 1
 
-        if self._match_set(self.ADD_CONSTRAINT_TOKENS):
-            return self._parse_csv(self._parse_add_constraint)
+        if self._match_set(self.ADD_CONSTRAINT_TOKENS, advance=False):
+            return self._parse_csv(
+                lambda: self.expression(
+                    exp.AddConstraint, expressions=self._parse_csv(self._parse_constraint)
+                )
+            )
 
         self._retreat(index)
         if not self.ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN and self._match_text_seq("ADD"):
@@ -5472,6 +5561,7 @@ class Parser(metaclass=_Parser):
         parser = self.ALTER_PARSERS.get(self._prev.text.upper()) if self._prev else None
         if parser:
             actions = ensure_list(parser(self))
+            options = self._parse_csv(self._parse_property)
 
             if not self._curr and actions:
                 return self.expression(
@@ -5480,6 +5570,7 @@ class Parser(metaclass=_Parser):
                     exists=exists,
                     actions=actions,
                     only=only,
+                    options=options,
                 )
 
         return self._parse_as_command(start)
@@ -5610,11 +5701,34 @@ class Parser(metaclass=_Parser):
 
         return set_
 
-    def _parse_var_from_options(self, options: t.Collection[str]) -> t.Optional[exp.Var]:
-        for option in options:
-            if self._match_text_seq(*option.split(" ")):
-                return exp.var(option)
-        return None
+    def _parse_var_from_options(
+        self, options: OPTIONS_TYPE, raise_unmatched: bool = True
+    ) -> t.Optional[exp.Var]:
+        start = self._curr
+        if not start:
+            return None
+
+        option = start.text.upper()
+        continuations = options.get(option)
+
+        index = self._index
+        self._advance()
+        for keywords in continuations or []:
+            if isinstance(keywords, str):
+                keywords = (keywords,)
+
+            if self._match_text_seq(*keywords):
+                option = f"{option} {' '.join(keywords)}"
+                break
+        else:
+            if continuations or continuations is None:
+                if raise_unmatched:
+                    self.raise_error(f"Unknown option {option}")
+
+                self._retreat(index)
+                return None
+
+        return exp.var(option)
 
     def _parse_as_command(self, start: Token) -> exp.Command:
         while self._curr:
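The rewritten _parse_var_from_options consumes one leading token and then tries each continuation, relying on Python's for/else: falling through without a break means no continuation matched, which either raises or backtracks (the raise_unmatched=False mode the USE parser above relies on). A self-contained sketch of the same control flow over a plain word list (no tokenizer, simplified):

def match_option(words, options, raise_unmatched=True):
    if not words:
        return None
    head = words[0].upper()
    rest = [w.upper() for w in words[1:]]
    continuations = options.get(head)

    for keywords in continuations or []:
        if isinstance(keywords, str):
            keywords = (keywords,)
        if rest[: len(keywords)] == list(keywords):
            return " ".join((head, *keywords))
    else:
        # No continuation matched: unknown keyword or incomplete phrase.
        if continuations or continuations is None:
            if raise_unmatched:
                raise ValueError(f"Unknown option {head}")
            return None  # caller backtracks and tries something else

    return head  # empty continuation tuple: bare keyword is valid

opts = {"READ": ("WRITE", "ONLY"), "ROLE": ()}
print(match_option(["read", "only"], opts))  # READ ONLY
print(match_option(["role"], opts))          # ROLE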
@@ -5806,14 +5920,12 @@ class Parser(metaclass=_Parser):
         return True
 
     @t.overload
-    def _replace_columns_with_dots(self, this: exp.Expression) -> exp.Expression:
-        ...
+    def _replace_columns_with_dots(self, this: exp.Expression) -> exp.Expression: ...
 
     @t.overload
     def _replace_columns_with_dots(
         self, this: t.Optional[exp.Expression]
-    ) -> t.Optional[exp.Expression]:
-        ...
+    ) -> t.Optional[exp.Expression]: ...
 
     def _replace_columns_with_dots(self, this):
         if isinstance(this, exp.Dot):
@@ -5849,3 +5961,53 @@ class Parser(metaclass=_Parser):
             else:
                 column.replace(dot_or_id)
         return node
+
+    def _parse_truncate_table(self) -> t.Optional[exp.TruncateTable] | exp.Expression:
+        start = self._prev
+
+        # Not to be confused with TRUNCATE(number, decimals) function call
+        if self._match(TokenType.L_PAREN):
+            self._retreat(self._index - 2)
+            return self._parse_function()
+
+        # Clickhouse supports TRUNCATE DATABASE as well
+        is_database = self._match(TokenType.DATABASE)
+
+        self._match(TokenType.TABLE)
+
+        exists = self._parse_exists(not_=False)
+
+        expressions = self._parse_csv(
+            lambda: self._parse_table(schema=True, is_db_reference=is_database)
+        )
+
+        cluster = self._parse_on_property() if self._match(TokenType.ON) else None
+
+        if self._match_text_seq("RESTART", "IDENTITY"):
+            identity = "RESTART"
+        elif self._match_text_seq("CONTINUE", "IDENTITY"):
+            identity = "CONTINUE"
+        else:
+            identity = None
+
+        if self._match_text_seq("CASCADE") or self._match_text_seq("RESTRICT"):
+            option = self._prev.text
+        else:
+            option = None
+
+        partition = self._parse_partition()
+
+        # Fallback case
+        if self._curr:
+            return self._parse_as_command(start)
+
+        return self.expression(
+            exp.TruncateTable,
+            expressions=expressions,
+            is_database=is_database,
+            exists=exists,
+            cluster=cluster,
+            identity=identity,
+            option=option,
+            partition=partition,
+        )
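With TokenType.TRUNCATE registered as a statement starter and _parse_truncate_table handling the grammar above, TRUNCATE statements now build a structured exp.TruncateTable node (falling back to a generic command only when trailing tokens remain). A usage sketch; the node type follows from this diff, the exact rendered SQL is assumed:

import sqlglot
from sqlglot import exp

ast = sqlglot.parse_one("TRUNCATE TABLE db.t1, t2 RESTART IDENTITY")

print(isinstance(ast, exp.TruncateTable))  # expecting True
print(len(ast.expressions))                # 2: both target tables
print(ast.sql())                           # round-trip back to SQL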