1
0
Fork 0

Merging upstream version 11.1.3 of sqlglot.

Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
Daniel Baumann 2025-02-13 15:26:26 +01:00
parent 8c1c1864c5
commit fb546b57e5
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
95 changed files with 32569 additions and 30081 deletions

View file

@ -2,6 +2,7 @@ from __future__ import annotations
import logging
import typing as t
from collections import defaultdict
from sqlglot import exp
from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
@ -157,7 +158,6 @@ class Parser(metaclass=_Parser):
ID_VAR_TOKENS = {
TokenType.VAR,
TokenType.ALWAYS,
TokenType.ANTI,
TokenType.APPLY,
TokenType.AUTO_INCREMENT,
@ -186,8 +186,6 @@ class Parser(metaclass=_Parser):
TokenType.FOLLOWING,
TokenType.FORMAT,
TokenType.FUNCTION,
TokenType.GENERATED,
TokenType.IDENTITY,
TokenType.IF,
TokenType.INDEX,
TokenType.ISNULL,
@ -213,7 +211,6 @@ class Parser(metaclass=_Parser):
TokenType.ROW,
TokenType.ROWS,
TokenType.SCHEMA,
TokenType.SCHEMA_COMMENT,
TokenType.SEED,
TokenType.SEMI,
TokenType.SET,
@ -481,9 +478,7 @@ class Parser(metaclass=_Parser):
PLACEHOLDER_PARSERS = {
TokenType.PLACEHOLDER: lambda self: self.expression(exp.Placeholder),
TokenType.PARAMETER: lambda self: self.expression(
exp.Parameter, this=self._parse_var() or self._parse_primary()
),
TokenType.PARAMETER: lambda self: self._parse_parameter(),
TokenType.COLON: lambda self: self.expression(exp.Placeholder, this=self._prev.text)
if self._match_set((TokenType.NUMBER, TokenType.VAR))
else None,
@ -516,6 +511,9 @@ class Parser(metaclass=_Parser):
PROPERTY_PARSERS = {
"AUTO_INCREMENT": lambda self: self._parse_property_assignment(exp.AutoIncrementProperty),
"CHARACTER SET": lambda self: self._parse_character_set(),
"CLUSTER BY": lambda self: self.expression(
exp.Cluster, expressions=self._parse_csv(self._parse_ordered)
),
"LOCATION": lambda self: self._parse_property_assignment(exp.LocationProperty),
"PARTITION BY": lambda self: self._parse_partitioned_by(),
"PARTITIONED BY": lambda self: self._parse_partitioned_by(),
@ -576,20 +574,54 @@ class Parser(metaclass=_Parser):
"BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
"ALGORITHM": lambda self: self._parse_property_assignment(exp.AlgorithmProperty),
"DEFINER": lambda self: self._parse_definer(),
"LOCK": lambda self: self._parse_locking(),
"LOCKING": lambda self: self._parse_locking(),
}
# Maps an upper-cased constraint keyword to the callable that parses the
# remainder of that constraint. Keys are strings (not TokenType members) so
# dialect subclasses can register constraints without minting new tokens.
CONSTRAINT_PARSERS = {
    "AUTOINCREMENT": lambda self: self._parse_auto_increment(),
    "AUTO_INCREMENT": lambda self: self._parse_auto_increment(),
    "CASESPECIFIC": lambda self: self.expression(exp.CaseSpecificColumnConstraint, not_=False),
    "CHARACTER SET": lambda self: self.expression(
        exp.CharacterSetColumnConstraint, this=self._parse_var_or_string()
    ),
    "CHECK": lambda self: self.expression(
        exp.CheckColumnConstraint, this=self._parse_wrapped(self._parse_conjunction)
    ),
    "COLLATE": lambda self: self.expression(
        exp.CollateColumnConstraint, this=self._parse_var()
    ),
    "COMMENT": lambda self: self.expression(
        exp.CommentColumnConstraint, this=self._parse_string()
    ),
    "DEFAULT": lambda self: self.expression(
        exp.DefaultColumnConstraint, this=self._parse_bitwise()
    ),
    "ENCODE": lambda self: self.expression(exp.EncodeColumnConstraint, this=self._parse_var()),
    "FOREIGN KEY": lambda self: self._parse_foreign_key(),
    "FORMAT": lambda self: self.expression(
        exp.DateFormatColumnConstraint, this=self._parse_var_or_string()
    ),
    "GENERATED": lambda self: self._parse_generated_as_identity(),
    "IDENTITY": lambda self: self._parse_auto_increment(),
    "LIKE": lambda self: self._parse_create_like(),
    "NOT": lambda self: self._parse_not_constraint(),
    "NULL": lambda self: self.expression(exp.NotNullColumnConstraint, allow_null=True),
    "PATH": lambda self: self.expression(exp.PathColumnConstraint, this=self._parse_string()),
    "PRIMARY KEY": lambda self: self._parse_primary_key(),
    "TITLE": lambda self: self.expression(
        exp.TitleColumnConstraint, this=self._parse_var_or_string()
    ),
    "UNIQUE": lambda self: self._parse_unique(),
    "UPPERCASE": lambda self: self.expression(exp.UppercaseColumnConstraint),
}

# Constraint keywords that may appear in a schema definition without a
# preceding CONSTRAINT <name> prefix.
SCHEMA_UNNAMED_CONSTRAINTS = {"CHECK", "FOREIGN KEY", "LIKE", "PRIMARY KEY", "UNIQUE"}
# Constructs recognized by a bare keyword, with no parenthesized argument
# list required: CASE expressions, IF conditionals, and ANY comparisons.
NO_PAREN_FUNCTION_PARSERS = {
TokenType.CASE: lambda self: self._parse_case(),
TokenType.IF: lambda self: self._parse_if(),
TokenType.ANY: lambda self: self.expression(exp.Any, this=self._parse_bitwise()),
}
FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
@ -637,6 +669,8 @@ class Parser(metaclass=_Parser):
TRANSACTION_KIND = {"DEFERRED", "IMMEDIATE", "EXCLUSIVE"}
INSERT_ALTERNATIVES = {"ABORT", "FAIL", "IGNORE", "REPLACE", "ROLLBACK"}
WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
ADD_CONSTRAINT_TOKENS = {TokenType.CONSTRAINT, TokenType.PRIMARY_KEY, TokenType.FOREIGN_KEY}
@ -940,7 +974,9 @@ class Parser(metaclass=_Parser):
def _parse_create(self) -> t.Optional[exp.Expression]:
start = self._prev
replace = self._match_pair(TokenType.OR, TokenType.REPLACE)
replace = self._prev.text.upper() == "REPLACE" or self._match_pair(
TokenType.OR, TokenType.REPLACE
)
set_ = self._match(TokenType.SET) # Teradata
multiset = self._match_text_seq("MULTISET") # Teradata
global_temporary = self._match_text_seq("GLOBAL", "TEMPORARY") # Teradata
@ -958,7 +994,7 @@ class Parser(metaclass=_Parser):
create_token = self._match_set(self.CREATABLES) and self._prev
if not create_token:
properties = self._parse_properties()
properties = self._parse_properties() # exp.Properties.Location.POST_CREATE
create_token = self._match_set(self.CREATABLES) and self._prev
if not properties or not create_token:
@ -994,15 +1030,37 @@ class Parser(metaclass=_Parser):
):
table_parts = self._parse_table_parts(schema=True)
if self._match(TokenType.COMMA): # comma-separated properties before schema definition
properties = self._parse_properties(before=True)
# exp.Properties.Location.POST_NAME
if self._match(TokenType.COMMA):
temp_properties = self._parse_properties(before=True)
if properties and temp_properties:
properties.expressions.append(temp_properties.expressions)
elif temp_properties:
properties = temp_properties
this = self._parse_schema(this=table_parts)
if not properties: # properties after schema definition
properties = self._parse_properties()
# exp.Properties.Location.POST_SCHEMA and POST_WITH
temp_properties = self._parse_properties()
if properties and temp_properties:
properties.expressions.append(temp_properties.expressions)
elif temp_properties:
properties = temp_properties
self._match(TokenType.ALIAS)
# exp.Properties.Location.POST_ALIAS
if not (
self._match(TokenType.SELECT, advance=False)
or self._match(TokenType.WITH, advance=False)
or self._match(TokenType.L_PAREN, advance=False)
):
temp_properties = self._parse_properties()
if properties and temp_properties:
properties.expressions.append(temp_properties.expressions)
elif temp_properties:
properties = temp_properties
expression = self._parse_ddl_select()
if create_token.token_type == TokenType.TABLE:
@ -1022,12 +1080,13 @@ class Parser(metaclass=_Parser):
while True:
index = self._parse_create_table_index()
# post index PARTITION BY property
# exp.Properties.Location.POST_INDEX
if self._match(TokenType.PARTITION_BY, advance=False):
if properties:
properties.expressions.append(self._parse_property())
else:
properties = self._parse_properties()
temp_properties = self._parse_properties()
if properties and temp_properties:
properties.expressions.append(temp_properties.expressions)
elif temp_properties:
properties = temp_properties
if not index:
break
@ -1080,7 +1139,7 @@ class Parser(metaclass=_Parser):
return self.PROPERTY_PARSERS[self._prev.text.upper()](self)
if self._match_pair(TokenType.DEFAULT, TokenType.CHARACTER_SET):
return self._parse_character_set(True)
return self._parse_character_set(default=True)
if self._match_pair(TokenType.COMPOUND, TokenType.SORTKEY):
return self._parse_sortkey(compound=True)
@ -1240,7 +1299,7 @@ class Parser(metaclass=_Parser):
def _parse_blockcompression(self) -> exp.Expression:
self._match_text_seq("BLOCKCOMPRESSION")
self._match(TokenType.EQ)
always = self._match(TokenType.ALWAYS)
always = self._match_text_seq("ALWAYS")
manual = self._match_text_seq("MANUAL")
never = self._match_text_seq("NEVER")
default = self._match_text_seq("DEFAULT")
@ -1274,6 +1333,56 @@ class Parser(metaclass=_Parser):
for_none=for_none,
)
def _parse_locking(self) -> exp.Expression:
    """Parse a LOCKING/LOCK property (Teradata-style, per the LOCK/LOCKING
    entries in PROPERTY_PARSERS): object kind, optional object name, FOR/IN,
    the lock type, and a trailing OVERRIDE flag."""
    kind = None
    for token_type, label in (
        (TokenType.TABLE, "TABLE"),
        (TokenType.VIEW, "VIEW"),
        (TokenType.ROW, "ROW"),
    ):
        if self._match(token_type):
            kind = label
            break
    else:
        if self._match_text_seq("DATABASE"):
            kind = "DATABASE"

    # Only named objects carry a dotted name; ROW (and no kind) do not.
    this = self._parse_table_parts() if kind in ("DATABASE", "TABLE", "VIEW") else None

    if self._match(TokenType.FOR):
        for_or_in = "FOR"
    elif self._match(TokenType.IN):
        for_or_in = "IN"
    else:
        for_or_in = None

    lock_type = None
    if self._match_text_seq("ACCESS"):
        lock_type = "ACCESS"
    elif self._match_texts(("EXCL", "EXCLUSIVE")):
        lock_type = "EXCLUSIVE"
    elif self._match_text_seq("SHARE"):
        lock_type = "SHARE"
    elif self._match_text_seq("READ"):
        lock_type = "READ"
    elif self._match_text_seq("WRITE"):
        lock_type = "WRITE"
    elif self._match_text_seq("CHECKSUM"):
        lock_type = "CHECKSUM"

    return self.expression(
        exp.LockingProperty,
        this=this,
        kind=kind,
        for_or_in=for_or_in,
        lock_type=lock_type,
        override=self._match_text_seq("OVERRIDE"),
    )
def _parse_partition_by(self) -> t.List[t.Optional[exp.Expression]]:
if self._match(TokenType.PARTITION_BY):
return self._parse_csv(self._parse_conjunction)
@ -1351,6 +1460,7 @@ class Parser(metaclass=_Parser):
this: t.Optional[exp.Expression]
alternative = None
if self._match_text_seq("DIRECTORY"):
this = self.expression(
exp.Directory,
@ -1359,6 +1469,9 @@ class Parser(metaclass=_Parser):
row_format=self._parse_row_format(match_row=True),
)
else:
if self._match(TokenType.OR):
alternative = self._match_texts(self.INSERT_ALTERNATIVES) and self._prev.text
self._match(TokenType.INTO)
self._match(TokenType.TABLE)
this = self._parse_table(schema=True)
@ -1370,6 +1483,7 @@ class Parser(metaclass=_Parser):
partition=self._parse_partition(),
expression=self._parse_ddl_select(),
overwrite=overwrite,
alternative=alternative,
)
def _parse_row(self) -> t.Optional[exp.Expression]:
@ -1607,7 +1721,7 @@ class Parser(metaclass=_Parser):
index = self._index
if self._match(TokenType.L_PAREN):
columns = self._parse_csv(lambda: self._parse_column_def(self._parse_id_var()))
columns = self._parse_csv(self._parse_function_parameter)
self._match_r_paren() if columns else self._retreat(index)
else:
columns = None
@ -2080,27 +2194,33 @@ class Parser(metaclass=_Parser):
if not skip_group_by_token and not self._match(TokenType.GROUP_BY):
return None
expressions = self._parse_csv(self._parse_conjunction)
grouping_sets = self._parse_grouping_sets()
elements = defaultdict(list)
self._match(TokenType.COMMA)
with_ = self._match(TokenType.WITH)
cube = self._match(TokenType.CUBE) and (
with_ or self._parse_wrapped_csv(self._parse_column)
)
while True:
expressions = self._parse_csv(self._parse_conjunction)
if expressions:
elements["expressions"].extend(expressions)
self._match(TokenType.COMMA)
rollup = self._match(TokenType.ROLLUP) and (
with_ or self._parse_wrapped_csv(self._parse_column)
)
grouping_sets = self._parse_grouping_sets()
if grouping_sets:
elements["grouping_sets"].extend(grouping_sets)
return self.expression(
exp.Group,
expressions=expressions,
grouping_sets=grouping_sets,
cube=cube,
rollup=rollup,
)
rollup = None
cube = None
with_ = self._match(TokenType.WITH)
if self._match(TokenType.ROLLUP):
rollup = with_ or self._parse_wrapped_csv(self._parse_column)
elements["rollup"].extend(ensure_list(rollup))
if self._match(TokenType.CUBE):
cube = with_ or self._parse_wrapped_csv(self._parse_column)
elements["cube"].extend(ensure_list(cube))
if not (expressions or grouping_sets or rollup or cube):
break
return self.expression(exp.Group, **elements) # type: ignore
def _parse_grouping_sets(self) -> t.Optional[t.List[t.Optional[exp.Expression]]]:
if not self._match(TokenType.GROUPING_SETS):
@ -2357,6 +2477,8 @@ class Parser(metaclass=_Parser):
def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
index = self._index
prefix = self._match_text_seq("SYSUDTLIB", ".")
if not self._match_set(self.TYPE_TOKENS):
return None
@ -2458,6 +2580,7 @@ class Parser(metaclass=_Parser):
expressions=expressions,
nested=nested,
values=values,
prefix=prefix,
)
def _parse_struct_kwargs(self) -> t.Optional[exp.Expression]:
@ -2512,8 +2635,14 @@ class Parser(metaclass=_Parser):
if op:
this = op(self, this, field)
elif isinstance(this, exp.Column) and not this.table:
this = self.expression(exp.Column, this=field, table=this.this)
elif isinstance(this, exp.Column) and not this.args.get("catalog"):
this = self.expression(
exp.Column,
this=field,
table=this.this,
db=this.args.get("table"),
catalog=this.args.get("db"),
)
else:
this = self.expression(exp.Dot, this=this, expression=field)
this = self._parse_bracket(this)
@ -2632,6 +2761,9 @@ class Parser(metaclass=_Parser):
self._match_r_paren(this)
return self._parse_window(this)
def _parse_function_parameter(self) -> t.Optional[exp.Expression]:
    """Parse one function parameter: an identifier optionally followed by a
    type and constraints, delegated to ``_parse_column_def``."""
    name = self._parse_id_var()
    return self._parse_column_def(name)
def _parse_user_defined_function(
self, kind: t.Optional[TokenType] = None
) -> t.Optional[exp.Expression]:
@ -2643,7 +2775,7 @@ class Parser(metaclass=_Parser):
if not self._match(TokenType.L_PAREN):
return this
expressions = self._parse_csv(self._parse_udf_kwarg)
expressions = self._parse_csv(self._parse_function_parameter)
self._match_r_paren()
return self.expression(
exp.UserDefinedFunction, this=this, expressions=expressions, wrapped=True
@ -2669,15 +2801,6 @@ class Parser(metaclass=_Parser):
return self.expression(exp.SessionParameter, this=this, kind=kind)
def _parse_udf_kwarg(self) -> t.Optional[exp.Expression]:
this = self._parse_id_var()
kind = self._parse_types()
if not kind:
return this
return self.expression(exp.UserDefinedFunctionKwarg, this=this, kind=kind)
def _parse_lambda(self) -> t.Optional[exp.Expression]:
index = self._index
@ -2726,6 +2849,9 @@ class Parser(metaclass=_Parser):
def _parse_column_def(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
kind = self._parse_types()
if self._match_text_seq("FOR", "ORDINALITY"):
return self.expression(exp.ColumnDef, this=this, ordinality=True)
constraints = []
while True:
constraint = self._parse_column_constraint()
@ -2738,79 +2864,78 @@ class Parser(metaclass=_Parser):
return self.expression(exp.ColumnDef, this=this, kind=kind, constraints=constraints)
def _parse_auto_increment(self) -> exp.Expression:
    """Parse an auto-increment constraint, optionally carrying a start and
    increment given either as a wrapped list ``(start, increment)`` or via
    ``START <expr> INCREMENT <expr>`` keywords."""
    start = increment = None

    if self._match(TokenType.L_PAREN, advance=False):
        wrapped = self._parse_wrapped_csv(self._parse_bitwise)
        start, increment = seq_get(wrapped, 0), seq_get(wrapped, 1)
    elif self._match_text_seq("START"):
        start = self._parse_bitwise()
        self._match_text_seq("INCREMENT")
        increment = self._parse_bitwise()

    # Both values present -> full identity spec; otherwise a bare constraint.
    if start and increment:
        return exp.GeneratedAsIdentityColumnConstraint(start=start, increment=increment)
    return exp.AutoIncrementColumnConstraint()
def _parse_generated_as_identity(self) -> exp.Expression:
    """Parse ``GENERATED [ALWAYS | BY DEFAULT] AS IDENTITY`` plus optional
    parenthesized sequence options (START WITH, INCREMENT BY, MINVALUE,
    MAXVALUE, [NO] CYCLE)."""
    # `this` is False for BY DEFAULT, True for (implicit) ALWAYS.
    if self._match(TokenType.BY_DEFAULT):
        this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=False)
    else:
        self._match_text_seq("ALWAYS")
        this = self.expression(exp.GeneratedAsIdentityColumnConstraint, this=True)

    self._match_text_seq("AS", "IDENTITY")

    if self._match(TokenType.L_PAREN):
        # Each option is optional but must appear in this fixed order.
        for keywords, arg in (
            (("START", "WITH"), "start"),
            (("INCREMENT", "BY"), "increment"),
            (("MINVALUE",), "minvalue"),
            (("MAXVALUE",), "maxvalue"),
        ):
            if self._match_text_seq(*keywords):
                this.set(arg, self._parse_bitwise())

        if self._match_text_seq("CYCLE"):
            this.set("cycle", True)
        elif self._match_text_seq("NO", "CYCLE"):
            this.set("cycle", False)

        self._match_r_paren()

    return this
def _parse_not_constraint(self) -> t.Optional[exp.Expression]:
    """Parse the tail of a ``NOT ...`` column constraint: NOT NULL or
    NOT CASESPECIFIC. Returns None when neither keyword follows."""
    constraint = None
    if self._match_text_seq("NULL"):
        constraint = self.expression(exp.NotNullColumnConstraint)
    elif self._match_text_seq("CASESPECIFIC"):
        constraint = self.expression(exp.CaseSpecificColumnConstraint, not_=True)
    return constraint
def _parse_column_constraint(self) -> t.Optional[exp.Expression]:
    """Parse a single column constraint.

    A REFERENCES constraint is handled first. Otherwise an optional
    ``CONSTRAINT <name>`` prefix is consumed, and the next keyword is
    dispatched through CONSTRAINT_PARSERS. Returns the bare name (or None)
    when no recognized constraint keyword follows.
    """
    this = self._parse_references()
    if this:
        return this

    # Optional explicit name: CONSTRAINT <name> <kind>
    if self._match(TokenType.CONSTRAINT):
        this = self._parse_id_var()

    if self._match_texts(self.CONSTRAINT_PARSERS):
        # _match_texts leaves the matched keyword in self._prev.
        return self.expression(
            exp.ColumnConstraint,
            this=this,
            kind=self.CONSTRAINT_PARSERS[self._prev.text.upper()](self),
        )

    return this
def _parse_constraint(self) -> t.Optional[exp.Expression]:
if not self._match(TokenType.CONSTRAINT):
return self._parse_unnamed_constraint()
return self._parse_unnamed_constraint(constraints=self.SCHEMA_UNNAMED_CONSTRAINTS)
this = self._parse_id_var()
expressions = []
@ -2823,12 +2948,21 @@ class Parser(metaclass=_Parser):
return self.expression(exp.Constraint, this=this, expressions=expressions)
def _parse_unnamed_constraint(
    self, constraints: t.Optional[t.Collection[str]] = None
) -> t.Optional[exp.Expression]:
    """Parse a constraint with no ``CONSTRAINT <name>`` prefix.

    `constraints` optionally restricts the accepted keywords (e.g.
    SCHEMA_UNNAMED_CONSTRAINTS inside a schema definition); a keyword that
    matches but has no CONSTRAINT_PARSERS entry raises a parse error.
    Returns None when no constraint keyword follows.
    """
    if not self._match_texts(constraints or self.CONSTRAINT_PARSERS):
        return None

    constraint = self._prev.text.upper()
    if constraint not in self.CONSTRAINT_PARSERS:
        self.raise_error(f"No parser found for schema constraint {constraint}.")

    return self.CONSTRAINT_PARSERS[constraint](self)
def _parse_unique(self) -> exp.Expression:
    """Parse UNIQUE: a table-level unique key when a parenthesized column
    list follows, otherwise a bare column constraint."""
    if self._match(TokenType.L_PAREN, advance=False):
        return self.expression(exp.Unique, expressions=self._parse_wrapped_id_vars())
    return self.expression(exp.UniqueColumnConstraint)
def _parse_key_constraint_options(self) -> t.List[str]:
@ -2908,6 +3042,14 @@ class Parser(metaclass=_Parser):
)
def _parse_primary_key(self) -> exp.Expression:
    """Parse PRIMARY KEY: a column constraint (with optional ASC/DESC) or,
    when a parenthesized column list follows, a table-level key with
    trailing key-constraint options."""
    matched_order = self._match_set((TokenType.ASC, TokenType.DESC))
    desc = matched_order and self._prev.token_type == TokenType.DESC

    if not self._match(TokenType.L_PAREN, advance=False):
        return self.expression(exp.PrimaryKeyColumnConstraint, desc=desc)

    return self.expression(
        exp.PrimaryKey,
        expressions=self._parse_wrapped_id_vars(),
        options=self._parse_key_constraint_options(),
    )
@ -3306,6 +3448,12 @@ class Parser(metaclass=_Parser):
return self.PRIMARY_PARSERS[TokenType.STAR](self, self._prev)
return None
def _parse_parameter(self) -> exp.Expression:
    """Parse a parameter reference, recording whether its body was wrapped
    in braces via the `wrapped` arg of exp.Parameter."""
    wrapped = self._match(TokenType.L_BRACE)
    body = self._parse_var() or self._parse_primary()
    self._match(TokenType.R_BRACE)
    return self.expression(exp.Parameter, this=body, wrapped=wrapped)
def _parse_placeholder(self) -> t.Optional[exp.Expression]:
if self._match_set(self.PLACEHOLDER_PARSERS):
placeholder = self.PLACEHOLDER_PARSERS[self._prev.token_type](self)
@ -3449,7 +3597,7 @@ class Parser(metaclass=_Parser):
if kind == TokenType.CONSTRAINT:
this = self._parse_id_var()
if self._match(TokenType.CHECK):
if self._match_text_seq("CHECK"):
expression = self._parse_wrapped(self._parse_conjunction)
enforced = self._match_text_seq("ENFORCED")