Merging upstream version 16.4.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in: parent 8a4abed982, commit 71f21d9752
90 changed files with 35638 additions and 33343 deletions
sqlglot/parser.py
@@ -9,7 +9,7 @@ from sqlglot.errors import ErrorLevel, ParseError, concat_messages, merge_errors
 from sqlglot.helper import apply_index_offset, ensure_list, seq_get
 from sqlglot.time import format_time
 from sqlglot.tokens import Token, Tokenizer, TokenType
-from sqlglot.trie import in_trie, new_trie
+from sqlglot.trie import TrieResult, in_trie, new_trie

 if t.TYPE_CHECKING:
     from sqlglot._typing import E
@@ -177,6 +177,7 @@ class Parser(metaclass=_Parser):
         TokenType.BIGSERIAL,
+        TokenType.XML,
         TokenType.UNIQUEIDENTIFIER,
         TokenType.USERDEFINED,
         TokenType.MONEY,
         TokenType.SMALLMONEY,
         TokenType.ROWVERSION,
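With TokenType.XML added to the type tokens, XML can be accepted as a column data type. A minimal sketch of what this enables, assuming the tokenizer already maps the XML keyword to TokenType.XML (not shown in this hunk):

import sqlglot
from sqlglot import exp

# Hypothetical table; the point is that XML should now parse as a DataType.
ast = sqlglot.parse_one("CREATE TABLE docs (payload XML)", read="tsql")
print(ast.find(exp.DataType))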
@@ -465,7 +466,7 @@ class Parser(metaclass=_Parser):
     }

     EXPRESSION_PARSERS = {
-        exp.Cluster: lambda self: self._parse_sort(exp.Cluster, "CLUSTER", "BY"),
+        exp.Cluster: lambda self: self._parse_sort(exp.Cluster, TokenType.CLUSTER_BY),
         exp.Column: lambda self: self._parse_column(),
         exp.Condition: lambda self: self._parse_conjunction(),
         exp.DataType: lambda self: self._parse_types(),
@@ -484,7 +485,7 @@ class Parser(metaclass=_Parser):
         exp.Properties: lambda self: self._parse_properties(),
         exp.Qualify: lambda self: self._parse_qualify(),
         exp.Returning: lambda self: self._parse_returning(),
-        exp.Sort: lambda self: self._parse_sort(exp.Sort, "SORT", "BY"),
+        exp.Sort: lambda self: self._parse_sort(exp.Sort, TokenType.SORT_BY),
         exp.Table: lambda self: self._parse_table_parts(),
         exp.TableAlias: lambda self: self._parse_table_alias(),
         exp.Where: lambda self: self._parse_where(),
@@ -540,8 +541,7 @@ class Parser(metaclass=_Parser):
             exp.Literal, this=token.text, is_string=False
         ),
         TokenType.STAR: lambda self, _: self.expression(
-            exp.Star,
-            **{"except": self._parse_except(), "replace": self._parse_replace()},
+            exp.Star, **{"except": self._parse_except(), "replace": self._parse_replace()}
         ),
         TokenType.NULL: lambda self, _: self.expression(exp.Null),
         TokenType.TRUE: lambda self, _: self.expression(exp.Boolean, this=True),
@@ -584,9 +584,10 @@ class Parser(metaclass=_Parser):
         "BLOCKCOMPRESSION": lambda self: self._parse_blockcompression(),
         "CHARACTER SET": lambda self: self._parse_character_set(),
         "CHECKSUM": lambda self: self._parse_checksum(),
-        "CLUSTER": lambda self: self._parse_cluster(),
+        "CLUSTER BY": lambda self: self._parse_cluster(),
         "COLLATE": lambda self: self._parse_property_assignment(exp.CollateProperty),
         "COMMENT": lambda self: self._parse_property_assignment(exp.SchemaCommentProperty),
+        "COPY": lambda self: self._parse_copy_property(),
         "DATABLOCKSIZE": lambda self, **kwargs: self._parse_datablocksize(**kwargs),
         "DEFINER": lambda self: self._parse_definer(),
         "DETERMINISTIC": lambda self: self.expression(
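Property keys are matched as whole phrases, so CLUSTER BY is now looked up as a single entry instead of CLUSTER followed by a separate BY check, and the COPY entry is wired to the _parse_copy_property helper added further down. A quick sanity-check sketch:

from sqlglot.parser import Parser

# Both keys should be present on the class-level mapping after this change.
print("CLUSTER BY" in Parser.PROPERTY_PARSERS)  # True
print("COPY" in Parser.PROPERTY_PARSERS)        # True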
@@ -780,6 +781,8 @@ class Parser(metaclass=_Parser):

     CLONE_KINDS = {"TIMESTAMP", "OFFSET", "STATEMENT"}

+    TABLE_INDEX_HINT_TOKENS = {TokenType.FORCE, TokenType.IGNORE, TokenType.USE}
+
     WINDOW_ALIAS_TOKENS = ID_VAR_TOKENS - {TokenType.ROWS}
     WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER}
     WINDOW_SIDES = {"FOLLOWING", "PRECEDING"}
@@ -788,7 +791,8 @@ class Parser(metaclass=_Parser):

     STRICT_CAST = True

-    CONCAT_NULL_OUTPUTS_STRING = False  # A NULL arg in CONCAT yields NULL by default
+    # A NULL arg in CONCAT yields NULL by default
+    CONCAT_NULL_OUTPUTS_STRING = False

     CONVERT_TYPE_FIRST = False

@@ -1423,11 +1427,14 @@ class Parser(metaclass=_Parser):
         return self.expression(exp.ChecksumProperty, on=on, default=self._match(TokenType.DEFAULT))

     def _parse_cluster(self) -> t.Optional[exp.Cluster]:
-        if not self._match_text_seq("BY"):
+        return self.expression(exp.Cluster, expressions=self._parse_csv(self._parse_ordered))
+
+    def _parse_copy_property(self) -> t.Optional[exp.CopyGrantsProperty]:
+        if not self._match_text_seq("GRANTS"):
             self._retreat(self._index - 1)
             return None

-        return self.expression(exp.Cluster, expressions=self._parse_csv(self._parse_ordered))
+        return self.expression(exp.CopyGrantsProperty)

     def _parse_freespace(self) -> exp.FreespaceProperty:
         self._match(TokenType.EQ)
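_parse_copy_property backs Snowflake-style COPY GRANTS; if GRANTS does not follow COPY, the parser retreats and lets another property parser try. A hedged usage sketch (the Snowflake dialect wiring is assumed, it is not shown in this hunk):

import sqlglot
from sqlglot import exp

# COPY GRANTS on a CTAS should surface as a CopyGrantsProperty node.
ast = sqlglot.parse_one(
    "CREATE OR REPLACE TABLE t COPY GRANTS AS SELECT * FROM src",
    read="snowflake",
)
print(ast.find(exp.CopyGrantsProperty))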
@@ -1779,6 +1786,7 @@ class Parser(metaclass=_Parser):
             using=self._parse_csv(lambda: self._match(TokenType.USING) and self._parse_table()),
             where=self._parse_where(),
             returning=self._parse_returning(),
+            limit=self._parse_limit(),
         )

     def _parse_update(self) -> exp.Update:
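DELETE now also captures an optional LIMIT clause. A sketch using MySQL syntax, which allows LIMIT on DELETE (the exact dialect behaviour is an assumption here):

import sqlglot

ast = sqlglot.parse_one("DELETE FROM t WHERE x = 1 LIMIT 10", read="mysql")
print(ast.args.get("limit"))  # the parsed Limit expression, if supported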
@@ -1790,6 +1798,7 @@ class Parser(metaclass=_Parser):
                 "from": self._parse_from(modifiers=True),
                 "where": self._parse_where(),
                 "returning": self._parse_returning(),
+                "limit": self._parse_limit(),
             },
         )

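The same optional LIMIT is picked up for UPDATE. A sketch, again assuming MySQL-style syntax:

import sqlglot

ast = sqlglot.parse_one("UPDATE t SET x = 1 WHERE y = 2 LIMIT 5", read="mysql")
print(ast.sql(dialect="mysql"))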
@@ -2268,6 +2277,33 @@ class Parser(metaclass=_Parser):
             partition_by=self._parse_partition_by(),
         )

+    def _parse_table_hints(self) -> t.Optional[t.List[exp.Expression]]:
+        hints: t.List[exp.Expression] = []
+        if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
+            # https://learn.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-ver16
+            hints.append(
+                self.expression(
+                    exp.WithTableHint,
+                    expressions=self._parse_csv(
+                        lambda: self._parse_function() or self._parse_var(any_token=True)
+                    ),
+                )
+            )
+            self._match_r_paren()
+        else:
+            # https://dev.mysql.com/doc/refman/8.0/en/index-hints.html
+            while self._match_set(self.TABLE_INDEX_HINT_TOKENS):
+                hint = exp.IndexTableHint(this=self._prev.text.upper())
+
+                self._match_texts({"INDEX", "KEY"})
+                if self._match(TokenType.FOR):
+                    hint.set("target", self._advance_any() and self._prev.text.upper())
+
+                hint.set("expressions", self._parse_wrapped_id_vars())
+                hints.append(hint)
+
+        return hints or None
+
     def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
         return (
             (not schema and self._parse_function(optional_parens=False))
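_parse_table_hints covers two hint styles: a parenthesized T-SQL WITH (...) list that becomes exp.WithTableHint, and MySQL-style USE/IGNORE/FORCE index hints that become exp.IndexTableHint. A usage sketch (the resulting tree shapes are assumptions, they are not shown in this diff):

import sqlglot
from sqlglot import exp

# T-SQL: the hint list inside WITH ( ... ) becomes a WithTableHint.
tsql = sqlglot.parse_one("SELECT * FROM t WITH (NOLOCK)", read="tsql")
print(tsql.find(exp.WithTableHint))

# MySQL: index hints become IndexTableHint nodes on the table.
mysql = sqlglot.parse_one("SELECT * FROM t USE INDEX (idx_a)", read="mysql")
print(mysql.find(exp.IndexTableHint))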
@@ -2335,12 +2371,7 @@ class Parser(metaclass=_Parser):
         if not this.args.get("pivots"):
             this.set("pivots", self._parse_pivots())

-        if self._match_pair(TokenType.WITH, TokenType.L_PAREN):
-            this.set(
-                "hints",
-                self._parse_csv(lambda: self._parse_function() or self._parse_var(any_token=True)),
-            )
-            self._match_r_paren()
+        this.set("hints", self._parse_table_hints())

         if not self.ALIAS_POST_TABLESAMPLE:
             table_sample = self._parse_table_sample()
@@ -2610,8 +2641,8 @@ class Parser(metaclass=_Parser):
             exp.Order, this=this, expressions=self._parse_csv(self._parse_ordered)
         )

-    def _parse_sort(self, exp_class: t.Type[E], *texts: str) -> t.Optional[E]:
-        if not self._match_text_seq(*texts):
+    def _parse_sort(self, exp_class: t.Type[E], token: TokenType) -> t.Optional[E]:
+        if not self._match(token):
             return None
         return self.expression(exp_class, expressions=self._parse_csv(self._parse_ordered))

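_parse_sort now takes a single TokenType (e.g. TokenType.SORT_BY, TokenType.CLUSTER_BY) rather than matching the keywords word by word, which pairs with the EXPRESSION_PARSERS changes above. A sketch with Hive, where these clauses are common:

import sqlglot

# SORT BY / CLUSTER BY should still parse; only the internal matching changed.
ast = sqlglot.parse_one("SELECT * FROM t SORT BY x DESC", read="hive")
print(ast.args.get("sort"))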
@@ -3655,7 +3686,11 @@ class Parser(metaclass=_Parser):
     def _parse_concat(self) -> t.Optional[exp.Expression]:
         args = self._parse_csv(self._parse_conjunction)
         if self.CONCAT_NULL_OUTPUTS_STRING:
-            args = [exp.func("COALESCE", arg, exp.Literal.string("")) for arg in args]
+            args = [
+                exp.func("COALESCE", exp.cast(arg, "text"), exp.Literal.string(""))
+                for arg in args
+                if arg
+            ]

         # Some dialects (e.g. Trino) don't allow a single-argument CONCAT call, so when
         # we find such a call we replace it with its argument.
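When CONCAT_NULL_OUTPUTS_STRING is set, each CONCAT argument is now also cast to text before being wrapped in COALESCE, and falsy placeholders are filtered out. What the rewrite produces for a single argument, as a sketch built with the public expression helpers:

from sqlglot import exp

arg = exp.column("name")
wrapped = exp.func("COALESCE", exp.cast(arg, "text"), exp.Literal.string(""))
print(wrapped.sql())  # roughly: COALESCE(CAST(name AS TEXT), '')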
@@ -4553,13 +4588,16 @@ class Parser(metaclass=_Parser):
             curr = self._curr.text.upper()
             key = curr.split(" ")
             this.append(curr)

             self._advance()
             result, trie = in_trie(trie, key)
-            if result == 0:
+
+            if result == TrieResult.FAILED:
                 break
-            if result == 2:
+
+            if result == TrieResult.EXISTS:
                 subparser = parsers[" ".join(this)]
                 return subparser

         self._retreat(index)
         return None
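in_trie now reports its outcome through the TrieResult enum (FAILED / PREFIX / EXISTS) instead of bare integers, which is what the 0 and 2 comparisons above were replaced with. A small sketch of the lookup loop, assuming new_trie accepts an iterable of word sequences the way the parser's curr.split(" ") usage suggests:

from sqlglot.trie import TrieResult, in_trie, new_trie

trie = new_trie([("CLUSTER", "BY"), ("SORT", "BY")])

result, node = in_trie(trie, ["CLUSTER"])
print(result)  # TrieResult.PREFIX: a valid start, keep consuming tokens

result, node = in_trie(node, ["BY"])
print(result)  # TrieResult.EXISTS: a complete keyword phrase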