Adding upstream version 16.4.0.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
d61627452f
commit
cac8fd11fe
90 changed files with 35638 additions and 33343 deletions
|
@@ -86,13 +86,17 @@ def _date_diff_sql(self: generator.Generator, expression: exp.DateDiff) -> str:
|
|||
|
||||
def _json_format_sql(self: generator.Generator, expression: exp.JSONFormat) -> str:
    """Generate Hive/Spark SQL for a `JSONFormat` expression.

    For a string literal explicitly cast to JSON, the value is round-tripped
    through FROM_JSON/TO_JSON so the emitted SQL yields normalized JSON text.
    Any other input falls through to a plain TO_JSON call.
    """
    this = expression.this

    if isinstance(this, exp.Cast) and this.is_type("json") and this.this.is_string:
        # Since FROM_JSON requires a nested type, we always wrap the json string with
        # an array to ensure that "naked" strings like "'a'" will be handled correctly
        wrapped_json = exp.Literal.string(f"[{this.this.name}]")

        from_json = self.func("FROM_JSON", wrapped_json, self.func("SCHEMA_OF_JSON", wrapped_json))
        to_json = self.func("TO_JSON", from_json)

        # This strips the [, ] delimiters of the dummy array printed by TO_JSON
        return self.func("REGEXP_EXTRACT", to_json, "'^.(.*).$'", "1")

    return self.func("TO_JSON", this, expression.args.get("options"))
|
||||
|
||||
|
||||
|
@@ -153,6 +157,9 @@ class Hive(Dialect):
|
|||
# Dialect-level behavior flags for Hive/Spark SQL.
# NOTE(review): semantics inferred from flag names and the linked Spark doc —
# confirm against the base Dialect class where these defaults are declared.
ALIAS_POST_TABLESAMPLE = True
IDENTIFIERS_CAN_START_WITH_DIGIT = True

# https://spark.apache.org/docs/latest/sql-ref-identifier.html#description
RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
|
||||
|
||||
TIME_MAPPING = {
|
||||
"y": "%Y",
|
||||
"Y": "%Y",
|
||||
|
@@ -268,9 +275,9 @@ class Hive(Dialect):
|
|||
|
||||
# Extends the base parser's query-modifier table with Hive-specific
# CLUSTER BY / DISTRIBUTE BY / SORT BY clauses. This commit's post-image
# passes a TokenType to _parse_sort instead of the old (keyword, "BY")
# string pair; the flattened diff showed both variants — only the
# TokenType-based entries belong in the reconstructed file.
QUERY_MODIFIER_PARSERS = {
    **parser.Parser.QUERY_MODIFIER_PARSERS,
    "cluster": lambda self: self._parse_sort(exp.Cluster, TokenType.CLUSTER_BY),
    "distribute": lambda self: self._parse_sort(exp.Distribute, TokenType.DISTRIBUTE_BY),
    "sort": lambda self: self._parse_sort(exp.Sort, TokenType.SORT_BY),
}
|
||||
|
||||
def _parse_types(
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue