from __future__ import annotations

import logging
import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    NormalizationStrategy,
    arg_max_or_min_no_count,
    binary_from_function,
    date_add_interval_sql,
    datestrtodate_sql,
    build_formatted_time,
    filter_array_using_unnest,
    if_sql,
    inline_array_unless_query,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    build_date_delta_with_interval,
    regexp_replace_sql,
    rename_func,
    sha256_sql,
    timestrtotime_sql,
    ts_or_ds_add_cast,
    unit_to_var,
    str_position_sql,
)
from sqlglot.helper import seq_get, split_num_words
from sqlglot.tokens import TokenType
from sqlglot.generator import unsupported_args

if t.TYPE_CHECKING:
    from sqlglot._typing import E, Lit

    from sqlglot.optimizer.annotate_types import TypeAnnotator

logger = logging.getLogger("sqlglot")


def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
    if not expression.find_ancestor(exp.From, exp.Join):
        return self.values_sql(expression)

    structs = []
    alias = expression.args.get("alias")
    for tup in expression.find_all(exp.Tuple):
        field_aliases = (
            alias.columns
            if alias and alias.columns
            else (f"_c{i}" for i in range(len(tup.expressions)))
        )
        expressions = [
            exp.PropertyEQ(this=exp.to_identifier(name), expression=fld)
            for name, fld in zip(field_aliases, tup.expressions)
        ]
        structs.append(exp.Struct(expressions=expressions))

    # Due to `UNNEST_COLUMN_ONLY`, it is expected that the table alias be contained in the columns expression
    alias_name_only = exp.TableAlias(columns=[alias.this]) if alias else None
    return self.unnest_sql(
        exp.Unnest(expressions=[exp.array(*structs, copy=False)], alias=alias_name_only)
    )
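
# Illustrative example (not from the original source): the rewrite above turns a VALUES
# derived table into an UNNEST over an array of STRUCTs, since BigQuery has no VALUES
# clause in the FROM position. Roughly:
#   SELECT * FROM (VALUES (1, 'a')) AS t(x, y)
#   --> SELECT * FROM UNNEST([STRUCT(1 AS x, 'a' AS y)]) AS t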


def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{self.sql(this, 'this')} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
    returns = expression.find(exp.ReturnsProperty)
    if expression.kind == "FUNCTION" and returns and returns.args.get("is_table"):
        expression.set("kind", "TABLE FUNCTION")

        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
            expression.set("expression", expression.expression.this)

    return self.create_sql(expression)


# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            aliases = {
                select.this: select.args["alias"]
                for select in expression.selects
                if isinstance(select, exp.Alias)
            }

            for grouped in group.expressions:
                if grouped.is_int:
                    continue

                alias = aliases.get(grouped)

                if alias:
                    grouped.replace(exp.column(alias))

    return expression


def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression
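
# Illustrative example (not from the original source): since BigQuery rejects CTE column
# lists, the helper above rewrites, roughly:
#   WITH t(a, b) AS (SELECT 1, 2) SELECT * FROM t
#   --> WITH t AS (SELECT 1 AS a, 2 AS b) SELECT * FROM t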


def _build_parse_timestamp(args: t.List) -> exp.StrToTime:
    this = build_formatted_time(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
    this.set("zone", seq_get(args, 2))
    return this


def _build_timestamp(args: t.List) -> exp.Timestamp:
    timestamp = exp.Timestamp.from_arg_list(args)
    timestamp.set("with_tz", True)
    return timestamp


def _build_date(args: t.List) -> exp.Date | exp.DateFromParts:
    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
    return expr_type.from_arg_list(args)


def _build_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.LowerHex(this=arg)
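
# Illustrative example (not from the original source): parsing TO_HEX(MD5(x)) as exp.MD5
# lets the hex-string MD5 round-trip to dialects whose MD5 already returns hex, e.g.
# (roughly) TO_HEX(MD5(col)) in BigQuery <-> MD5(col) in DuckDB; a bare TO_HEX(x) stays
# an exp.LowerHex node instead.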


def _array_contains_sql(self: BigQuery.Generator, expression: exp.ArrayContains) -> str:
    return self.sql(
        exp.Exists(
            this=exp.select("1")
            .from_(exp.Unnest(expressions=[expression.left]).as_("_unnest", table=["_col"]))
            .where(exp.column("_col").eq(expression.right))
        )
    )
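
# Illustrative example (not from the original source): BigQuery has no direct
# array-contains operator, so ArrayContains is rendered as an EXISTS subquery over
# UNNEST, roughly:
#   EXISTS(SELECT 1 FROM UNNEST(arr) AS _col WHERE _col = value)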


def _ts_or_ds_add_sql(self: BigQuery.Generator, expression: exp.TsOrDsAdd) -> str:
    return date_add_interval_sql("DATE", "ADD")(self, ts_or_ds_add_cast(expression))


def _ts_or_ds_diff_sql(self: BigQuery.Generator, expression: exp.TsOrDsDiff) -> str:
    expression.this.replace(exp.cast(expression.this, exp.DataType.Type.TIMESTAMP))
    expression.expression.replace(exp.cast(expression.expression, exp.DataType.Type.TIMESTAMP))
    unit = unit_to_var(expression)
    return self.func("DATE_DIFF", expression.this, expression.expression, unit)


def _unix_to_time_sql(self: BigQuery.Generator, expression: exp.UnixToTime) -> str:
    scale = expression.args.get("scale")
    timestamp = expression.this

    if scale in (None, exp.UnixToTime.SECONDS):
        return self.func("TIMESTAMP_SECONDS", timestamp)
    if scale == exp.UnixToTime.MILLIS:
        return self.func("TIMESTAMP_MILLIS", timestamp)
    if scale == exp.UnixToTime.MICROS:
        return self.func("TIMESTAMP_MICROS", timestamp)

    unix_seconds = exp.cast(
        exp.Div(this=timestamp, expression=exp.func("POW", 10, scale)), exp.DataType.Type.BIGINT
    )
    return self.func("TIMESTAMP_SECONDS", unix_seconds)
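
# Illustrative note (not from the original source): UNIX epoch values map onto BigQuery's
# TIMESTAMP_SECONDS / TIMESTAMP_MILLIS / TIMESTAMP_MICROS depending on the scale; any
# other scale falls back to dividing by POW(10, scale) and using TIMESTAMP_SECONDS.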


def _build_time(args: t.List) -> exp.Func:
    if len(args) == 1:
        return exp.TsOrDsToTime(this=args[0])
    if len(args) == 2:
        return exp.Time.from_arg_list(args)
    return exp.TimeFromParts.from_arg_list(args)


def _build_datetime(args: t.List) -> exp.Func:
    if len(args) == 1:
        return exp.TsOrDsToDatetime.from_arg_list(args)
    if len(args) == 2:
        return exp.Datetime.from_arg_list(args)
    return exp.TimestampFromParts.from_arg_list(args)


def _build_regexp_extract(
    expr_type: t.Type[E], default_group: t.Optional[exp.Expression] = None
) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        try:
            group = re.compile(args[1].name).groups == 1
        except re.error:
            group = False

        # Default group is used for the transpilation of REGEXP_EXTRACT_ALL
        return expr_type(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            position=seq_get(args, 2),
            occurrence=seq_get(args, 3),
            group=exp.Literal.number(1) if group else default_group,
        )

    return _builder
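
# Illustrative note (not from the original source): BigQuery's REGEXP_EXTRACT returns the
# text matched by the capturing group when the pattern has exactly one, so the builder
# records group=1 in that case to preserve the semantics when transpiling elsewhere.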


def _build_extract_json_with_default_path(expr_type: t.Type[E]) -> t.Callable[[t.List, Dialect], E]:
    def _builder(args: t.List, dialect: Dialect) -> E:
        if len(args) == 1:
            # The default value for the JSONPath is '$' i.e all of the data
            args.append(exp.Literal.string("$"))
        return parser.build_extract_json_with_path(expr_type)(args, dialect)

    return _builder


def _str_to_datetime_sql(
    self: BigQuery.Generator, expression: exp.StrToDate | exp.StrToTime
) -> str:
    this = self.sql(expression, "this")
    dtype = "DATE" if isinstance(expression, exp.StrToDate) else "TIMESTAMP"

    if expression.args.get("safe"):
        fmt = self.format_time(
            expression,
            self.dialect.INVERSE_FORMAT_MAPPING,
            self.dialect.INVERSE_FORMAT_TRIE,
        )
        return f"SAFE_CAST({this} AS {dtype} FORMAT {fmt})"

    fmt = self.format_time(expression)
    return self.func(f"PARSE_{dtype}", fmt, this, expression.args.get("zone"))
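
# Illustrative example (not from the original source): StrToDate/StrToTime render,
# roughly, as PARSE_DATE('%Y-%m-%d', x) / PARSE_TIMESTAMP('%Y-%m-%d', x), while their
# "safe" variants become SAFE_CAST(x AS DATE FORMAT 'YYYY-MM-DD'), which yields NULL
# instead of raising on malformed input.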


def _annotate_math_functions(self: TypeAnnotator, expression: E) -> E:
    """
    Many BigQuery math functions such as CEIL, FLOOR etc follow this return type convention:
    +---------+---------+---------+------------+---------+
    | INPUT   | INT64   | NUMERIC | BIGNUMERIC | FLOAT64 |
    +---------+---------+---------+------------+---------+
    | OUTPUT  | FLOAT64 | NUMERIC | BIGNUMERIC | FLOAT64 |
    +---------+---------+---------+------------+---------+
    """
    self._annotate_args(expression)

    this: exp.Expression = expression.this

    self._set_type(
        expression,
        exp.DataType.Type.DOUBLE if this.is_type(*exp.DataType.INTEGER_TYPES) else this.type,
    )
    return expression


@unsupported_args("ins_cost", "del_cost", "sub_cost")
def _levenshtein_sql(self: BigQuery.Generator, expression: exp.Levenshtein) -> str:
    max_dist = expression.args.get("max_dist")
    if max_dist:
        max_dist = exp.Kwarg(this=exp.var("max_distance"), expression=max_dist)

    return self.func("EDIT_DISTANCE", expression.this, expression.expression, max_dist)


def _build_levenshtein(args: t.List) -> exp.Levenshtein:
    max_dist = seq_get(args, 2)
    return exp.Levenshtein(
        this=seq_get(args, 0),
        expression=seq_get(args, 1),
        max_dist=max_dist.expression if max_dist else None,
    )


def _build_format_time(expr_type: t.Type[exp.Expression]) -> t.Callable[[t.List], exp.TimeToStr]:
    def _builder(args: t.List) -> exp.TimeToStr:
        return exp.TimeToStr(this=expr_type(this=seq_get(args, 1)), format=seq_get(args, 0))

    return _builder


def _build_contains_substring(args: t.List) -> exp.Contains | exp.Anonymous:
    if len(args) == 3:
        return exp.Anonymous(this="CONTAINS_SUBSTRING", expressions=args)

    # Lowercase the operands in case of transpilation, as exp.Contains
    # is case-sensitive on other dialects
    this = exp.Lower(this=seq_get(args, 0))
    expr = exp.Lower(this=seq_get(args, 1))

    return exp.Contains(this=this, expression=expr)


class BigQuery(Dialect):
    WEEK_OFFSET = -1
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    LOG_BASE_FIRST = False
    HEX_LOWERCASE = True
    FORCE_EARLY_ALIAS_REF_EXPANSION = True
    EXPAND_ALIAS_REFS_EARLY_ONLY_IN_GROUP_BY = True
    PRESERVE_ORIGINAL_NAMES = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
        "%E6S": "%S.%f",
        "%e": "%-d",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    # All set operations require either a DISTINCT or ALL specifier
    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)

    ANNOTATORS = {
        **Dialect.ANNOTATORS,
        **{
            expr_type: lambda self, e: _annotate_math_functions(self, e)
            for expr_type in (exp.Floor, exp.Ceil, exp.Log, exp.Ln, exp.Sqrt, exp.Exp, exp.Round)
        },
        **{
            expr_type: lambda self, e: self._annotate_by_args(e, "this")
            for expr_type in (
                exp.Left,
                exp.Right,
                exp.Lower,
                exp.Upper,
                exp.Pad,
                exp.Trim,
                exp.RegexpExtract,
                exp.RegexpReplace,
                exp.Repeat,
                exp.Substring,
            )
        },
        exp.Concat: lambda self, e: self._annotate_by_args(e, "expressions"),
        exp.Sign: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Split: lambda self, e: self._annotate_by_args(e, "this", array=True),
    }

    def normalize_identifier(self, expression: E) -> E:
        if (
            isinstance(expression, exp.Identifier)
            and self.normalization_strategy is not NormalizationStrategy.CASE_SENSITIVE
        ):
            parent = expression.parent
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
            # by default. The following check uses a heuristic to detect tables based on whether
            # they are qualified. This should generally be correct, because tables in BigQuery
            # must be qualified with at least a dataset, unless @@dataset_id is set.
            case_sensitive = (
                isinstance(parent, exp.UserDefinedFunction)
                or (
                    isinstance(parent, exp.Table)
                    and parent.db
                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
                )
                or expression.meta.get("is_table")
            )
            if not case_sensitive:
                expression.set("this", expression.this.lower())

        return expression
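
    # Illustrative note (not from the original source): under the case-insensitive strategy
    # an unqualified identifier like `Foo` is lowercased, while dataset-qualified table
    # names and UDF names keep their casing because BigQuery treats those as case-sensitive.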

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.COMMAND,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONTAINS_SUBSTRING": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _build_to_hex,
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
        }

        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }
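
        # Illustrative note (not from the original source): BigQuery array access is written
        # as arr[OFFSET(i)] (0-based) or arr[ORDINAL(i)] (1-based); the SAFE_ variants return
        # NULL instead of erroring when the index is out of bounds.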

        def _parse_for_in(self) -> exp.ForIn:
            this = self._parse_range()
            self._match_text_seq("DO")
            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    text = ""
                    while self._is_connected() and self._curr.token_type != TokenType.DOT:
                        self._advance()
                        text += self._prev.text
                    table_name += text

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this
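
        # Illustrative note (not from the original source): the dash handling above glues
        # consecutive tokens back into a single identifier, so an unquoted name like
        # my-project.dataset.tbl parses as the table part "my-project" rather than as a
        # subtraction.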

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set("catalog", exp.Identifier(this=parts[0]))
                        table.set("db", exp.Identifier(this=parts[1]))
                else:
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set("db", exp.Identifier(this=parts[0]))
                        table.set("this", exp.Identifier(this=parts[1]))

            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                table.set("this", exp.Identifier(this=info_schema_view, quoted=True))
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table

        def _parse_column(self) -> t.Optional[exp.Expression]:
            column = super()._parse_column()
            if isinstance(column, exp.Column):
                parts = column.parts
                if any("." in p.name for p in parts):
                    catalog, db, table, this, *rest = (
                        exp.to_identifier(p, quoted=True)
                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
                    )

                    if rest and this:
                        this = exp.Dot.build([this, *rest])  # type: ignore

                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
                    column.meta["quoted_column"] = True

            return column

        @t.overload
        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

        @t.overload
        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

        def _parse_json_object(self, agg=False):
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object
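
        # Illustrative example (not from the original source): JSON_OBJECT(['a', 'b'], [1, 2])
        # is normalized to the same AST as JSON_OBJECT('a', 1, 'b', 2), i.e. an explicit list
        # of key/value pairs.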

        def _parse_bracket(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            bracket = super()._parse_bracket(this)

            if this is bracket:
                return bracket

            if isinstance(bracket, exp.Bracket):
                for expression in bracket.expressions:
                    name = expression.name.upper()

                    if name not in self.BRACKET_OFFSETS:
                        break

                    offset, safe = self.BRACKET_OFFSETS[name]
                    bracket.set("offset", offset)
                    bracket.set("safe", safe)
                    expression.replace(expression.expressions[0])

            return bracket

        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
            unnest = super()._parse_unnest(with_alias=with_alias)

            if not unnest:
                return None

            unnest_expr = seq_get(unnest.expressions, 0)
            if unnest_expr:
                from sqlglot.optimizer.annotate_types import annotate_types

                unnest_expr = annotate_types(unnest_expr)

                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
                # in contrast to other dialects such as DuckDB which flattens only the array by default
                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
                    array_elem.is_type(exp.DataType.Type.STRUCT)
                    for array_elem in unnest_expr._type.expressions
                ):
                    unnest.set("explode_array", True)

            return unnest

        def _parse_make_interval(self) -> exp.MakeInterval:
            expr = exp.MakeInterval()

            for arg_key in expr.arg_types:
                value = self._parse_lambda()

                if not value:
                    break

                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
                if isinstance(value, exp.Kwarg):
                    arg_key = value.this.name

                expr.set(arg_key, value)

                self._match(TokenType.COMMA)

            return expr

        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
            expr = self.expression(
                exp.FeaturesAtTime,
                this=(self._match(TokenType.TABLE) and self._parse_table())
                or self._parse_select(nested=True),
            )

            while self._match(TokenType.COMMA):
                arg = self._parse_lambda()

                # Get the LHS of the Kwarg and set the arg to that value, e.g
                # "num_rows => 1" sets the expr's `num_rows` arg
                if arg:
                    expr.set(arg.this.name, arg)

            return expr

    class Generator(generator.Generator):
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        SUPPORTS_TABLE_ALIAS_COLUMNS = False
        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
        JSON_KEY_VALUE_PAIR_SEP = ","
        NULL_ORDERING_SUPPORTED = False
        IGNORE_NULLS_IN_FUNC = True
        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTS_TO_NUMBER = False
        NAMED_PLACEHOLDER_TOKEN = "@"
        HEX_FUNC = "TO_HEX"
        WITH_PROPERTIES_PREFIX = "OPTIONS"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_UNIX_SECONDS = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
            exp.Array: inline_array_unless_query,
            exp.ArrayContains: _array_contains_sql,
            exp.ArrayFilter: filter_array_using_unnest,
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.CollateProperty: lambda self, e: (
                f"DEFAULT COLLATE {self.sql(e, 'this')}"
                if e.args.get("default")
                else f"COLLATE {self.sql(e, 'this')}"
            ),
            exp.Commit: lambda *_: "COMMIT TRANSACTION",
            exp.CountIf: rename_func("COUNTIF"),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: self.func(
                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
            ),
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func(
                "DATE_TRUNC", e.this, e.text("unit"), e.args.get("zone")
            ),
            exp.FromTimeZone: lambda self, e: self.func(
                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
            ),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.Int64: rename_func("INT64"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Levenshtein: _levenshtein_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpExtractAll: lambda self, e: self.func(
                "REGEXP_EXTRACT_ALL", e.this, e.expression
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest(),
                    transforms.unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.SHA2: sha256_sql,
            exp.StabilityProperty: lambda self, e: (
                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
            ),
            exp.String: rename_func("STRING"),
            exp.StrPosition: str_position_sql,
            exp.StrToDate: _str_to_datetime_sql,
            exp.StrToTime: _str_to_datetime_sql,
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeFromParts: rename_func("TIME"),
            exp.TimestampFromParts: rename_func("DATETIME"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
            exp.TsOrDsAdd: _ts_or_ds_add_sql,
            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
            exp.TsOrDsToTime: rename_func("TIME"),
            exp.TsOrDsToDatetime: rename_func("DATETIME"),
            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.UnixDate: rename_func("UNIX_DATE"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.Uuid: lambda *_: "GENERATE_UUID()",
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.ROWVERSION: "BYTES",
            exp.DataType.Type.UUID: "STRING",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # WINDOW comes after QUALIFY
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
        AFTER_HAVING_MODIFIER_TRANSFORMS = {
            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def mod_sql(self, expression: exp.Mod) -> str:
            this = expression.this
            expr = expression.expression
            return self.func(
                "MOD",
                this.unnest() if isinstance(this, exp.Paren) else this,
                expr.unnest() if isinstance(expr, exp.Paren) else expr,
            )
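
        # Illustrative example (not from the original source): since MOD is a function call
        # here, redundant parentheses around the operands are dropped, e.g. (a) % b becomes
        # MOD(a, b).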

        def column_parts(self, expression: exp.Column) -> str:
            if expression.meta.get("quoted_column"):
                # If a column reference is of the form `dataset.table`.name, we need
                # to preserve the quoted table path, otherwise the reference breaks
                table_parts = ".".join(p.name for p in expression.parts[:-1])
                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
                return f"{table_path}.{self.sql(expression, 'this')}"

            return super().column_parts(expression)

        def table_parts(self, expression: exp.Table) -> str:
            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
            # we need to make sure the correct quoting is used in each case.
            #
            # For example, if there is a CTE x that clashes with a schema name, then the former will
            # return the table y in that schema, whereas the latter will return the CTE's y column:
            #
            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
            if expression.meta.get("quoted_table"):
                table_parts = ".".join(p.name for p in expression.parts)
                return self.sql(exp.Identifier(this=table_parts, quoted=True))

            return super().table_parts(expression)

        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
            this = expression.this
            if isinstance(this, exp.TsOrDsToDatetime):
                func_name = "FORMAT_DATETIME"
            elif isinstance(this, exp.TsOrDsToTimestamp):
                func_name = "FORMAT_TIMESTAMP"
            else:
                func_name = "FORMAT_DATE"

            time_expr = (
                this
                if isinstance(this, (exp.TsOrDsToDatetime, exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
                else expression
            )
            return self.func(func_name, self.format_time(expression), time_expr.this)

        def eq_sql(self, expression: exp.EQ) -> str:
            # Operands of = cannot be NULL in BigQuery
            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
                if not isinstance(expression.parent, exp.Update):
                    return "NULL"

            return self.binary(expression, "=")
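
        # Illustrative example (not from the original source): a comparison against a NULL
        # literal can never be TRUE, so e.g. SELECT a = NULL is rendered as SELECT NULL,
        # except inside an UPDATE where SET col = NULL must be preserved.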

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def bracket_sql(self, expression: exp.Bracket) -> str:
            this = expression.this
            expressions = expression.expressions

            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
                arg = expressions[0]
                if arg.type is None:
                    from sqlglot.optimizer.annotate_types import annotate_types

                    arg = annotate_types(arg)

                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
                    # BQ doesn't support bracket syntax with string values for structs
                    return f"{self.sql(this)}.{arg.name}"

            expressions_sql = self.expressions(expression, flat=True)
            offset = expression.args.get("offset")

            if offset == 0:
                expressions_sql = f"OFFSET({expressions_sql})"
            elif offset == 1:
                expressions_sql = f"ORDINAL({expressions_sql})"
            elif offset is not None:
                self.unsupported(f"Unsupported array offset: {offset}")

            if expression.args.get("safe"):
                expressions_sql = f"SAFE_{expressions_sql}"

            return f"{self.sql(this)}[{expressions_sql}]"
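
        # Illustrative example (not from the original source): a string subscript on a value
        # annotated as a STRUCT, such as my_struct['field'], is rewritten to my_struct.field,
        # while subscripts parsed from OFFSET/ORDINAL round-trip as arr[OFFSET(i)],
        # arr[ORDINAL(i)] or their SAFE_ variants.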

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def version_sql(self, expression: exp.Version) -> str:
            if expression.name == "TIMESTAMP":
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)

        def contains_sql(self, expression: exp.Contains) -> str:
            this = expression.this
            expr = expression.expression

            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
                this = this.this
                expr = expr.this

            return self.func("CONTAINS_SUBSTRING", this, expr)