Edit on GitHub

Module ``sqlglot.dialects.bigquery`` — source listing of the BigQuery dialect.

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    binary_from_function,
 12    date_add_interval_sql,
 13    datestrtodate_sql,
 14    format_time_lambda,
 15    if_sql,
 16    inline_array_sql,
 17    json_keyvalue_comma_sql,
 18    max_or_greatest,
 19    min_or_least,
 20    no_ilike_sql,
 21    parse_date_delta_with_interval,
 22    regexp_replace_sql,
 23    rename_func,
 24    timestrtotime_sql,
 25    ts_or_ds_to_date_sql,
 26)
 27from sqlglot.helper import seq_get, split_num_words
 28from sqlglot.tokens import TokenType
 29
 30logger = logging.getLogger("sqlglot")
 31
 32
 33def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
 34    if not expression.find_ancestor(exp.From, exp.Join):
 35        return self.values_sql(expression)
 36
 37    alias = expression.args.get("alias")
 38
 39    structs = [
 40        exp.Struct(
 41            expressions=[
 42                exp.alias_(value, column_name)
 43                for value, column_name in zip(
 44                    t.expressions,
 45                    alias.columns
 46                    if alias and alias.columns
 47                    else (f"_c{i}" for i in range(len(t.expressions))),
 48                )
 49            ]
 50        )
 51        for t in expression.find_all(exp.Tuple)
 52    ]
 53
 54    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 55
 56
 57def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
 58    this = expression.this
 59    if isinstance(this, exp.Schema):
 60        this = f"{this.this} <{self.expressions(this)}>"
 61    else:
 62        this = self.sql(this)
 63    return f"RETURNS {this}"
 64
 65
 66def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
 67    kind = expression.args["kind"]
 68    returns = expression.find(exp.ReturnsProperty)
 69
 70    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 71        expression = expression.copy()
 72        expression.set("kind", "TABLE FUNCTION")
 73
 74        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 75            expression.set("expression", expression.expression.this)
 76
 77        return self.create_sql(expression)
 78
 79    return self.create_sql(expression)
 80
 81
 82def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 83    """Remove references to unnest table aliases since bigquery doesn't allow them.
 84
 85    These are added by the optimizer's qualify_column step.
 86    """
 87    from sqlglot.optimizer.scope import find_all_in_scope
 88
 89    if isinstance(expression, exp.Select):
 90        unnest_aliases = {
 91            unnest.alias
 92            for unnest in find_all_in_scope(expression, exp.Unnest)
 93            if isinstance(unnest.parent, (exp.From, exp.Join))
 94        }
 95        if unnest_aliases:
 96            for column in expression.find_all(exp.Column):
 97                if column.table in unnest_aliases:
 98                    column.set("table", None)
 99                elif column.db in unnest_aliases:
100                    column.set("db", None)
101
102    return expression
103
104
# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace GROUP BY expressions with their SELECT aliases when ORDER BY is present."""
    if not isinstance(expression, exp.Select):
        return expression

    group = expression.args.get("group")
    order = expression.args.get("order")

    if group and order:
        # Map each aliased projection expression back to its alias identifier.
        aliases = {
            projection.this: projection.args["alias"]
            for projection in expression.selects
            if isinstance(projection, exp.Alias)
        }

        for grouped in group.expressions:
            alias = aliases.get(grouped)
            if alias:
                grouped.replace(exp.column(alias))

    return expression
131
132
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if not (isinstance(expression, exp.CTE) and expression.alias_column_names):
        return expression

    cte_query = expression.this

    if cte_query.is_star:
        logger.warning(
            "Can't push down CTE column names for star queries. Run the query through"
            " the optimizer or use 'qualify' to expand the star projections first."
        )
        return expression

    column_names = expression.alias_column_names
    expression.args["alias"].set("columns", None)

    for column_name, projection in zip(column_names, cte_query.selects):
        target = projection

        # Inner aliases are shadowed by the CTE column names.
        inner = projection.this if isinstance(projection, exp.Alias) else projection

        target.replace(exp.alias_(inner, column_name))

    return expression
158
159
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Build StrToTime from PARSE_TIMESTAMP(format, value[, zone]) arguments."""
    build = format_time_lambda(exp.StrToTime, "bigquery")
    expr = build([seq_get(args, 1), seq_get(args, 0)])
    expr.set("zone", seq_get(args, 2))
    return expr
164
165
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """DATE(year, month, day) maps to DateFromParts; any other arity maps to Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
169
170
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
175
176
class BigQuery(Dialect):
    """BigQuery (GoogleSQL) dialect: dialect-wide settings plus identifier normalization."""

    # UNNEST yields a single column rather than a derived table.
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    # Oracle/Teradata-style format element -> strftime directive.
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        """Lowercase unquoted identifiers unless they plausibly name a table or UDF."""
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            # Walk up through dotted paths (e.g. project.dataset.table) to the container.
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression
231
    class Tokenizer(tokens.Tokenizer):
        """BigQuery lexer configuration."""

        # Single, double, and the triple-quoted forms all delimit strings.
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." byte literals, for every quote style above.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"..." raw strings (escapes are not processed).
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        }
        # DIV is a regular function in BigQuery, not an operator keyword.
        KEYWORDS.pop("DIV")
264
    class Parser(parser.Parser):
        """BigQuery parser overrides: function mappings and table/JSON parsing quirks."""

        PREFIXED_PIVOT_COLUMNS = True

        # LOG(base, value) argument order; bare LOG(x) means LN(x).
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE(fmt, value) -> StrToDate(value, fmt): argument order is swapped.
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # BigQuery returns the single capturing group when exactly one exists.
                # NOTE(review): this compiles the pattern literal at parse time and
                # would raise on a non-literal/invalid pattern — confirm upstream handling.
                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(SELECT ...) takes a full statement, not an expression list.
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM uses plain function-call syntax in BigQuery (no FROM/BOTH keywords).
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one dotted table-name component, allowing dashes and leading digits."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Fold `foo-bar-baz` style names into a single identifier.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # A number immediately followed by a var token (no gap) is a name like `123abc`.
                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Split a quoted `project.dataset.table` identifier into catalog/db/name parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                # Anything beyond three parts stays attached to the table name as a Dot chain.
                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            """Normalize JSON_OBJECT's array-pair signature into key/value expression pairs."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object
411
    class Generator(generator.Generator):
        """BigQuery SQL generator: expression transforms, type mappings and clause renderers."""

        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            # Unit defaults to DAY when absent.
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.Max: max_or_greatest,
            # MD5 round-trips as TO_HEX(MD5(..)) — mirror of _parse_to_hex.
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest(),
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            # NOTE(review): the f-prefix on "SHA256" below is unnecessary (no placeholders).
            exp.SHA2: lambda self, e: self.func(
                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Control characters are re-escaped when rendered into string literals.
        UNESCAPED_SEQUENCE_TABLE = str.maketrans(  # type: ignore
            {
                "\a": "\\a",
                "\b": "\\b",
                "\f": "\\f",
                "\n": "\\n",
                "\r": "\\r",
                "\t": "\\t",
                "\v": "\\v",
            }
        )

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            """Render AT TIME ZONE as TIMESTAMP(DATETIME(...)) except inside CAST-to-STRING."""
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """TRY_CAST becomes BigQuery's SAFE_CAST."""
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            """Render a CTE; column lists that survived pushdown are unsupported."""
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            """Render ARRAY(...); a subquery argument keeps ARRAY(SELECT ...) form."""
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            """BEGIN TRANSACTION has no options in BigQuery."""
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            """COMMIT TRANSACTION has no options in BigQuery."""
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            """ROLLBACK TRANSACTION has no options in BigQuery."""
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            """IN UNNEST(...) requires no extra parentheses around the UNNEST."""
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            """EXCEPT must be DISTINCT; ALL is emitted but flagged as unsupported."""
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            """INTERSECT must be DISTINCT; ALL is emitted but flagged as unsupported."""
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Table/column properties are rendered under an OPTIONS(...) clause."""
            return self.properties(properties, prefix=self.seg("OPTIONS"))

        def version_sql(self, expression: exp.Version) -> str:
            """Time-travel clauses use FOR SYSTEM_TIME AS OF rather than FOR TIMESTAMP."""
            if expression.name == "TIMESTAMP":
                expression = expression.copy()
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
178class BigQuery(Dialect):
179    UNNEST_COLUMN_ONLY = True
180    SUPPORTS_USER_DEFINED_TYPES = False
181    SUPPORTS_SEMI_ANTI_JOIN = False
182
183    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
184    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
185
186    # bigquery udfs are case sensitive
187    NORMALIZE_FUNCTIONS = False
188
189    TIME_MAPPING = {
190        "%D": "%m/%d/%y",
191    }
192
193    FORMAT_MAPPING = {
194        "DD": "%d",
195        "MM": "%m",
196        "MON": "%b",
197        "MONTH": "%B",
198        "YYYY": "%Y",
199        "YY": "%y",
200        "HH": "%I",
201        "HH12": "%I",
202        "HH24": "%H",
203        "MI": "%M",
204        "SS": "%S",
205        "SSSSS": "%f",
206        "TZH": "%z",
207    }
208
209    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
210    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
211    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
212
213    @classmethod
214    def normalize_identifier(cls, expression: E) -> E:
215        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
216        # The following check is essentially a heuristic to detect tables based on whether or
217        # not they're qualified.
218        if isinstance(expression, exp.Identifier):
219            parent = expression.parent
220
221            while isinstance(parent, exp.Dot):
222                parent = parent.parent
223
224            if (
225                not isinstance(parent, exp.UserDefinedFunction)
226                and not (isinstance(parent, exp.Table) and parent.db)
227                and not expression.meta.get("is_table")
228            ):
229                expression.set("this", expression.this.lower())
230
231        return expression
232
233    class Tokenizer(tokens.Tokenizer):
234        QUOTES = ["'", '"', '"""', "'''"]
235        COMMENTS = ["--", "#", ("/*", "*/")]
236        IDENTIFIERS = ["`"]
237        STRING_ESCAPES = ["\\"]
238
239        HEX_STRINGS = [("0x", ""), ("0X", "")]
240
241        BYTE_STRINGS = [
242            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
243        ]
244
245        RAW_STRINGS = [
246            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
247        ]
248
249        KEYWORDS = {
250            **tokens.Tokenizer.KEYWORDS,
251            "ANY TYPE": TokenType.VARIANT,
252            "BEGIN": TokenType.COMMAND,
253            "BEGIN TRANSACTION": TokenType.BEGIN,
254            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
255            "BYTES": TokenType.BINARY,
256            "DECLARE": TokenType.COMMAND,
257            "FLOAT64": TokenType.DOUBLE,
258            "INT64": TokenType.BIGINT,
259            "RECORD": TokenType.STRUCT,
260            "TIMESTAMP": TokenType.TIMESTAMPTZ,
261            "NOT DETERMINISTIC": TokenType.VOLATILE,
262            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
263        }
264        KEYWORDS.pop("DIV")
265
266    class Parser(parser.Parser):
267        PREFIXED_PIVOT_COLUMNS = True
268
269        LOG_BASE_FIRST = False
270        LOG_DEFAULTS_TO_LN = True
271
272        FUNCTIONS = {
273            **parser.Parser.FUNCTIONS,
274            "DATE": _parse_date,
275            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
276            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
277            "DATE_TRUNC": lambda args: exp.DateTrunc(
278                unit=exp.Literal.string(str(seq_get(args, 1))),
279                this=seq_get(args, 0),
280            ),
281            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
282            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
283            "DIV": binary_from_function(exp.IntDiv),
284            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
285            "MD5": exp.MD5Digest.from_arg_list,
286            "TO_HEX": _parse_to_hex,
287            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
288                [seq_get(args, 1), seq_get(args, 0)]
289            ),
290            "PARSE_TIMESTAMP": _parse_timestamp,
291            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
292            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
293                this=seq_get(args, 0),
294                expression=seq_get(args, 1),
295                position=seq_get(args, 2),
296                occurrence=seq_get(args, 3),
297                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
298            ),
299            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
300            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
301            "SPLIT": lambda args: exp.Split(
302                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
303                this=seq_get(args, 0),
304                expression=seq_get(args, 1) or exp.Literal.string(","),
305            ),
306            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
307            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
308            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
309            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
310            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
311        }
312
313        FUNCTION_PARSERS = {
314            **parser.Parser.FUNCTION_PARSERS,
315            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
316        }
317        FUNCTION_PARSERS.pop("TRIM")
318
319        NO_PAREN_FUNCTIONS = {
320            **parser.Parser.NO_PAREN_FUNCTIONS,
321            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
322        }
323
324        NESTED_TYPE_TOKENS = {
325            *parser.Parser.NESTED_TYPE_TOKENS,
326            TokenType.TABLE,
327        }
328
329        ID_VAR_TOKENS = {
330            *parser.Parser.ID_VAR_TOKENS,
331            TokenType.VALUES,
332        }
333
334        PROPERTY_PARSERS = {
335            **parser.Parser.PROPERTY_PARSERS,
336            "NOT DETERMINISTIC": lambda self: self.expression(
337                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
338            ),
339            "OPTIONS": lambda self: self._parse_with_property(),
340        }
341
342        CONSTRAINT_PARSERS = {
343            **parser.Parser.CONSTRAINT_PARSERS,
344            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
345        }
346
347        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
348        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)
349
350        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}
351
352        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
353            this = super()._parse_table_part(schema=schema) or self._parse_number()
354
355            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
356            if isinstance(this, exp.Identifier):
357                table_name = this.name
358                while self._match(TokenType.DASH, advance=False) and self._next:
359                    self._advance(2)
360                    table_name += f"-{self._prev.text}"
361
362                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
363            elif isinstance(this, exp.Literal):
364                table_name = this.name
365
366                if (
367                    self._curr
368                    and self._prev.end == self._curr.start - 1
369                    and self._parse_var(any_token=True)
370                ):
371                    table_name += self._prev.text
372
373                this = exp.Identifier(this=table_name, quoted=True)
374
375            return this
376
377        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
378            table = super()._parse_table_parts(schema=schema)
379            if isinstance(table.this, exp.Identifier) and "." in table.name:
380                catalog, db, this, *rest = (
381                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
382                    for x in split_num_words(table.name, ".", 3)
383                )
384
385                if rest and this:
386                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
387
388                table = exp.Table(this=this, db=db, catalog=catalog)
389
390            return table
391
392        def _parse_json_object(self) -> exp.JSONObject:
393            json_object = super()._parse_json_object()
394            array_kv_pair = seq_get(json_object.expressions, 0)
395
396            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
397            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
398            if (
399                array_kv_pair
400                and isinstance(array_kv_pair.this, exp.Array)
401                and isinstance(array_kv_pair.expression, exp.Array)
402            ):
403                keys = array_kv_pair.this.expressions
404                values = array_kv_pair.expression.expressions
405
406                json_object.set(
407                    "expressions",
408                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
409                )
410
411            return json_object
412
413    class Generator(generator.Generator):
414        EXPLICIT_UNION = True
415        INTERVAL_ALLOWS_PLURAL_FORM = False
416        JOIN_HINTS = False
417        QUERY_HINTS = False
418        TABLE_HINTS = False
419        LIMIT_FETCH = "LIMIT"
420        RENAME_TABLE_WITH_DB = False
421        NVL2_SUPPORTED = False
422        UNNEST_WITH_ORDINALITY = False
423
424        TRANSFORMS = {
425            **generator.Generator.TRANSFORMS,
426            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
427            exp.ArraySize: rename_func("ARRAY_LENGTH"),
428            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
429            exp.Create: _create_sql,
430            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
431            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
432            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
433            exp.DateFromParts: rename_func("DATE"),
434            exp.DateStrToDate: datestrtodate_sql,
435            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
436            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
437            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
438            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
439            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
440            exp.GroupConcat: rename_func("STRING_AGG"),
441            exp.Hex: rename_func("TO_HEX"),
442            exp.If: if_sql(false_value="NULL"),
443            exp.ILike: no_ilike_sql,
444            exp.IntDiv: rename_func("DIV"),
445            exp.JSONFormat: rename_func("TO_JSON_STRING"),
446            exp.JSONKeyValue: json_keyvalue_comma_sql,
447            exp.Max: max_or_greatest,
448            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
449            exp.MD5Digest: rename_func("MD5"),
450            exp.Min: min_or_least,
451            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
452            exp.RegexpExtract: lambda self, e: self.func(
453                "REGEXP_EXTRACT",
454                e.this,
455                e.expression,
456                e.args.get("position"),
457                e.args.get("occurrence"),
458            ),
459            exp.RegexpReplace: regexp_replace_sql,
460            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
461            exp.ReturnsProperty: _returnsproperty_sql,
462            exp.Select: transforms.preprocess(
463                [
464                    transforms.explode_to_unnest(),
465                    _unqualify_unnest,
466                    transforms.eliminate_distinct_on,
467                    _alias_ordered_group,
468                    transforms.eliminate_semi_and_anti_joins,
469                ]
470            ),
471            exp.SHA2: lambda self, e: self.func(
472                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
473            ),
474            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
475            if e.name == "IMMUTABLE"
476            else "NOT DETERMINISTIC",
477            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
478            exp.StrToTime: lambda self, e: self.func(
479                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
480            ),
481            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
482            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
483            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
484            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
485            exp.TimeStrToTime: timestrtotime_sql,
486            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
487            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
488            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
489            exp.Unhex: rename_func("FROM_HEX"),
490            exp.Values: _derived_table_values_to_unnest,
491            exp.VariancePop: rename_func("VAR_POP"),
492        }
493
494        TYPE_MAPPING = {
495            **generator.Generator.TYPE_MAPPING,
496            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
497            exp.DataType.Type.BIGINT: "INT64",
498            exp.DataType.Type.BINARY: "BYTES",
499            exp.DataType.Type.BOOLEAN: "BOOL",
500            exp.DataType.Type.CHAR: "STRING",
501            exp.DataType.Type.DECIMAL: "NUMERIC",
502            exp.DataType.Type.DOUBLE: "FLOAT64",
503            exp.DataType.Type.FLOAT: "FLOAT64",
504            exp.DataType.Type.INT: "INT64",
505            exp.DataType.Type.NCHAR: "STRING",
506            exp.DataType.Type.NVARCHAR: "STRING",
507            exp.DataType.Type.SMALLINT: "INT64",
508            exp.DataType.Type.TEXT: "STRING",
509            exp.DataType.Type.TIMESTAMP: "DATETIME",
510            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
511            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
512            exp.DataType.Type.TINYINT: "INT64",
513            exp.DataType.Type.VARBINARY: "BYTES",
514            exp.DataType.Type.VARCHAR: "STRING",
515            exp.DataType.Type.VARIANT: "ANY TYPE",
516        }
517
518        PROPERTIES_LOCATION = {
519            **generator.Generator.PROPERTIES_LOCATION,
520            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
521            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
522        }
523
524        UNESCAPED_SEQUENCE_TABLE = str.maketrans(  # type: ignore
525            {
526                "\a": "\\a",
527                "\b": "\\b",
528                "\f": "\\f",
529                "\n": "\\n",
530                "\r": "\\r",
531                "\t": "\\t",
532                "\v": "\\v",
533            }
534        )
535
536        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
537        RESERVED_KEYWORDS = {
538            *generator.Generator.RESERVED_KEYWORDS,
539            "all",
540            "and",
541            "any",
542            "array",
543            "as",
544            "asc",
545            "assert_rows_modified",
546            "at",
547            "between",
548            "by",
549            "case",
550            "cast",
551            "collate",
552            "contains",
553            "create",
554            "cross",
555            "cube",
556            "current",
557            "default",
558            "define",
559            "desc",
560            "distinct",
561            "else",
562            "end",
563            "enum",
564            "escape",
565            "except",
566            "exclude",
567            "exists",
568            "extract",
569            "false",
570            "fetch",
571            "following",
572            "for",
573            "from",
574            "full",
575            "group",
576            "grouping",
577            "groups",
578            "hash",
579            "having",
580            "if",
581            "ignore",
582            "in",
583            "inner",
584            "intersect",
585            "interval",
586            "into",
587            "is",
588            "join",
589            "lateral",
590            "left",
591            "like",
592            "limit",
593            "lookup",
594            "merge",
595            "natural",
596            "new",
597            "no",
598            "not",
599            "null",
600            "nulls",
601            "of",
602            "on",
603            "or",
604            "order",
605            "outer",
606            "over",
607            "partition",
608            "preceding",
609            "proto",
610            "qualify",
611            "range",
612            "recursive",
613            "respect",
614            "right",
615            "rollup",
616            "rows",
617            "select",
618            "set",
619            "some",
620            "struct",
621            "tablesample",
622            "then",
623            "to",
624            "treat",
625            "true",
626            "unbounded",
627            "union",
628            "unnest",
629            "using",
630            "when",
631            "where",
632            "window",
633            "with",
634            "within",
635        }
636
637        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
638            parent = expression.parent
639
640            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
641            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
642            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
643                return self.func(
644                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
645                )
646
647            return super().attimezone_sql(expression)
648
649        def trycast_sql(self, expression: exp.TryCast) -> str:
650            return self.cast_sql(expression, safe_prefix="SAFE_")
651
652        def cte_sql(self, expression: exp.CTE) -> str:
653            if expression.alias_column_names:
654                self.unsupported("Column names in CTE definition are not supported.")
655            return super().cte_sql(expression)
656
657        def array_sql(self, expression: exp.Array) -> str:
658            first_arg = seq_get(expression.expressions, 0)
659            if isinstance(first_arg, exp.Subqueryable):
660                return f"ARRAY{self.wrap(self.sql(first_arg))}"
661
662            return inline_array_sql(self, expression)
663
664        def transaction_sql(self, *_) -> str:
665            return "BEGIN TRANSACTION"
666
667        def commit_sql(self, *_) -> str:
668            return "COMMIT TRANSACTION"
669
670        def rollback_sql(self, *_) -> str:
671            return "ROLLBACK TRANSACTION"
672
673        def in_unnest_op(self, expression: exp.Unnest) -> str:
674            return self.sql(expression)
675
676        def except_op(self, expression: exp.Except) -> str:
677            if not expression.args.get("distinct", False):
678                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
679            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
680
681        def intersect_op(self, expression: exp.Intersect) -> str:
682            if not expression.args.get("distinct", False):
683                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
684            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
685
686        def with_properties(self, properties: exp.Properties) -> str:
687            return self.properties(properties, prefix=self.seg("OPTIONS"))
688
689        def version_sql(self, expression: exp.Version) -> str:
690            if expression.name == "TIMESTAMP":
691                expression = expression.copy()
692                expression.set("this", "SYSTEM_TIME")
693            return super().version_sql(expression)
UNNEST_COLUMN_ONLY = True
SUPPORTS_USER_DEFINED_TYPES = False
SUPPORTS_SEMI_ANTI_JOIN = False
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_PARTITIONTIME'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
213    @classmethod
214    def normalize_identifier(cls, expression: E) -> E:
215        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
216        # The following check is essentially a heuristic to detect tables based on whether or
217        # not they're qualified.
218        if isinstance(expression, exp.Identifier):
219            parent = expression.parent
220
221            while isinstance(parent, exp.Dot):
222                parent = parent.parent
223
224            if (
225                not isinstance(parent, exp.UserDefinedFunction)
226                and not (isinstance(parent, exp.Table) and parent.db)
227                and not expression.meta.get("is_table")
228            ):
229                expression.set("this", expression.this.lower())
230
231        return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of whether they are quoted.

tokenizer_class = <class 'BigQuery.Tokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
233    class Tokenizer(tokens.Tokenizer):
234        QUOTES = ["'", '"', '"""', "'''"]
235        COMMENTS = ["--", "#", ("/*", "*/")]
236        IDENTIFIERS = ["`"]
237        STRING_ESCAPES = ["\\"]
238
239        HEX_STRINGS = [("0x", ""), ("0X", "")]
240
241        BYTE_STRINGS = [
242            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
243        ]
244
245        RAW_STRINGS = [
246            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
247        ]
248
249        KEYWORDS = {
250            **tokens.Tokenizer.KEYWORDS,
251            "ANY TYPE": TokenType.VARIANT,
252            "BEGIN": TokenType.COMMAND,
253            "BEGIN TRANSACTION": TokenType.BEGIN,
254            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
255            "BYTES": TokenType.BINARY,
256            "DECLARE": TokenType.COMMAND,
257            "FLOAT64": TokenType.DOUBLE,
258            "INT64": TokenType.BIGINT,
259            "RECORD": TokenType.STRUCT,
260            "TIMESTAMP": TokenType.TIMESTAMPTZ,
261            "NOT DETERMINISTIC": TokenType.VOLATILE,
262            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
263        }
264        KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': 
<TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': 
<TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 
'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 
'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
class BigQuery.Parser(sqlglot.parser.Parser):
class Parser(parser.Parser):
    """Parser overrides for Google BigQuery.

    Customizes function parsing (date/time deltas, regexp helpers, hashing),
    property/constraint parsing (``OPTIONS``, ``NOT DETERMINISTIC``) and
    table-name parsing (dash-separated and dotted identifiers).
    """

    PREFIXED_PIVOT_COLUMNS = True

    # BigQuery's LOG(x, base) puts the value first; bare LOG(x) means LN(x).
    LOG_BASE_FIRST = False
    LOG_DEFAULTS_TO_LN = True

    FUNCTIONS = {
        **parser.Parser.FUNCTIONS,
        "DATE": _parse_date,
        "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
        "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
        # The unit argument is captured as a plain string literal.
        "DATE_TRUNC": lambda args: exp.DateTrunc(
            unit=exp.Literal.string(str(seq_get(args, 1))),
            this=seq_get(args, 0),
        ),
        "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
        "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
        "DIV": binary_from_function(exp.IntDiv),
        "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
        "MD5": exp.MD5Digest.from_arg_list,
        # BigQuery's PARSE_DATE takes (format, value); canonical order is (value, format).
        "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
            [seq_get(args, 1), seq_get(args, 0)]
        ),
        "PARSE_TIMESTAMP": _parse_timestamp,
        "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
        "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            position=seq_get(args, 2),
            occurrence=seq_get(args, 3),
            # Single-group patterns extract that group; otherwise the whole match.
            group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
        ),
        "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
        "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
        "SPLIT": lambda args: exp.Split(
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
            this=seq_get(args, 0),
            expression=seq_get(args, 1) or exp.Literal.string(","),
        ),
        "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
        "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
        "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
        "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
        "TO_HEX": _parse_to_hex,
        "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
    }

    FUNCTION_PARSERS = {
        **parser.Parser.FUNCTION_PARSERS,
        "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
    }
    # TRIM is parsed as a regular function in BigQuery.
    FUNCTION_PARSERS.pop("TRIM")

    NO_PAREN_FUNCTIONS = {
        **parser.Parser.NO_PAREN_FUNCTIONS,
        TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
    }

    NESTED_TYPE_TOKENS = {
        *parser.Parser.NESTED_TYPE_TOKENS,
        TokenType.TABLE,
    }

    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        TokenType.VALUES,
    }

    PROPERTY_PARSERS = {
        **parser.Parser.PROPERTY_PARSERS,
        "NOT DETERMINISTIC": lambda self: self.expression(
            exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
        ),
        "OPTIONS": lambda self: self._parse_with_property(),
    }

    CONSTRAINT_PARSERS = {
        **parser.Parser.CONSTRAINT_PARSERS,
        "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
    }

    # OVERLAPS is not a range operator in BigQuery.
    RANGE_PARSERS = {
        token: rp for token, rp in parser.Parser.RANGE_PARSERS.items()
        if token != TokenType.OVERLAPS
    }

    NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

    def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
        """Parse a single dot-separated component of a table reference.

        BigQuery permits dashes and leading digits in unquoted table names:
        https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
        """
        part = super()._parse_table_part(schema=schema) or self._parse_number()

        if isinstance(part, exp.Identifier):
            name = part.name
            # Stitch DASH-separated tokens back into one name, e.g. my-project.
            while self._match(TokenType.DASH, advance=False) and self._next:
                self._advance(2)
                name += f"-{self._prev.text}"

            part = exp.Identifier(this=name, quoted=part.args.get("quoted"))
        elif isinstance(part, exp.Literal):
            name = part.name

            # A number directly followed (no whitespace) by a word forms a
            # single identifier, e.g. 123abc.
            if (
                self._curr
                and self._prev.end == self._curr.start - 1
                and self._parse_var(any_token=True)
            ):
                name += self._prev.text

            part = exp.Identifier(this=name, quoted=True)

        return part

    def _parse_table_parts(self, schema: bool = False) -> exp.Table:
        """Parse a full table reference, splitting quoted dotted names.

        A single identifier such as `project.dataset.table` is decomposed
        into catalog/db/this components.
        """
        table = super()._parse_table_parts(schema=schema)

        if isinstance(table.this, exp.Identifier) and "." in table.name:
            parts = [
                t.cast(t.Optional[exp.Expression], exp.to_identifier(word))
                for word in split_num_words(table.name, ".", 3)
            ]
            catalog, db, this, *rest = parts

            if rest and this:
                # More than three parts: fold the tail into a dotted expression.
                this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

            table = exp.Table(this=this, db=db, catalog=catalog)

        return table

    def _parse_json_object(self) -> exp.JSONObject:
        """Parse JSON_OBJECT, normalizing BigQuery's array-pair signature.

        Converts BQ's "signature 2" (an array of keys plus an array of values)
        into SQLGlot's canonical list of key-value pairs:
        https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
        """
        obj = super()._parse_json_object()
        pair = seq_get(obj.expressions, 0)

        if (
            pair
            and isinstance(pair.this, exp.Array)
            and isinstance(pair.expression, exp.Array)
        ):
            obj.set(
                "expressions",
                [
                    exp.JSONKeyValue(this=key, expression=value)
                    for key, value in zip(pair.this.expressions, pair.expression.expressions)
                ],
            )

        return obj

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 
'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 
'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.MAP: 'MAP'>, <TokenType.NESTED: 'NESTED'>}
ID_VAR_TOKENS = {<TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SOME: 'SOME'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ASC: 'ASC'>, <TokenType.TOP: 'TOP'>, <TokenType.FIRST: 'FIRST'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.CHAR: 'CHAR'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.APPLY: 'APPLY'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWS: 'ROWS'>, <TokenType.INT: 'INT'>, <TokenType.KILL: 'KILL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.UINT: 'UINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.END: 'END'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.VALUES: 'VALUES'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DESC: 'DESC'>, <TokenType.TRUE: 'TRUE'>, <TokenType.MAP: 'MAP'>, <TokenType.INT256: 'INT256'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, 
<TokenType.TABLE: 'TABLE'>, <TokenType.ALL: 'ALL'>, <TokenType.SET: 'SET'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.XML: 'XML'>, <TokenType.BIT: 'BIT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TIME: 'TIME'>, <TokenType.FILTER: 'FILTER'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ROW: 'ROW'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.CASE: 'CASE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.ANY: 'ANY'>, <TokenType.IS: 'IS'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FULL: 'FULL'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.MERGE: 'MERGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.UUID: 'UUID'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.NEXT: 'NEXT'>, <TokenType.NULL: 'NULL'>, <TokenType.LOAD: 'LOAD'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.SETTINGS: 'SETTINGS'>, 
<TokenType.ANTI: 'ANTI'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UINT128: 'UINT128'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SUPER: 'SUPER'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.VAR: 'VAR'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BINARY: 'BINARY'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.DATE: 'DATE'>, <TokenType.INT128: 'INT128'>, <TokenType.SEMI: 'SEMI'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': 
<function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>}
NULL_TOKENS = {<TokenType.NULL: 'NULL'>, <TokenType.UNKNOWN: 'UNKNOWN'>}
TABLE_ALIAS_TOKENS = {<TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SOME: 'SOME'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ASC: 'ASC'>, <TokenType.TOP: 'TOP'>, <TokenType.FIRST: 'FIRST'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.CHAR: 'CHAR'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.ROWS: 'ROWS'>, <TokenType.INT: 'INT'>, <TokenType.KILL: 'KILL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.UINT: 'UINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.END: 'END'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DESC: 'DESC'>, <TokenType.TRUE: 'TRUE'>, <TokenType.MAP: 'MAP'>, <TokenType.INT256: 'INT256'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.ALL: 'ALL'>, <TokenType.SET: 'SET'>, <TokenType.VIEW: 'VIEW'>, 
<TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.XML: 'XML'>, <TokenType.BIT: 'BIT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TIME: 'TIME'>, <TokenType.FILTER: 'FILTER'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ROW: 'ROW'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.CASE: 'CASE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.ANY: 'ANY'>, <TokenType.IS: 'IS'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.MERGE: 'MERGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.UUID: 'UUID'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.NEXT: 'NEXT'>, <TokenType.NULL: 'NULL'>, <TokenType.LOAD: 'LOAD'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ANTI: 'ANTI'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 
<TokenType.UINT128: 'UINT128'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SUPER: 'SUPER'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.VAR: 'VAR'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BINARY: 'BINARY'>, <TokenType.INET: 'INET'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.DATE: 'DATE'>, <TokenType.INT128: 'INT128'>, <TokenType.SEMI: 'SEMI'>}
TOKENIZER_CLASS: Type[sqlglot.tokens.Tokenizer] = <class 'BigQuery.Tokenizer'>
UNNEST_COLUMN_ONLY: bool = True
SUPPORTS_USER_DEFINED_TYPES = False
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
413    class Generator(generator.Generator):
414        EXPLICIT_UNION = True
415        INTERVAL_ALLOWS_PLURAL_FORM = False
416        JOIN_HINTS = False
417        QUERY_HINTS = False
418        TABLE_HINTS = False
419        LIMIT_FETCH = "LIMIT"
420        RENAME_TABLE_WITH_DB = False
421        NVL2_SUPPORTED = False
422        UNNEST_WITH_ORDINALITY = False
423
424        TRANSFORMS = {
425            **generator.Generator.TRANSFORMS,
426            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
427            exp.ArraySize: rename_func("ARRAY_LENGTH"),
428            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
429            exp.Create: _create_sql,
430            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
431            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
432            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
433            exp.DateFromParts: rename_func("DATE"),
434            exp.DateStrToDate: datestrtodate_sql,
435            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
436            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
437            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
438            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
439            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
440            exp.GroupConcat: rename_func("STRING_AGG"),
441            exp.Hex: rename_func("TO_HEX"),
442            exp.If: if_sql(false_value="NULL"),
443            exp.ILike: no_ilike_sql,
444            exp.IntDiv: rename_func("DIV"),
445            exp.JSONFormat: rename_func("TO_JSON_STRING"),
446            exp.JSONKeyValue: json_keyvalue_comma_sql,
447            exp.Max: max_or_greatest,
448            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
449            exp.MD5Digest: rename_func("MD5"),
450            exp.Min: min_or_least,
451            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
452            exp.RegexpExtract: lambda self, e: self.func(
453                "REGEXP_EXTRACT",
454                e.this,
455                e.expression,
456                e.args.get("position"),
457                e.args.get("occurrence"),
458            ),
459            exp.RegexpReplace: regexp_replace_sql,
460            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
461            exp.ReturnsProperty: _returnsproperty_sql,
462            exp.Select: transforms.preprocess(
463                [
464                    transforms.explode_to_unnest(),
465                    _unqualify_unnest,
466                    transforms.eliminate_distinct_on,
467                    _alias_ordered_group,
468                    transforms.eliminate_semi_and_anti_joins,
469                ]
470            ),
471            exp.SHA2: lambda self, e: self.func(
472                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
473            ),
474            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
475            if e.name == "IMMUTABLE"
476            else "NOT DETERMINISTIC",
477            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
478            exp.StrToTime: lambda self, e: self.func(
479                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
480            ),
481            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
482            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
483            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
484            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
485            exp.TimeStrToTime: timestrtotime_sql,
486            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
487            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
488            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
489            exp.Unhex: rename_func("FROM_HEX"),
490            exp.Values: _derived_table_values_to_unnest,
491            exp.VariancePop: rename_func("VAR_POP"),
492        }
493
494        TYPE_MAPPING = {
495            **generator.Generator.TYPE_MAPPING,
496            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
497            exp.DataType.Type.BIGINT: "INT64",
498            exp.DataType.Type.BINARY: "BYTES",
499            exp.DataType.Type.BOOLEAN: "BOOL",
500            exp.DataType.Type.CHAR: "STRING",
501            exp.DataType.Type.DECIMAL: "NUMERIC",
502            exp.DataType.Type.DOUBLE: "FLOAT64",
503            exp.DataType.Type.FLOAT: "FLOAT64",
504            exp.DataType.Type.INT: "INT64",
505            exp.DataType.Type.NCHAR: "STRING",
506            exp.DataType.Type.NVARCHAR: "STRING",
507            exp.DataType.Type.SMALLINT: "INT64",
508            exp.DataType.Type.TEXT: "STRING",
509            exp.DataType.Type.TIMESTAMP: "DATETIME",
510            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
511            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
512            exp.DataType.Type.TINYINT: "INT64",
513            exp.DataType.Type.VARBINARY: "BYTES",
514            exp.DataType.Type.VARCHAR: "STRING",
515            exp.DataType.Type.VARIANT: "ANY TYPE",
516        }
517
518        PROPERTIES_LOCATION = {
519            **generator.Generator.PROPERTIES_LOCATION,
520            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
521            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
522        }
523
524        UNESCAPED_SEQUENCE_TABLE = str.maketrans(  # type: ignore
525            {
526                "\a": "\\a",
527                "\b": "\\b",
528                "\f": "\\f",
529                "\n": "\\n",
530                "\r": "\\r",
531                "\t": "\\t",
532                "\v": "\\v",
533            }
534        )
535
536        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
537        RESERVED_KEYWORDS = {
538            *generator.Generator.RESERVED_KEYWORDS,
539            "all",
540            "and",
541            "any",
542            "array",
543            "as",
544            "asc",
545            "assert_rows_modified",
546            "at",
547            "between",
548            "by",
549            "case",
550            "cast",
551            "collate",
552            "contains",
553            "create",
554            "cross",
555            "cube",
556            "current",
557            "default",
558            "define",
559            "desc",
560            "distinct",
561            "else",
562            "end",
563            "enum",
564            "escape",
565            "except",
566            "exclude",
567            "exists",
568            "extract",
569            "false",
570            "fetch",
571            "following",
572            "for",
573            "from",
574            "full",
575            "group",
576            "grouping",
577            "groups",
578            "hash",
579            "having",
580            "if",
581            "ignore",
582            "in",
583            "inner",
584            "intersect",
585            "interval",
586            "into",
587            "is",
588            "join",
589            "lateral",
590            "left",
591            "like",
592            "limit",
593            "lookup",
594            "merge",
595            "natural",
596            "new",
597            "no",
598            "not",
599            "null",
600            "nulls",
601            "of",
602            "on",
603            "or",
604            "order",
605            "outer",
606            "over",
607            "partition",
608            "preceding",
609            "proto",
610            "qualify",
611            "range",
612            "recursive",
613            "respect",
614            "right",
615            "rollup",
616            "rows",
617            "select",
618            "set",
619            "some",
620            "struct",
621            "tablesample",
622            "then",
623            "to",
624            "treat",
625            "true",
626            "unbounded",
627            "union",
628            "unnest",
629            "using",
630            "when",
631            "where",
632            "window",
633            "with",
634            "within",
635        }
636
637        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
638            parent = expression.parent
639
640            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
641            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
642            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
643                return self.func(
644                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
645                )
646
647            return super().attimezone_sql(expression)
648
649        def trycast_sql(self, expression: exp.TryCast) -> str:
650            return self.cast_sql(expression, safe_prefix="SAFE_")
651
652        def cte_sql(self, expression: exp.CTE) -> str:
653            if expression.alias_column_names:
654                self.unsupported("Column names in CTE definition are not supported.")
655            return super().cte_sql(expression)
656
657        def array_sql(self, expression: exp.Array) -> str:
658            first_arg = seq_get(expression.expressions, 0)
659            if isinstance(first_arg, exp.Subqueryable):
660                return f"ARRAY{self.wrap(self.sql(first_arg))}"
661
662            return inline_array_sql(self, expression)
663
664        def transaction_sql(self, *_) -> str:
665            return "BEGIN TRANSACTION"
666
667        def commit_sql(self, *_) -> str:
668            return "COMMIT TRANSACTION"
669
670        def rollback_sql(self, *_) -> str:
671            return "ROLLBACK TRANSACTION"
672
673        def in_unnest_op(self, expression: exp.Unnest) -> str:
674            return self.sql(expression)
675
676        def except_op(self, expression: exp.Except) -> str:
677            if not expression.args.get("distinct", False):
678                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
679            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
680
681        def intersect_op(self, expression: exp.Intersect) -> str:
682            if not expression.args.get("distinct", False):
683                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
684            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
685
686        def with_properties(self, properties: exp.Properties) -> str:
687            return self.properties(properties, prefix=self.seg("OPTIONS"))
688
689        def version_sql(self, expression: exp.Version) -> str:
690            if expression.name == "TIMESTAMP":
691                expression = expression.copy()
692                expression.set("this", "SYSTEM_TIME")
693            return super().version_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except where the dialect makes quoting mandatory. True or 'always': Always quote. 'safe': Only quote identifiers that are case-insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether the comma in select expressions is leading or trailing. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: Whether or not to preserve comments in the output SQL code. Default: True.
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
NVL2_SUPPORTED = False
UNNEST_WITH_ORDINALITY = False
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 
'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 
'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
UNESCAPED_SEQUENCE_TABLE = {7: '\\a', 8: '\\b', 12: '\\f', 10: '\\n', 13: '\\r', 9: '\\t', 11: '\\v'}
RESERVED_KEYWORDS = {'having', 'when', 'range', 'grouping', 'treat', 'limit', 'of', 'window', 'ignore', 'is', 'new', 'right', 'select', 'escape', 'true', 'recursive', 'false', 'exists', 'define', 'from', 'if', 'end', 'partition', 'intersect', 'unnest', 'set', 'respect', 'for', 'join', 'lookup', 'over', 'cast', 'natural', 'or', 'inner', 'collate', 'at', 'and', 'fetch', 'current', 'by', 'lateral', 'between', 'desc', 'exclude', 'proto', 'assert_rows_modified', 'all', 'extract', 'contains', 'with', 'hash', 'except', 'within', 'null', 'on', 'following', 'order', 'using', 'as', 'into', 'some', 'asc', 'merge', 'group', 'tablesample', 'create', 'where', 'full', 'then', 'no', 'rows', 'enum', 'default', 'qualify', 'else', 'preceding', 'left', 'array', 'like', 'union', 'outer', 'cube', 'unbounded', 'in', 'groups', 'cross', 'nulls', 'interval', 'any', 'case', 'to', 'not', 'struct', 'distinct', 'rollup'}
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
637        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
638            parent = expression.parent
639
640            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
641            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
642            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
643                return self.func(
644                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
645                )
646
647            return super().attimezone_sql(expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
649        def trycast_sql(self, expression: exp.TryCast) -> str:
650            return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: sqlglot.expressions.CTE) -> str:
652        def cte_sql(self, expression: exp.CTE) -> str:
653            if expression.alias_column_names:
654                self.unsupported("Column names in CTE definition are not supported.")
655            return super().cte_sql(expression)
def array_sql(self, expression: sqlglot.expressions.Array) -> str:
657        def array_sql(self, expression: exp.Array) -> str:
658            first_arg = seq_get(expression.expressions, 0)
659            if isinstance(first_arg, exp.Subqueryable):
660                return f"ARRAY{self.wrap(self.sql(first_arg))}"
661
662            return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
664        def transaction_sql(self, *_) -> str:
665            return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
667        def commit_sql(self, *_) -> str:
668            return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
670        def rollback_sql(self, *_) -> str:
671            return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
673        def in_unnest_op(self, expression: exp.Unnest) -> str:
674            return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
676        def except_op(self, expression: exp.Except) -> str:
677            if not expression.args.get("distinct", False):
678                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
679            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
681        def intersect_op(self, expression: exp.Intersect) -> str:
682            if not expression.args.get("distinct", False):
683                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
684            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
686        def with_properties(self, properties: exp.Properties) -> str:
687            return self.properties(properties, prefix=self.seg("OPTIONS"))
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
689        def version_sql(self, expression: exp.Version) -> str:
690            if expression.name == "TIMESTAMP":
691                expression = expression.copy()
692                expression.set("this", "SYSTEM_TIME")
693            return super().version_sql(expression)
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
269    @classmethod
270    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
271        """Checks if text can be identified given an identify option.
272
273        Args:
274            text: The text to check.
275            identify:
276                "always" or `True`: Always returns true.
277                "safe": True if the identifier is case-insensitive.
278
279        Returns:
280            Whether or not the given text can be identified.
281        """
282        if identify is True or identify == "always":
283            return True
284
285        if identify == "safe":
286            return not cls.case_sensitive(text)
287
288        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or `True`: always returns True. "safe": returns True only if the identifier is case-insensitive.
Returns:

Whether or not the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
TOKENIZER_CLASS = <class 'BigQuery.Tokenizer'>
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_ADD_COLUMN_KEYWORD
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
SUPPORTS_PARAMETERS
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql