Edit on GitHub

sqlglot.dialects.snowflake — source listing of the Snowflake SQL dialect (tokenizer, parser, and generator customizations).

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25    no_safe_divide_sql,
  26    no_timestamp_sql,
  27    timestampdiff_sql,
  28)
  29from sqlglot.generator import unsupported_args
  30from sqlglot.helper import flatten, is_float, is_int, seq_get
  31from sqlglot.tokens import TokenType
  32
  33if t.TYPE_CHECKING:
  34    from sqlglot._typing import E
  35
  36
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE/TO_TIME/TO_TIMESTAMP family.

    Args:
        name: The original function name, used as an ``exp.Anonymous`` fallback
            when no more specific expression applies.
        kind: The target data type of the conversion.
        safe: True for the TRY_* variants, which return NULL instead of raising.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # NOTE: int-ness is tested on the literal's *name* (its text), so e.g.
        # TO_TIMESTAMP('123') is treated the same as TO_TIMESTAMP(123)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value and not safe:
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    # String + optional format: parse as StrToTime, tagging TRY_* safety
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't model precisely is kept as an anonymous function call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  74
  75
  76def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  77    expression = parser.build_var_map(args)
  78
  79    if isinstance(expression, exp.StarMap):
  80        return expression
  81
  82    return exp.Struct(
  83        expressions=[
  84            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  85        ]
  86    )
  87
  88
  89def _build_datediff(args: t.List) -> exp.DateDiff:
  90    return exp.DateDiff(
  91        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  92    )
  93
  94
  95def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  96    def _builder(args: t.List) -> E:
  97        return expr_type(
  98            this=seq_get(args, 2),
  99            expression=seq_get(args, 1),
 100            unit=map_date_part(seq_get(args, 0)),
 101        )
 102
 103    return _builder
 104
 105
 106# https://docs.snowflake.com/en/sql-reference/functions/div0
 107def _build_if_from_div0(args: t.List) -> exp.If:
 108    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
 109        exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
 110    )
 111    true = exp.Literal.number(0)
 112    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
 113    return exp.If(this=cond, true=true, false=false)
 114
 115
 116# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 117def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 118    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 119    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 120
 121
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    # NULLIFZERO(x) is transpiled as IF(x = 0, NULL, x)
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 126
 127
 128def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 129    flag = expression.text("flag")
 130
 131    if "i" not in flag:
 132        flag += "i"
 133
 134    return self.func(
 135        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 136    )
 137
 138
 139def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 140    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 141
 142    if not regexp_replace.args.get("replacement"):
 143        regexp_replace.set("replacement", exp.Literal.string(""))
 144
 145    return regexp_replace
 146
 147
 148def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 149    def _parse(self: Snowflake.Parser) -> exp.Show:
 150        return self._parse_show_snowflake(*args, **kwargs)
 151
 152    return _parse
 153
 154
 155def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 156    trunc = date_trunc_to_time(args)
 157    trunc.set("unit", map_date_part(trunc.args["unit"]))
 158    return trunc
 159
 160
 161def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 162    """
 163    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 164    so we need to unqualify them.
 165
 166    Example:
 167        >>> from sqlglot import parse_one
 168        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 169        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 170        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 171    """
 172    if isinstance(expression, exp.Pivot) and expression.unpivot:
 173        expression = transforms.unqualify_columns(expression)
 174
 175    return expression
 176
 177
 178def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 179    assert isinstance(expression, exp.Create)
 180
 181    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 182        if expression.this in exp.DataType.NESTED_TYPES:
 183            expression.set("expressions", None)
 184        return expression
 185
 186    props = expression.args.get("properties")
 187    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 188        for schema_expression in expression.this.expressions:
 189            if isinstance(schema_expression, exp.ColumnDef):
 190                column_type = schema_expression.kind
 191                if isinstance(column_type, exp.DataType):
 192                    column_type.transform(_flatten_structured_type, copy=False)
 193
 194    return expression
 195
 196
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite `UNNEST(GENERATE_DATE_ARRAY(start, end, step))` in FROM/JOIN clauses
    into a Snowflake-friendly subquery built on ARRAY_GENERATE_RANGE + DATEADD.

    Only applies when the step is an interval with a literal value of 1; other
    shapes are left untouched.
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Bail out unless the step is exactly 1 <unit>
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                # Reuse the UNNEST alias's first column name for the projected value
                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                # Swap the GENERATE_DATE_ARRAY for the numeric range, then wrap the
                # whole UNNEST in a subquery that projects the computed dates
                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 236
 237
 238class Snowflake(Dialect):
 239    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
 240    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
 241    NULL_ORDERING = "nulls_are_large"
 242    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 243    SUPPORTS_USER_DEFINED_TYPES = False
 244    SUPPORTS_SEMI_ANTI_JOIN = False
 245    PREFER_CTE_ALIAS_COLUMN = True
 246    TABLESAMPLE_SIZE_IS_PERCENT = True
 247    COPY_PARAMS_ARE_CSV = False
 248    ARRAY_AGG_INCLUDES_NULLS = None
 249
 250    TIME_MAPPING = {
 251        "YYYY": "%Y",
 252        "yyyy": "%Y",
 253        "YY": "%y",
 254        "yy": "%y",
 255        "MMMM": "%B",
 256        "mmmm": "%B",
 257        "MON": "%b",
 258        "mon": "%b",
 259        "MM": "%m",
 260        "mm": "%m",
 261        "DD": "%d",
 262        "dd": "%-d",
 263        "DY": "%a",
 264        "dy": "%w",
 265        "HH24": "%H",
 266        "hh24": "%H",
 267        "HH12": "%I",
 268        "hh12": "%I",
 269        "MI": "%M",
 270        "mi": "%M",
 271        "SS": "%S",
 272        "ss": "%S",
 273        "FF": "%f",
 274        "ff": "%f",
 275        "FF6": "%f",
 276        "ff6": "%f",
 277    }
 278
 279    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 280        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 281        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 282        if (
 283            isinstance(expression, exp.Identifier)
 284            and isinstance(expression.parent, exp.Table)
 285            and expression.name.lower() == "dual"
 286        ):
 287            return expression  # type: ignore
 288
 289        return super().quote_identifier(expression, identify=identify)
 290
    class Parser(parser.Parser):
        """Snowflake-specific parser configuration: function builders, statement
        parsers, and token-set overrides layered on top of the base parser."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `a:b` is Snowflake's VARIANT path-extraction operator, not a slice
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Maps Snowflake function names to builders that produce canonical AST nodes
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (array, value); the AST is (value, array)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # Snowflake's LEN/LENGTH measure bytes for binary input
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <id>, <id>, ...
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW object kinds; TERSE variants are detected in _parse_show_snowflake
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-char tokens that may appear inside a staged file path (@stage/...)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed output columns of Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Lambda args may carry inline types (parsed as Casts); unwrap them
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 471
 472        def _negate_range(
 473            self, this: t.Optional[exp.Expression] = None
 474        ) -> t.Optional[exp.Expression]:
 475            if not this:
 476                return this
 477
 478            query = this.args.get("query")
 479            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 480                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 481                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 482                # which can produce different results (most likely a SnowFlake bug).
 483                #
 484                # https://docs.snowflake.com/en/sql-reference/functions/in
 485                # Context: https://github.com/tobymao/sqlglot/issues/3890
 486                return self.expression(
 487                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 488                )
 489
 490            return self.expression(exp.Not, this=this)
 491
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a `[WITH] MASKING POLICY | PROJECTION POLICY | TAG` column constraint.

            Registered under several trigger keywords (WITH/MASKING/PROJECTION/TAG);
            when the trigger wasn't WITH, step back one token so the keyword itself
            can be matched below.
            """
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Policy names may be qualified; flatten a Column into a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            # Not one of the recognized constraints
            return None
 517
 518        def _parse_create(self) -> exp.Create | exp.Command:
 519            expression = super()._parse_create()
 520            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 521                # Replace the Table node with the enclosed Identifier
 522                expression.this.replace(expression.this.this)
 523
 524            return expression
 525
 526        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 527        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 528        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 529            this = self._parse_var() or self._parse_type()
 530
 531            if not this:
 532                return None
 533
 534            self._match(TokenType.COMMA)
 535            expression = self._parse_bitwise()
 536            this = map_date_part(this)
 537            name = this.name.upper()
 538
 539            if name.startswith("EPOCH"):
 540                if name == "EPOCH_MILLISECOND":
 541                    scale = 10**3
 542                elif name == "EPOCH_MICROSECOND":
 543                    scale = 10**6
 544                elif name == "EPOCH_NANOSECOND":
 545                    scale = 10**9
 546                else:
 547                    scale = None
 548
 549                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 550                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 551
 552                if scale:
 553                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 554
 555                return to_unix
 556
 557            return self.expression(exp.Extract, this=this, expression=expression)
 558
 559        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 560            if is_map:
 561                # Keys are strings in Snowflake's objects, see also:
 562                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 563                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 564                return self._parse_slice(self._parse_string())
 565
 566            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 567
 568        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 569            lateral = super()._parse_lateral()
 570            if not lateral:
 571                return lateral
 572
 573            if isinstance(lateral.this, exp.Explode):
 574                table_alias = lateral.args.get("alias")
 575                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 576                if table_alias and not table_alias.args.get("columns"):
 577                    table_alias.set("columns", columns)
 578                elif not table_alias:
 579                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 580
 581            return lateral
 582
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, additionally supporting staged file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @-prefixed path denotes a staged file, not a table
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT can be a string or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 616
 617        def _parse_id_var(
 618            self,
 619            any_token: bool = True,
 620            tokens: t.Optional[t.Collection[TokenType]] = None,
 621        ) -> t.Optional[exp.Expression]:
 622            if self._match_text_seq("IDENTIFIER", "("):
 623                identifier = (
 624                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 625                    or self._parse_string()
 626                )
 627                self._match_r_paren()
 628                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 629
 630            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 631
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW statement into an exp.Show node.

            `this` is the object kind (e.g. "TABLES"), pre-bound by SHOW_PARSERS.
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # Explicit scope keyword (DATABASE, SCHEMA, ...) possibly followed by a name
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare name: schema-scoped for schema kinds, table-scoped otherwise
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 669
 670        def _parse_location_property(self) -> exp.LocationProperty:
 671            self._match(TokenType.EQ)
 672            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 673
 674        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 675            # Parse either a subquery or a staged file
 676            return (
 677                self._parse_select(table=True, parse_subquery_alias=False)
 678                if self._match(TokenType.L_PAREN, advance=False)
 679                else self._parse_table_parts()
 680            )
 681
 682        def _parse_location_path(self) -> exp.Var:
 683            parts = [self._advance_any(ignore_reserved=True)]
 684
 685            # We avoid consuming a comma token because external tables like @foo and @bar
 686            # can be joined in a query with a comma separator, as well as closing paren
 687            # in case of subqueries
 688            while self._is_connected() and not self._match_set(
 689                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 690            ):
 691                parts.append(self._advance_any(ignore_reserved=True))
 692
 693            return exp.var("".join(part.text for part in parts if part))
 694
 695        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 696            this = super()._parse_lambda_arg()
 697
 698            if not this:
 699                return this
 700
 701            typ = self._parse_types()
 702
 703            if typ:
 704                return self.expression(exp.Cast, this=this, to=typ)
 705
 706            return this
 707
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer settings and keyword mappings."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage-manipulation statements are passed through as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ hint comments
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement rather than an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 750
    class Generator(generator.Generator):
        """SQL generator producing Snowflake syntax."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True

        # Per-expression renderers mapping sqlglot AST nodes to Snowflake spellings.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) — arguments swapped.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive; add 1 to keep inclusive semantics.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a Snowflake VALUES clause;
        # see values_sql below.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 896
 897        def with_properties(self, properties: exp.Properties) -> str:
 898            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 899
 900        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 901            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 902                values_as_table = False
 903
 904            return super().values_sql(expression, values_as_table=values_as_table)
 905
 906        def datatype_sql(self, expression: exp.DataType) -> str:
 907            expressions = expression.expressions
 908            if (
 909                expressions
 910                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 911                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 912            ):
 913                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 914                return "OBJECT"
 915
 916            return super().datatype_sql(expression)
 917
 918        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 919            return self.func(
 920                "TO_NUMBER",
 921                expression.this,
 922                expression.args.get("format"),
 923                expression.args.get("precision"),
 924                expression.args.get("scale"),
 925            )
 926
 927        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 928            milli = expression.args.get("milli")
 929            if milli is not None:
 930                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 931                expression.set("nano", milli_to_nano)
 932
 933            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 934
 935        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 936            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 937                return self.func("TO_GEOGRAPHY", expression.this)
 938            if expression.is_type(exp.DataType.Type.GEOMETRY):
 939                return self.func("TO_GEOMETRY", expression.this)
 940
 941            return super().cast_sql(expression, safe_prefix=safe_prefix)
 942
 943        def trycast_sql(self, expression: exp.TryCast) -> str:
 944            value = expression.this
 945
 946            if value.type is None:
 947                from sqlglot.optimizer.annotate_types import annotate_types
 948
 949                value = annotate_types(value)
 950
 951            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 952                return super().trycast_sql(expression)
 953
 954            # TRY_CAST only works for string values in Snowflake
 955            return self.cast_sql(expression)
 956
 957        def log_sql(self, expression: exp.Log) -> str:
 958            if not expression.expression:
 959                return self.func("LN", expression.this)
 960
 961            return super().log_sql(expression)
 962
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes the fixed columns SEQ, KEY, PATH, INDEX, VALUE, THIS,
            so the generated table alias maps those positions onto the caller's
            requested names — order matters here.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An UNNEST ... WITH OFFSET alias maps onto FLATTEN's INDEX column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied column name aliases the VALUE column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 986
        def show_sql(self, expression: exp.Show) -> str:
            """Render a parsed SHOW statement back to Snowflake SQL.

            Each optional clause is pre-rendered with its leading space so the
            final f-string concatenation produces correct spacing.
            """
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            # e.g. SHOW TABLES IN DATABASE mydb — scope_kind carries "DATABASE mydb"
            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1011
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render RegexpExtract as Snowflake's REGEXP_SUBSTR.

            Other dialects don't support all of the following parameters, so we need to
            generate default values as necessary to ensure the transpilation is correct.
            """
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each earlier positional argument must be present whenever a later one
            # is, hence this cascade of defaults: group -> parameters -> occurrence
            # -> position.
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1035
1036        def describe_sql(self, expression: exp.Describe) -> str:
1037            # Default to table if kind is unknown
1038            kind_value = expression.args.get("kind") or "TABLE"
1039            kind = f" {kind_value}" if kind_value else ""
1040            this = f" {self.sql(expression, 'this')}"
1041            expressions = self.expressions(expression, flat=True)
1042            expressions = f" {expressions}" if expressions else ""
1043            return f"DESCRIBE{kind}{this}{expressions}"
1044
1045        def generatedasidentitycolumnconstraint_sql(
1046            self, expression: exp.GeneratedAsIdentityColumnConstraint
1047        ) -> str:
1048            start = expression.args.get("start")
1049            start = f" START {start}" if start else ""
1050            increment = expression.args.get("increment")
1051            increment = f" INCREMENT {increment}" if increment else ""
1052            return f"AUTOINCREMENT{start}{increment}"
1053
1054        def cluster_sql(self, expression: exp.Cluster) -> str:
1055            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1056
1057        def struct_sql(self, expression: exp.Struct) -> str:
1058            keys = []
1059            values = []
1060
1061            for i, e in enumerate(expression.expressions):
1062                if isinstance(e, exp.PropertyEQ):
1063                    keys.append(
1064                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1065                    )
1066                    values.append(e.expression)
1067                else:
1068                    keys.append(exp.Literal.string(f"_{i}"))
1069                    values.append(e)
1070
1071            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1072
1073        @unsupported_args("weight", "accuracy")
1074        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1075            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1076
1077        def alterset_sql(self, expression: exp.AlterSet) -> str:
1078            exprs = self.expressions(expression, flat=True)
1079            exprs = f" {exprs}" if exprs else ""
1080            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1081            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1082            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1083            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1084            tag = self.expressions(expression, key="tag", flat=True)
1085            tag = f" TAG {tag}" if tag else ""
1086
1087            return f"SET{exprs}{file_format}{copy_options}{tag}"
1088
1089        def strtotime_sql(self, expression: exp.StrToTime):
1090            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1091            return self.func(
1092                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1093            )
1094
1095        def timestampsub_sql(self, expression: exp.TimestampSub):
1096            return self.sql(
1097                exp.TimestampAdd(
1098                    this=expression.this,
1099                    expression=expression.expression * -1,
1100                    unit=expression.unit,
1101                )
1102            )
1103
1104        def jsonextract_sql(self, expression: exp.JSONExtract):
1105            this = expression.this
1106
1107            # JSON strings are valid coming from other dialects such as BQ
1108            return self.func(
1109                "GET_PATH",
1110                exp.ParseJSON(this=this) if this.is_string else this,
1111                expression.expression,
1112            )
# NOTE: documentation-extraction artifact — the actual `class Snowflake(Dialect)` definition follows below.
class Snowflake(Dialect):
    """Dialect definition for Snowflake."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 279
 280    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 281        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 282        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 283        if (
 284            isinstance(expression, exp.Identifier)
 285            and isinstance(expression.parent, exp.Table)
 286            and expression.name.lower() == "dual"
 287        ):
 288            return expression  # type: ignore
 289
 290        return super().quote_identifier(expression, identify=identify)
 291
    class Parser(parser.Parser):
        """Parser producing sqlglot ASTs from Snowflake SQL."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Function-name -> builder mapping for Snowflake-specific functions.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's ARRAY_CONTAINS takes (value, array) — arguments swapped.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM is a plain function call, not the SQL-standard form.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # Object kinds supported by the fully-parsed SHOW statement.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged file path (e.g. @stage/dir/file).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose name should be an Identifier, not a Table (see _parse_create).
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip the Cast wrappers added by _parse_lambda_arg for typed args.
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 472
 473        def _negate_range(
 474            self, this: t.Optional[exp.Expression] = None
 475        ) -> t.Optional[exp.Expression]:
 476            if not this:
 477                return this
 478
 479            query = this.args.get("query")
 480            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 481                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 482                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 483                # which can produce different results (most likely a SnowFlake bug).
 484                #
 485                # https://docs.snowflake.com/en/sql-reference/functions/in
 486                # Context: https://github.com/tobymao/sqlglot/issues/3890
 487                return self.expression(
 488                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 489                )
 490
 491            return self.expression(exp.Not, this=this)
 492
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH-prefixed column constraints: MASKING POLICY, PROJECTION POLICY, TAG.

            Returns None when none of the known constraint kinds match.
            """
            # WITH is optional; when we were dispatched on MASKING/PROJECTION/TAG
            # directly, step back so the text-sequence matches below can consume
            # the keyword themselves.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Normalize a qualified policy name (db.schema.policy) to a Dot chain.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 518
 519        def _parse_create(self) -> exp.Create | exp.Command:
 520            expression = super()._parse_create()
 521            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 522                # Replace the Table node with the enclosed Identifier
 523                expression.this.replace(expression.this.this)
 524
 525            return expression
 526
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse the arguments of DATE_PART(<part>, <expr>).

            EPOCH* parts are normalized into a cast-to-TIMESTAMP plus TimeToUnix
            (scaled for milli/micro/nanoseconds); everything else becomes Extract.
            """
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Multiplier relative to epoch seconds
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
 559
 560        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 561            if is_map:
 562                # Keys are strings in Snowflake's objects, see also:
 563                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 564                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 565                return self._parse_slice(self._parse_string())
 566
 567            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 568
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's output column names when needed.

            Snowflake's FLATTEN produces the fixed column set in FLATTEN_COLUMNS;
            those names are attached to the table alias so later references resolve.
            """
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    # Alias present but without a column list: attach FLATTEN's columns
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one carrying the column list
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
 583
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            Staged files (string literals or @-paths) may carry optional
            (FILE_FORMAT => ..., PATTERN => ...) options.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT accepts a string literal or a (qualified) format name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 617
 618        def _parse_id_var(
 619            self,
 620            any_token: bool = True,
 621            tokens: t.Optional[t.Collection[TokenType]] = None,
 622        ) -> t.Optional[exp.Expression]:
 623            if self._match_text_seq("IDENTIFIER", "("):
 624                identifier = (
 625                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 626                    or self._parse_string()
 627                )
 628                self._match_r_paren()
 629                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 630
 631            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 632
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW command into an exp.Show node.

            `this` is the object kind being shown (e.g. "SCHEMAS").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare IN <name>: infer the scope kind from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 670
 671        def _parse_location_property(self) -> exp.LocationProperty:
 672            self._match(TokenType.EQ)
 673            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 674
 675        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 676            # Parse either a subquery or a staged file
 677            return (
 678                self._parse_select(table=True, parse_subquery_alias=False)
 679                if self._match(TokenType.L_PAREN, advance=False)
 680                else self._parse_table_parts()
 681            )
 682
        def _parse_location_path(self) -> exp.Var:
            """Consume the contiguous tokens of a stage path into a single Var,
            concatenating their raw text (reserved words included)."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 695
 696        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 697            this = super()._parse_lambda_arg()
 698
 699            if not this:
 700                return this
 701
 702            typ = self._parse_types()
 703
 704            if typ:
 705                return self.expression(exp.Cast, this=this, to=typ)
 706
 707            return this
 708
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer configuration."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake doesn't use the /*+ hint-comment opener
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed into an AST instead of being treated as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 751
    class Generator(generator.Generator):
        """Snowflake-specific SQL generator: feature flags, expression transforms,
        and type/property mappings."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True

        # Maps sqlglot expression nodes to Snowflake SQL renderers
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Note the argument order swap: ARRAY_CONTAINS(<value>, <array>)
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive (+1)
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # These properties have no Snowflake equivalent
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that force VALUES to be rendered in its non-table form
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 897
 898        def with_properties(self, properties: exp.Properties) -> str:
 899            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 900
 901        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 902            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 903                values_as_table = False
 904
 905            return super().values_sql(expression, values_as_table=values_as_table)
 906
 907        def datatype_sql(self, expression: exp.DataType) -> str:
 908            expressions = expression.expressions
 909            if (
 910                expressions
 911                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 912                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 913            ):
 914                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 915                return "OBJECT"
 916
 917            return super().datatype_sql(expression)
 918
 919        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 920            return self.func(
 921                "TO_NUMBER",
 922                expression.this,
 923                expression.args.get("format"),
 924                expression.args.get("precision"),
 925                expression.args.get("scale"),
 926            )
 927
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Generate TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

            Snowflake's function takes a nanosecond argument rather than a
            millisecond one, so `milli` is popped and converted (1 ms = 1e6 ns).
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 935
 936        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 937            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 938                return self.func("TO_GEOGRAPHY", expression.this)
 939            if expression.is_type(exp.DataType.Type.GEOMETRY):
 940                return self.func("TO_GEOMETRY", expression.this)
 941
 942            return super().cast_sql(expression, safe_prefix=safe_prefix)
 943
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Generate TRY_CAST, downgrading to a plain CAST for non-string operands.

            The operand's type is annotated on demand (lazy import avoids a
            circular dependency) to decide which form is valid.
            """
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
 957
 958        def log_sql(self, expression: exp.Log) -> str:
 959            if not expression.expression:
 960                return self.func("LN", expression.this)
 961
 962            return super().log_sql(expression)
 963
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Generate UNNEST as TABLE(FLATTEN(INPUT => ...)).

            The alias's column list is padded to FLATTEN's six output columns
            (seq, key, path, index, value, this): a user-provided offset alias
            lands on `index` and the first user column lands on `value`.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias supplied: synthesize one so the columns can be referenced
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 987
 988        def show_sql(self, expression: exp.Show) -> str:
 989            terse = "TERSE " if expression.args.get("terse") else ""
 990            history = " HISTORY" if expression.args.get("history") else ""
 991            like = self.sql(expression, "like")
 992            like = f" LIKE {like}" if like else ""
 993
 994            scope = self.sql(expression, "scope")
 995            scope = f" {scope}" if scope else ""
 996
 997            scope_kind = self.sql(expression, "scope_kind")
 998            if scope_kind:
 999                scope_kind = f" IN {scope_kind}"
1000
1001            starts_with = self.sql(expression, "starts_with")
1002            if starts_with:
1003                starts_with = f" STARTS WITH {starts_with}"
1004
1005            limit = self.sql(expression, "limit")
1006
1007            from_ = self.sql(expression, "from")
1008            if from_:
1009                from_ = f" FROM {from_}"
1010
1011            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1012
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Generate REGEXP_SUBSTR from a RegexpExtract node."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each `and` chain fills in a default only when a later positional
            # argument is present, so no trailing defaults are emitted needlessly
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1036
1037        def describe_sql(self, expression: exp.Describe) -> str:
1038            # Default to table if kind is unknown
1039            kind_value = expression.args.get("kind") or "TABLE"
1040            kind = f" {kind_value}" if kind_value else ""
1041            this = f" {self.sql(expression, 'this')}"
1042            expressions = self.expressions(expression, flat=True)
1043            expressions = f" {expressions}" if expressions else ""
1044            return f"DESCRIBE{kind}{this}{expressions}"
1045
1046        def generatedasidentitycolumnconstraint_sql(
1047            self, expression: exp.GeneratedAsIdentityColumnConstraint
1048        ) -> str:
1049            start = expression.args.get("start")
1050            start = f" START {start}" if start else ""
1051            increment = expression.args.get("increment")
1052            increment = f" INCREMENT {increment}" if increment else ""
1053            return f"AUTOINCREMENT{start}{increment}"
1054
1055        def cluster_sql(self, expression: exp.Cluster) -> str:
1056            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1057
1058        def struct_sql(self, expression: exp.Struct) -> str:
1059            keys = []
1060            values = []
1061
1062            for i, e in enumerate(expression.expressions):
1063                if isinstance(e, exp.PropertyEQ):
1064                    keys.append(
1065                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1066                    )
1067                    values.append(e.expression)
1068                else:
1069                    keys.append(exp.Literal.string(f"_{i}"))
1070                    values.append(e)
1071
1072            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1073
        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Generate APPROX_PERCENTILE; `weight`/`accuracy` args are dropped as unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1077
1078        def alterset_sql(self, expression: exp.AlterSet) -> str:
1079            exprs = self.expressions(expression, flat=True)
1080            exprs = f" {exprs}" if exprs else ""
1081            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1082            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1083            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1084            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1085            tag = self.expressions(expression, key="tag", flat=True)
1086            tag = f" TAG {tag}" if tag else ""
1087
1088            return f"SET{exprs}{file_format}{copy_options}{tag}"
1089
1090        def strtotime_sql(self, expression: exp.StrToTime):
1091            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1092            return self.func(
1093                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1094            )
1095
1096        def timestampsub_sql(self, expression: exp.TimestampSub):
1097            return self.sql(
1098                exp.TimestampAdd(
1099                    this=expression.this,
1100                    expression=expression.expression * -1,
1101                    unit=expression.unit,
1102                )
1103            )
1104
1105        def jsonextract_sql(self, expression: exp.JSONExtract):
1106            this = expression.this
1107
1108            # JSON strings are valid coming from other dialects such as BQ
1109            return self.func(
1110                "GET_PATH",
1111                exp.ParseJSON(this=this) if this.is_string else this,
1112                expression.expression,
1113            )
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
280    def quote_identifier(self, expression: E, identify: bool = True) -> E:
281        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
282        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
283        if (
284            isinstance(expression, exp.Identifier)
285            and isinstance(expression.parent, exp.Table)
286            and expression.name.lower() == "dual"
287        ):
288            return expression  # type: ignore
289
290        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (a literal newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        # Treat string literals appearing in PIVOT clauses as pivot values
        IDENTIFY_PIVOT_STRINGS = True

        # TABLESAMPLE without an explicit method defaults to BERNOULLI in Snowflake
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"

        # `expr:path` performs semi-structured (VARIANT) extraction in Snowflake
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION is tokenized specially but may still appear as an identifier
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW may alias a table in Snowflake; MATCH_CONDITION may not
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped onto sqlglot's canonical expression nodes
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array); the arguments are swapped into
            # sqlglot's (array, value) convention
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                # group defaults to 0 (the whole match) when not supplied
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a plain function here, not with the base special syntax
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is excluded from the base set of timestamp-constructing keywords
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        # Snowflake-only range operators: LIKE ANY / ILIKE ANY
        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <names> is modeled as an exp.Set with unset=True
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # Supported SHOW <kind> variants; TERSE prefixes map to the same parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            # All of these route to the same handler, which retreats when no WITH
            # keyword preceded the constraint keyword (see _parse_with_constraint)
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside a staged-file path
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output columns produced by Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose implicit IN scope is a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose name should not be parsed/kept as a Table node
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Unwrap Casts produced by typed lambda arguments (arg TYPE -> arg)
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
472
473        def _negate_range(
474            self, this: t.Optional[exp.Expression] = None
475        ) -> t.Optional[exp.Expression]:
476            if not this:
477                return this
478
479            query = this.args.get("query")
480            if isinstance(this, exp.In) and isinstance(query, exp.Query):
481                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
482                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
483                # which can produce different results (most likely a SnowFlake bug).
484                #
485                # https://docs.snowflake.com/en/sql-reference/functions/in
486                # Context: https://github.com/tobymao/sqlglot/issues/3890
487                return self.expression(
488                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
489                )
490
491            return self.expression(exp.Not, this=this)
492
493        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
494            if self._prev.token_type != TokenType.WITH:
495                self._retreat(self._index - 1)
496
497            if self._match_text_seq("MASKING", "POLICY"):
498                policy = self._parse_column()
499                return self.expression(
500                    exp.MaskingPolicyColumnConstraint,
501                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
502                    expressions=self._match(TokenType.USING)
503                    and self._parse_wrapped_csv(self._parse_id_var),
504                )
505            if self._match_text_seq("PROJECTION", "POLICY"):
506                policy = self._parse_column()
507                return self.expression(
508                    exp.ProjectionPolicyColumnConstraint,
509                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
510                )
511            if self._match(TokenType.TAG):
512                return self.expression(
513                    exp.TagColumnConstraint,
514                    expressions=self._parse_wrapped_csv(self._parse_property),
515                )
516
517            return None
518
519        def _parse_create(self) -> exp.Create | exp.Command:
520            expression = super()._parse_create()
521            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
522                # Replace the Table node with the enclosed Identifier
523                expression.this.replace(expression.this.this)
524
525            return expression
526
527        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
528        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
529        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
530            this = self._parse_var() or self._parse_type()
531
532            if not this:
533                return None
534
535            self._match(TokenType.COMMA)
536            expression = self._parse_bitwise()
537            this = map_date_part(this)
538            name = this.name.upper()
539
540            if name.startswith("EPOCH"):
541                if name == "EPOCH_MILLISECOND":
542                    scale = 10**3
543                elif name == "EPOCH_MICROSECOND":
544                    scale = 10**6
545                elif name == "EPOCH_NANOSECOND":
546                    scale = 10**9
547                else:
548                    scale = None
549
550                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
551                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
552
553                if scale:
554                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
555
556                return to_unix
557
558            return self.expression(exp.Extract, this=this, expression=expression)
559
560        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
561            if is_map:
562                # Keys are strings in Snowflake's objects, see also:
563                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
564                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
565                return self._parse_slice(self._parse_string())
566
567            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
568
569        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
570            lateral = super()._parse_lateral()
571            if not lateral:
572                return lateral
573
574            if isinstance(lateral.this, exp.Explode):
575                table_alias = lateral.args.get("alias")
576                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
577                if table_alias and not table_alias.args.get("columns"):
578                    table_alias.set("columns", columns)
579                elif not table_alias:
580                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
581
582            return lateral
583
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, additionally supporting Snowflake staged files.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A staged file is either a quoted string or an @-prefixed location path
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optionally consume `(FILE_FORMAT => ..., PATTERN => ...)` options;
                # an unrecognized option breaks out without consuming the paren
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT accepts either a string or a (qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
617
618        def _parse_id_var(
619            self,
620            any_token: bool = True,
621            tokens: t.Optional[t.Collection[TokenType]] = None,
622        ) -> t.Optional[exp.Expression]:
623            if self._match_text_seq("IDENTIFIER", "("):
624                identifier = (
625                    super()._parse_id_var(any_token=any_token, tokens=tokens)
626                    or self._parse_string()
627                )
628                self._match_r_paren()
629                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
630
631            return super()._parse_id_var(any_token=any_token, tokens=tokens)
632
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW statement into an exp.Show node.

            `this` is the object kind being shown (e.g. "TABLES", "PRIMARY KEYS").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # Explicit kind, e.g. IN DATABASE foo / IN SCHEMA bar
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind: infer SCHEMA vs TABLE from what is being shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
670
671        def _parse_location_property(self) -> exp.LocationProperty:
672            self._match(TokenType.EQ)
673            return self.expression(exp.LocationProperty, this=self._parse_location_path())
674
675        def _parse_file_location(self) -> t.Optional[exp.Expression]:
676            # Parse either a subquery or a staged file
677            return (
678                self._parse_select(table=True, parse_subquery_alias=False)
679                if self._match(TokenType.L_PAREN, advance=False)
680                else self._parse_table_parts()
681            )
682
683        def _parse_location_path(self) -> exp.Var:
684            parts = [self._advance_any(ignore_reserved=True)]
685
686            # We avoid consuming a comma token because external tables like @foo and @bar
687            # can be joined in a query with a comma separator, as well as closing paren
688            # in case of subqueries
689            while self._is_connected() and not self._match_set(
690                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
691            ):
692                parts.append(self._advance_any(ignore_reserved=True))
693
694            return exp.var("".join(part.text for part in parts if part))
695
696        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
697            this = super()._parse_lambda_arg()
698
699            if not this:
700                return this
701
702            typ = self._parse_types()
703
704            if typ:
705                return self.expression(exp.Cast, this=this, to=typ)
706
707            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.PERCENT: 'PERCENT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.IS: 'IS'>, <TokenType.VIEW: 'VIEW'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSON: 'JSON'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INT: 'INT'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.FALSE: 'FALSE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.ENUM: 'ENUM'>, <TokenType.NULL: 'NULL'>, <TokenType.TAG: 'TAG'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ALL: 'ALL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.XML: 'XML'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.RANGE: 'RANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.BINARY: 'BINARY'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.END: 'END'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.ASC: 'ASC'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.RING: 
'RING'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.CASE: 'CASE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.DATE: 'DATE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.UINT: 'UINT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SET: 'SET'>, <TokenType.RENAME: 'RENAME'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.YEAR: 'YEAR'>, <TokenType.LEFT: 'LEFT'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INET: 'INET'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.POINT: 'POINT'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.APPLY: 'APPLY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DIV: 'DIV'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MAP: 'MAP'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.BIT: 'BIT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NESTED: 'NESTED'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MERGE: 'MERGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.INT4RANGE: 
'INT4RANGE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.INT128: 'INT128'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CUBE: 'CUBE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.VAR: 'VAR'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.FULL: 'FULL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COPY: 'COPY'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DATE32: 'DATE32'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NAME: 'NAME'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CHAR: 'CHAR'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LIST: 'LIST'>, 
<TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.KILL: 'KILL'>, <TokenType.ASOF: 'ASOF'>, <TokenType.USE: 'USE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.IPV4: 'IPV4'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIME: 'TIME'>, <TokenType.TRUE: 'TRUE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.DESC: 'DESC'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.SOME: 'SOME'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ANY: 'ANY'>}
TABLE_ALIAS_TOKENS = {<TokenType.PERCENT: 'PERCENT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.IS: 'IS'>, <TokenType.VIEW: 'VIEW'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSON: 'JSON'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INT: 'INT'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.FALSE: 'FALSE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.ENUM: 'ENUM'>, <TokenType.NULL: 'NULL'>, <TokenType.TAG: 'TAG'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ALL: 'ALL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.XML: 'XML'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.RANGE: 'RANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.BINARY: 'BINARY'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.END: 'END'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.ASC: 'ASC'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.RING: 
'RING'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.CASE: 'CASE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.DATE: 'DATE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.UINT: 'UINT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SET: 'SET'>, <TokenType.RENAME: 'RENAME'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INET: 'INET'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.POINT: 'POINT'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DIV: 'DIV'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MAP: 'MAP'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.BIT: 'BIT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NESTED: 'NESTED'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MERGE: 'MERGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.INT128: 'INT128'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.USMALLINT: 
'USMALLINT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CUBE: 'CUBE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.VAR: 'VAR'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.UINT128: 'UINT128'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.COPY: 'COPY'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DATE32: 'DATE32'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.NAME: 'NAME'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CHAR: 'CHAR'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.LIST: 'LIST'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.LINESTRING: 'LINESTRING'>, 
<TokenType.KILL: 'KILL'>, <TokenType.USE: 'USE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.IPV4: 'IPV4'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TIME: 'TIME'>, <TokenType.TRUE: 'TRUE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.DESC: 'DESC'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.SOME: 'SOME'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ANY: 'ANY'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 
'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 
'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 
'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function Snowflake.Parser.<lambda>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'UNIQUE KEYS', 'TABLES', 'SEQUENCES', 'IMPORTED KEYS', 'VIEWS', 'OBJECTS'}
NON_TABLE_CREATABLES = {'STORAGE INTEGRATION', 'WAREHOUSE', 'STREAMLIT', 'TAG'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
class Tokenizer(tokens.Tokenizer):
    """Snowflake-specific tokenizer configuration.

    Overrides the base sqlglot tokenizer with Snowflake's string-escape
    rules, comment styles, and keyword/type aliases.
    """

    # Snowflake escapes quotes inside strings with either a backslash
    # or a doubled single quote.
    STRING_ESCAPES = ["\\", "'"]
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # $$ ... $$ delimits raw (dollar-quoted) string literals.
    RAW_STRINGS = ["$$"]
    # Snowflake also supports C++-style // line comments.
    COMMENTS = ["--", "//", ("/*", "*/")]
    NESTED_COMMENTS = False

    # Keyword → token-type aliases layered on top of the defaults.
    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "EXCLUDE": TokenType.EXCEPT,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_CONDITION": TokenType.MATCH_CONDITION,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        # PUT / REMOVE / RM are stage-file commands; tokenize them as
        # opaque commands rather than parsing their bodies.
        "PUT": TokenType.COMMAND,
        "REMOVE": TokenType.COMMAND,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TAG": TokenType.TAG,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TOP": TokenType.TOP,
        "WAREHOUSE": TokenType.WAREHOUSE,
        "STREAMLIT": TokenType.STREAMLIT,
    }
    # "/*+" (hint-comment opener) is not meaningful in Snowflake.
    KEYWORDS.pop("/*+")

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        # "$" introduces parameters (e.g. $1) and session variables.
        "$": TokenType.PARAMETER,
    }

    # "$" may also appear inside identifiers/variable names.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is fully parsed in this dialect, so it is removed from the
    # set of statements treated as opaque commands.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 
'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': 
<TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.RENAME: 'RENAME'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>}
class Snowflake.Generator(sqlglot.generator.Generator):
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect.

        Overrides the base generator with Snowflake-specific feature flags,
        expression-to-SQL transforms, type mappings, and per-node ``*_sql``
        methods.
        """

        # Snowflake session/bind parameters are written as $name.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True

        # Mapping of sqlglot expression nodes to Snowflake SQL renderers.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Note: argument order is (array-like, element) -> ARRAY_CONTAINS(element, array).
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end bound is exclusive, so bump it by 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root ($) has no textual representation in Snowflake paths.
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF takes (unit, start, end), hence the swapped operands.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that force VALUES to be rendered outside of a table context.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated (Snowflake style)."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling the table form if it contains unsupported nodes."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render data types, collapsing typed struct fields to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with optional format, precision and scale arguments."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a millisecond arg to nanoseconds.

            Snowflake's variant takes nanoseconds, so the `milli` argument (if any)
            is popped and replaced with `milli * 1_000_000` under the `nano` key.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render casts, using TO_GEOGRAPHY / TO_GEOMETRY for spatial types."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST only for string inputs, falling back to CAST otherwise."""
            value = expression.this

            if value.type is None:
                # Annotate on demand so we can decide based on the operand's type.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render single-argument LOG as LN (natural logarithm)."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a full column alias.

            FLATTEN produces six columns; the alias lists all of them so that the
            requested value/offset columns land in the right positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # Reuse an explicit offset alias for FLATTEN's INDEX column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Assemble a SHOW statement from its optional clause arguments."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in default positional arguments as needed."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each later argument forces defaults for the earlier positional ones.
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render clustering keys as CLUSTER BY (expr, ...)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key, value, ...).

            Named entries (PropertyEQ) keep their keys; positional entries are
            keyed as '_<index>'.
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            # Interleave keys and values: OBJECT_CONSTRUCT(k1, v1, k2, v2, ...)
            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE; weight/accuracy args are unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with optional stage file format, copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render TO_TIMESTAMP, or TRY_TO_TIMESTAMP when the safe flag is set."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )

        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render timestamp subtraction as addition of the negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )

        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSON extraction as GET_PATH, parsing string inputs first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, 
<class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Map'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE-statement properties Snowflake-style: unwrapped and space-separated."""
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render a VALUES clause, falling back to the non-table form when the
    expression contains constructs (maps/structs) Snowflake cannot place in
    a VALUES table."""
    has_unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS) is not None
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing struct types that carry typed fields
    down to a bare OBJECT, since Snowflake's typed-object syntax differs."""
    fields = expression.expressions
    has_typed_fields = any(isinstance(field, exp.DataType) for field in fields)

    if fields and has_typed_fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER with its optional format, precision and scale args
    (self.func drops trailing None arguments)."""
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS; Snowflake takes nanoseconds rather than
    milliseconds, so a `milli` arg is converted and moved to `nano`."""
    milli = expression.args.get("milli")
    if milli is not None:
        # 1 ms == 1_000_000 ns; pop() detaches the arg before rewriting it
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a CAST; geospatial targets use Snowflake's dedicated
    conversion functions instead of CAST syntax."""
    for geo_type, func_name in (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    ):
        if expression.is_type(geo_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST; Snowflake's TRY_CAST only accepts string operands,
    so non-text operands degrade to a plain CAST."""
    value = expression.this

    if value.type is None:
        # Lazily annotate types so we can tell whether the operand is textual.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    text_like = value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN)

    # TRY_CAST only works for string values in Snowflake
    return super().trycast_sql(expression) if text_like else self.cast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; with no explicit base Snowflake's natural log is LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) with a
    six-column alias matching FLATTEN's output columns."""
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    # Map UNNEST's optional offset onto FLATTEN's index column; pop()
    # detaches it so it is not rendered a second time.
    if isinstance(offset, exp.Expression):
        index_column = offset.pop()
    else:
        index_column = exp.to_identifier("index")

    existing_columns = unnest_alias.columns if unnest_alias else []
    value_column = seq_get(existing_columns, 0) or exp.to_identifier("value")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_column,
        value_column,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    return f"{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Assemble a SHOW command from its optional modifier arguments."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    if scope:
        scope = f" {scope}"

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render REGEXP_SUBSTR.

    Other dialects don't support every parameter, so defaults are generated
    only when a later (non-default) argument forces earlier ones to appear:
    the defaults cascade right-to-left through group -> parameters ->
    occurrence -> position.
    """
    group = expression.args.get("group")

    # group 0 is the engine default; dropping it avoids triggering the
    # default-value cascade below
    if group and group.name == "0":
        group = None

    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def describe_sql(self, expression: exp.Describe) -> str:
    """Render a DESCRIBE command; the object kind defaults to TABLE.

    Note: the `or "TABLE"` fallback guarantees `kind_value` is truthy, so
    the previous `if kind_value else ""` conditional was an unreachable
    branch and has been removed.
    """
    kind_value = expression.args.get("kind") or "TABLE"
    kind = f" {kind_value}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render clustering keys in Snowflake's CLUSTER BY (...) form."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Translate a struct literal into OBJECT_CONSTRUCT(key1, val1, key2, val2, ...)."""
    args: t.List[exp.Expression] = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            args.extend((key, field.expression))
        else:
            # positional fields get synthetic "_<index>" keys
            args.extend((exp.Literal.string(f"_{i}"), field))

    return self.func("OBJECT_CONSTRUCT", *args)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render APPROX_PERCENTILE; Snowflake takes only the value and quantile,
    so `weight` and `accuracy` are flagged as unsupported."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file format, copy options
    and tag clauses."""
    exprs = self.expressions(expression, flat=True)
    exprs = f" {exprs}" if exprs else ""

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        file_format = f" STAGE_FILE_FORMAT = ({file_format})"

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})"

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        tag = f" TAG {tag}"

    return f"SET{exprs}{file_format}{copy_options}{tag}"
def strtotime_sql(self, expression: exp.StrToTime):
    """Render STR_TO_TIME as (TRY_)TO_TIMESTAMP with the dialect's format string."""
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Snowflake has no timestamp subtraction node: negate the interval and
    render the equivalent TimestampAdd."""
    negated = expression.expression * -1
    add = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction as GET_PATH, parsing string operands first."""
    this = expression.this

    # JSON strings are valid coming from other dialects such as BQ
    if this.is_string:
        this = exp.ParseJSON(this=this)

    return self.func("GET_PATH", this, expression.expression)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
PARSE_JSON_NAME
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql