Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, jsonpath, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25    no_timestamp_sql,
  26    strposition_sql,
  27    timestampdiff_sql,
  28    no_make_interval_sql,
  29)
  30from sqlglot.generator import unsupported_args
  31from sqlglot.helper import flatten, is_float, is_int, seq_get
  32from sqlglot.tokens import TokenType
  33
  34if t.TYPE_CHECKING:
  35    from sqlglot._typing import E, B
  36
  37
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: The Snowflake function name, emitted verbatim when the call can't
            be mapped onto a more specific sqlglot expression.
        kind: The target data type the Snowflake function converts to.
        safe: True for the TRY_* variants (which return NULL instead of erroring).
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        scale_or_fmt = seq_get(args, 1)

        int_value = value is not None and is_int(value.name)
        int_scale_or_fmt = scale_or_fmt is not None and scale_or_fmt.is_int

        if isinstance(value, exp.Literal) or (value and scale_or_fmt):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if not safe and (int_value or int_scale_or_fmt):
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=scale_or_fmt)
                if not int_scale_or_fmt and not is_float(value.name):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind in (exp.DataType.Type.DATE, exp.DataType.Type.TIME) and not int_value:
            klass = exp.TsOrDsToDate if kind == exp.DataType.Type.DATE else exp.TsOrDsToTime
            formatted_exp = build_formatted_time(klass, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Fallback: keep the original function call as an opaque node so it
        # round-trips unchanged
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  79
  80
  81def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  82    expression = parser.build_var_map(args)
  83
  84    if isinstance(expression, exp.StarMap):
  85        return expression
  86
  87    return exp.Struct(
  88        expressions=[
  89            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  90        ]
  91    )
  92
  93
  94def _build_datediff(args: t.List) -> exp.DateDiff:
  95    return exp.DateDiff(
  96        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  97    )
  98
  99
 100def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 101    def _builder(args: t.List) -> E:
 102        return expr_type(
 103            this=seq_get(args, 2),
 104            expression=seq_get(args, 1),
 105            unit=map_date_part(seq_get(args, 0)),
 106        )
 107
 108    return _builder
 109
 110
 111def _build_bitwise(expr_type: t.Type[B], name: str) -> t.Callable[[t.List], B | exp.Anonymous]:
 112    def _builder(args: t.List) -> B | exp.Anonymous:
 113        if len(args) == 3:
 114            return exp.Anonymous(this=name, expressions=args)
 115
 116        return binary_from_function(expr_type)(args)
 117
 118    return _builder
 119
 120
 121# https://docs.snowflake.com/en/sql-reference/functions/div0
 122def _build_if_from_div0(args: t.List) -> exp.If:
 123    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 124    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 125
 126    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 127        exp.Is(this=lhs, expression=exp.null()).not_()
 128    )
 129    true = exp.Literal.number(0)
 130    false = exp.Div(this=lhs, expression=rhs)
 131    return exp.If(this=cond, true=true, false=false)
 132
 133
 134# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 135def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 136    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 137    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 138
 139
 140# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 141def _build_if_from_nullifzero(args: t.List) -> exp.If:
 142    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
 143    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 144
 145
 146def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 147    flag = expression.text("flag")
 148
 149    if "i" not in flag:
 150        flag += "i"
 151
 152    return self.func(
 153        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 154    )
 155
 156
 157def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 158    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 159
 160    if not regexp_replace.args.get("replacement"):
 161        regexp_replace.set("replacement", exp.Literal.string(""))
 162
 163    return regexp_replace
 164
 165
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Return a SHOW-statement parser callback that forwards its bound arguments
    to `Snowflake.Parser._parse_show_snowflake` (used to populate SHOW_PARSERS)."""

    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 171
 172
 173def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 174    trunc = date_trunc_to_time(args)
 175    trunc.set("unit", map_date_part(trunc.args["unit"]))
 176    return trunc
 177
 178
 179def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 180    """
 181    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 182    so we need to unqualify them.
 183
 184    Example:
 185        >>> from sqlglot import parse_one
 186        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 187        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 188        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 189    """
 190    if isinstance(expression, exp.Pivot) and expression.unpivot:
 191        expression = transforms.unqualify_columns(expression)
 192
 193    return expression
 194
 195
 196def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 197    assert isinstance(expression, exp.Create)
 198
 199    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 200        if expression.this in exp.DataType.NESTED_TYPES:
 201            expression.set("expressions", None)
 202        return expression
 203
 204    props = expression.args.get("properties")
 205    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 206        for schema_expression in expression.this.expressions:
 207            if isinstance(schema_expression, exp.ColumnDef):
 208                column_type = schema_expression.kind
 209                if isinstance(column_type, exp.DataType):
 210                    column_type.transform(_flatten_structured_type, copy=False)
 211
 212    return expression
 213
 214
def _unnest_generate_date_array(unnest: exp.Unnest) -> None:
    """Rewrite `UNNEST(GENERATE_DATE_ARRAY(start, end, step))` in place.

    Snowflake has no native date-array generator, so this becomes a subquery
    that projects `DATEADD(unit, seq_value, start)` over an integer sequence
    whose length is computed with DATEDIFF. Only a step of exactly 1 <unit>
    is handled; anything else is left untouched.
    """
    generate_date_array = unnest.expressions[0]
    start = generate_date_array.args.get("start")
    end = generate_date_array.args.get("end")
    step = generate_date_array.args.get("step")

    if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
        return

    unit = step.args.get("unit")

    unnest_alias = unnest.args.get("alias")
    if unnest_alias:
        # Copy so the alias can be reused on the wrapping subquery below
        unnest_alias = unnest_alias.copy()
        sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
    else:
        sequence_value_name = "value"

    # We'll add the next sequence value to the starting date and project the result
    date_add = _build_date_time_add(exp.DateAdd)(
        [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
    ).as_(sequence_value_name)

    # We use DATEDIFF to compute the number of sequence values needed
    number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
        [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
    )

    unnest.set("expressions", [number_sequence])
    unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))
 245
 246
def _transform_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite GENERATE_DATE_ARRAY usages inside a SELECT for Snowflake.

    Array-context usages are wrapped in `(SELECT ARRAY_AGG(*) FROM UNNEST(...))`;
    usages already under a `FROM/JOIN UNNEST(...)` are expanded in place via
    `_unnest_generate_date_array`.
    """
    if isinstance(expression, exp.Select):
        for generate_date_array in expression.find_all(exp.GenerateDateArray):
            # Captured before any replacement so the checks below still refer
            # to the node's original position
            parent = generate_date_array.parent

            # If GENERATE_DATE_ARRAY is used directly as an array (e.g passed into ARRAY_LENGTH), the transformed Snowflake
            # query is the following (it'll be unnested properly on the next iteration due to copy):
            # SELECT ref(GENERATE_DATE_ARRAY(...)) -> SELECT ref((SELECT ARRAY_AGG(*) FROM UNNEST(GENERATE_DATE_ARRAY(...))))
            if not isinstance(parent, exp.Unnest):
                unnest = exp.Unnest(expressions=[generate_date_array.copy()])
                generate_date_array.replace(
                    exp.select(exp.ArrayAgg(this=exp.Star())).from_(unnest).subquery()
                )

            if (
                isinstance(parent, exp.Unnest)
                and isinstance(parent.parent, (exp.From, exp.Join))
                and len(parent.expressions) == 1
            ):
                _unnest_generate_date_array(parent)

    return expression
 269
 270
 271def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 272    def _builder(args: t.List) -> E:
 273        return expr_type(
 274            this=seq_get(args, 0),
 275            expression=seq_get(args, 1),
 276            position=seq_get(args, 2),
 277            occurrence=seq_get(args, 3),
 278            parameters=seq_get(args, 4),
 279            group=seq_get(args, 5) or exp.Literal.number(0),
 280        )
 281
 282    return _builder
 283
 284
 285def _regexpextract_sql(self, expression: exp.RegexpExtract | exp.RegexpExtractAll) -> str:
 286    # Other dialects don't support all of the following parameters, so we need to
 287    # generate default values as necessary to ensure the transpilation is correct
 288    group = expression.args.get("group")
 289
 290    # To avoid generating all these default values, we set group to None if
 291    # it's 0 (also default value) which doesn't trigger the following chain
 292    if group and group.name == "0":
 293        group = None
 294
 295    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 296    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 297    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 298
 299    return self.func(
 300        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
 301        expression.this,
 302        expression.expression,
 303        position,
 304        occurrence,
 305        parameters,
 306        group,
 307    )
 308
 309
 310def _json_extract_value_array_sql(
 311    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 312) -> str:
 313    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 314    ident = exp.to_identifier("x")
 315
 316    if isinstance(expression, exp.JSONValueArray):
 317        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 318    else:
 319        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 320
 321    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 322
 323    return self.func("TRANSFORM", json_extract, transform_lambda)
 324
 325
 326class Snowflake(Dialect):
 327    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
 328    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
 329    NULL_ORDERING = "nulls_are_large"
 330    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 331    SUPPORTS_USER_DEFINED_TYPES = False
 332    SUPPORTS_SEMI_ANTI_JOIN = False
 333    PREFER_CTE_ALIAS_COLUMN = True
 334    TABLESAMPLE_SIZE_IS_PERCENT = True
 335    COPY_PARAMS_ARE_CSV = False
 336    ARRAY_AGG_INCLUDES_NULLS = None
 337
 338    TIME_MAPPING = {
 339        "YYYY": "%Y",
 340        "yyyy": "%Y",
 341        "YY": "%y",
 342        "yy": "%y",
 343        "MMMM": "%B",
 344        "mmmm": "%B",
 345        "MON": "%b",
 346        "mon": "%b",
 347        "MM": "%m",
 348        "mm": "%m",
 349        "DD": "%d",
 350        "dd": "%-d",
 351        "DY": "%a",
 352        "dy": "%w",
 353        "HH24": "%H",
 354        "hh24": "%H",
 355        "HH12": "%I",
 356        "hh12": "%I",
 357        "MI": "%M",
 358        "mi": "%M",
 359        "SS": "%S",
 360        "ss": "%S",
 361        "FF6": "%f",
 362        "ff6": "%f",
 363    }
 364
 365    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 366        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 367        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 368        if (
 369            isinstance(expression, exp.Identifier)
 370            and isinstance(expression.parent, exp.Table)
 371            and expression.name.lower() == "dual"
 372        ):
 373            return expression  # type: ignore
 374
 375        return super().quote_identifier(expression, identify=identify)
 376
 377    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
 378        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
 379        SINGLE_TOKENS.pop("$")
 380
 381    class Parser(parser.Parser):
 382        IDENTIFY_PIVOT_STRINGS = True
 383        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
 384        COLON_IS_VARIANT_EXTRACT = True
 385
 386        ID_VAR_TOKENS = {
 387            *parser.Parser.ID_VAR_TOKENS,
 388            TokenType.MATCH_CONDITION,
 389        }
 390
 391        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
 392        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 393
 394        FUNCTIONS = {
 395            **parser.Parser.FUNCTIONS,
 396            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
 397            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
 398            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
 399                this=seq_get(args, 1), expression=seq_get(args, 0)
 400            ),
 401            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
  402            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
 403                start=seq_get(args, 0),
 404                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
 405                step=seq_get(args, 2),
 406            ),
 407            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
 408            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
 409            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
 410            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
 411            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
 412            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
 413            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
 414            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
 415            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
 416            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
 417            "DATE_TRUNC": _date_trunc_to_time,
 418            "DATEADD": _build_date_time_add(exp.DateAdd),
 419            "DATEDIFF": _build_datediff,
 420            "DIV0": _build_if_from_div0,
 421            "EDITDISTANCE": lambda args: exp.Levenshtein(
 422                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
 423            ),
 424            "FLATTEN": exp.Explode.from_arg_list,
 425            "GET_PATH": lambda args, dialect: exp.JSONExtract(
 426                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
 427            ),
 428            "IFF": exp.If.from_arg_list,
 429            "LAST_DAY": lambda args: exp.LastDay(
 430                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
 431            ),
 432            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
 433            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
 434            "LISTAGG": exp.GroupConcat.from_arg_list,
 435            "NULLIFZERO": _build_if_from_nullifzero,
 436            "OBJECT_CONSTRUCT": _build_object_construct,
 437            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
 438            "REGEXP_REPLACE": _build_regexp_replace,
 439            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
 440            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
 441            "RLIKE": exp.RegexpLike.from_arg_list,
 442            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
 443            "TIMEADD": _build_date_time_add(exp.TimeAdd),
 444            "TIMEDIFF": _build_datediff,
 445            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
 446            "TIMESTAMPDIFF": _build_datediff,
 447            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
 448            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
 449            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
 450            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
 451            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
 452            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
 453            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
 454            "TRY_TO_TIMESTAMP": _build_datetime(
 455                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
 456            ),
 457            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
 458            "TO_NUMBER": lambda args: exp.ToNumber(
 459                this=seq_get(args, 0),
 460                format=seq_get(args, 1),
 461                precision=seq_get(args, 2),
 462                scale=seq_get(args, 3),
 463            ),
 464            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
 465            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
 466            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
 467            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
 468            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
 469            "TO_VARCHAR": exp.ToChar.from_arg_list,
 470            "ZEROIFNULL": _build_if_from_zeroifnull,
 471        }
 472
 473        FUNCTION_PARSERS = {
 474            **parser.Parser.FUNCTION_PARSERS,
 475            "DATE_PART": lambda self: self._parse_date_part(),
 476            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
 477        }
 478        FUNCTION_PARSERS.pop("TRIM")
 479
 480        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 481
 482        RANGE_PARSERS = {
 483            **parser.Parser.RANGE_PARSERS,
 484            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
 485            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
 486        }
 487
 488        ALTER_PARSERS = {
 489            **parser.Parser.ALTER_PARSERS,
 490            "UNSET": lambda self: self.expression(
 491                exp.Set,
 492                tag=self._match_text_seq("TAG"),
 493                expressions=self._parse_csv(self._parse_id_var),
 494                unset=True,
 495            ),
 496        }
 497
 498        STATEMENT_PARSERS = {
 499            **parser.Parser.STATEMENT_PARSERS,
 500            TokenType.SHOW: lambda self: self._parse_show(),
 501        }
 502
 503        PROPERTY_PARSERS = {
 504            **parser.Parser.PROPERTY_PARSERS,
 505            "LOCATION": lambda self: self._parse_location_property(),
 506            "TAG": lambda self: self._parse_tag(),
 507        }
 508
 509        TYPE_CONVERTERS = {
 510            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
 511            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
 512        }
 513
 514        SHOW_PARSERS = {
 515            "SCHEMAS": _show_parser("SCHEMAS"),
 516            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
 517            "OBJECTS": _show_parser("OBJECTS"),
 518            "TERSE OBJECTS": _show_parser("OBJECTS"),
 519            "TABLES": _show_parser("TABLES"),
 520            "TERSE TABLES": _show_parser("TABLES"),
 521            "VIEWS": _show_parser("VIEWS"),
 522            "TERSE VIEWS": _show_parser("VIEWS"),
 523            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 524            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 525            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 526            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 527            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 528            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 529            "SEQUENCES": _show_parser("SEQUENCES"),
 530            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
 531            "COLUMNS": _show_parser("COLUMNS"),
 532            "USERS": _show_parser("USERS"),
 533            "TERSE USERS": _show_parser("USERS"),
 534        }
 535
 536        CONSTRAINT_PARSERS = {
 537            **parser.Parser.CONSTRAINT_PARSERS,
 538            "WITH": lambda self: self._parse_with_constraint(),
 539            "MASKING": lambda self: self._parse_with_constraint(),
 540            "PROJECTION": lambda self: self._parse_with_constraint(),
 541            "TAG": lambda self: self._parse_with_constraint(),
 542        }
 543
 544        STAGED_FILE_SINGLE_TOKENS = {
 545            TokenType.DOT,
 546            TokenType.MOD,
 547            TokenType.SLASH,
 548        }
 549
 550        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
 551
 552        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 553
 554        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
 555
 556        LAMBDAS = {
 557            **parser.Parser.LAMBDAS,
 558            TokenType.ARROW: lambda self, expressions: self.expression(
 559                exp.Lambda,
 560                this=self._replace_lambda(
 561                    self._parse_assignment(),
 562                    expressions,
 563                ),
 564                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
 565            ),
 566        }
 567
 568        def _negate_range(
 569            self, this: t.Optional[exp.Expression] = None
 570        ) -> t.Optional[exp.Expression]:
 571            if not this:
 572                return this
 573
 574            query = this.args.get("query")
 575            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 576                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 577                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 578                # which can produce different results (most likely a SnowFlake bug).
 579                #
 580                # https://docs.snowflake.com/en/sql-reference/functions/in
 581                # Context: https://github.com/tobymao/sqlglot/issues/3890
 582                return self.expression(
 583                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 584                )
 585
 586            return self.expression(exp.Not, this=this)
 587
 588        def _parse_tag(self) -> exp.Tags:
 589            return self.expression(
 590                exp.Tags,
 591                expressions=self._parse_wrapped_csv(self._parse_property),
 592            )
 593
 594        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
 595            if self._prev.token_type != TokenType.WITH:
 596                self._retreat(self._index - 1)
 597
 598            if self._match_text_seq("MASKING", "POLICY"):
 599                policy = self._parse_column()
 600                return self.expression(
 601                    exp.MaskingPolicyColumnConstraint,
 602                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
 603                    expressions=self._match(TokenType.USING)
 604                    and self._parse_wrapped_csv(self._parse_id_var),
 605                )
 606            if self._match_text_seq("PROJECTION", "POLICY"):
 607                policy = self._parse_column()
 608                return self.expression(
 609                    exp.ProjectionPolicyColumnConstraint,
 610                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
 611                )
 612            if self._match(TokenType.TAG):
 613                return self._parse_tag()
 614
 615            return None
 616
 617        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 618            if self._match(TokenType.TAG):
 619                return self._parse_tag()
 620
 621            return super()._parse_with_property()
 622
 623        def _parse_create(self) -> exp.Create | exp.Command:
 624            expression = super()._parse_create()
 625            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 626                # Replace the Table node with the enclosed Identifier
 627                expression.this.replace(expression.this.this)
 628
 629            return expression
 630
 631        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 632        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 633        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 634            this = self._parse_var() or self._parse_type()
 635
 636            if not this:
 637                return None
 638
 639            self._match(TokenType.COMMA)
 640            expression = self._parse_bitwise()
 641            this = map_date_part(this)
 642            name = this.name.upper()
 643
 644            if name.startswith("EPOCH"):
 645                if name == "EPOCH_MILLISECOND":
 646                    scale = 10**3
 647                elif name == "EPOCH_MICROSECOND":
 648                    scale = 10**6
 649                elif name == "EPOCH_NANOSECOND":
 650                    scale = 10**9
 651                else:
 652                    scale = None
 653
 654                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 655                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 656
 657                if scale:
 658                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 659
 660                return to_unix
 661
 662            return self.expression(exp.Extract, this=this, expression=expression)
 663
 664        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 665            if is_map:
 666                # Keys are strings in Snowflake's objects, see also:
 667                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 668                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 669                return self._parse_slice(self._parse_string())
 670
 671            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 672
 673        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 674            lateral = super()._parse_lateral()
 675            if not lateral:
 676                return lateral
 677
 678            if isinstance(lateral.this, exp.Explode):
 679                table_alias = lateral.args.get("alias")
 680                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 681                if table_alias and not table_alias.args.get("columns"):
 682                    table_alias.set("columns", columns)
 683                elif not table_alias:
 684                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 685
 686            return lateral
 687
 688        def _parse_table_parts(
 689            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
 690        ) -> exp.Table:
 691            # https://docs.snowflake.com/en/user-guide/querying-stage
 692            if self._match(TokenType.STRING, advance=False):
 693                table = self._parse_string()
 694            elif self._match_text_seq("@", advance=False):
 695                table = self._parse_location_path()
 696            else:
 697                table = None
 698
 699            if table:
 700                file_format = None
 701                pattern = None
 702
 703                wrapped = self._match(TokenType.L_PAREN)
 704                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
 705                    if self._match_text_seq("FILE_FORMAT", "=>"):
 706                        file_format = self._parse_string() or super()._parse_table_parts(
 707                            is_db_reference=is_db_reference
 708                        )
 709                    elif self._match_text_seq("PATTERN", "=>"):
 710                        pattern = self._parse_string()
 711                    else:
 712                        break
 713
 714                    self._match(TokenType.COMMA)
 715
 716                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
 717            else:
 718                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
 719
 720            return table
 721
 722        def _parse_id_var(
 723            self,
 724            any_token: bool = True,
 725            tokens: t.Optional[t.Collection[TokenType]] = None,
 726        ) -> t.Optional[exp.Expression]:
 727            if self._match_text_seq("IDENTIFIER", "("):
 728                identifier = (
 729                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 730                    or self._parse_string()
 731                )
 732                self._match_r_paren()
 733                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 734
 735            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 736
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the body of a Snowflake SHOW command into an exp.Show node.

            `this` is the object kind being shown (e.g. "TABLES", "PRIMARY KEYS").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # IN DATABASE/SCHEMA/... [<name>]
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare IN <name>: infer the scope kind from what is being shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 774
 775        def _parse_location_property(self) -> exp.LocationProperty:
 776            self._match(TokenType.EQ)
 777            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 778
 779        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 780            # Parse either a subquery or a staged file
 781            return (
 782                self._parse_select(table=True, parse_subquery_alias=False)
 783                if self._match(TokenType.L_PAREN, advance=False)
 784                else self._parse_table_parts()
 785            )
 786
        def _parse_location_path(self) -> exp.Var:
            """Parse a stage location path (e.g. @stage/dir/file) into an exp.Var.

            Tokens are concatenated verbatim, consuming reserved words as well.
            """
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 799
 800        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 801            this = super()._parse_lambda_arg()
 802
 803            if not this:
 804                return this
 805
 806            typ = self._parse_types()
 807
 808            if typ:
 809                return self.expression(exp.Cast, this=this, to=typ)
 810
 811            return this
 812
 813        def _parse_foreign_key(self) -> exp.ForeignKey:
 814            # inlineFK, the REFERENCES columns are implied
 815            if self._match(TokenType.REFERENCES, advance=False):
 816                return self.expression(exp.ForeignKey)
 817
 818            # outoflineFK, explicitly names the columns
 819            return super()._parse_foreign_key()
 820
    class Tokenizer(tokens.Tokenizer):
        # A quote inside a string can be escaped with a backslash or another quote
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ dollar-quoted strings are taken verbatim
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Drop the hint-comment prefix inherited from the base tokenizer
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a statement (see Parser.STATEMENT_PARSERS), not as a
        # pass-through command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 863
    class Generator(generator.Generator):
        # Snowflake-specific SQL generation settings; see the base Generator for
        # what each flag controls
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
 887
        # Expression-node -> SQL generation overrides for Snowflake
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (needle, haystack), i.e. reversed argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end, so add 1 to make it inclusive
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }
 995
        # JSON path node types this dialect can render (see jsonextract_sql)
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that force a VALUES clause to be rendered as a plain
        # expression list instead of a table (see values_sql)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
1020
1021        def with_properties(self, properties: exp.Properties) -> str:
1022            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1023
1024        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1025            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1026                values_as_table = False
1027
1028            return super().values_sql(expression, values_as_table=values_as_table)
1029
1030        def datatype_sql(self, expression: exp.DataType) -> str:
1031            expressions = expression.expressions
1032            if (
1033                expressions
1034                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1035                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1036            ):
1037                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1038                return "OBJECT"
1039
1040            return super().datatype_sql(expression)
1041
1042        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1043            return self.func(
1044                "TO_NUMBER",
1045                expression.this,
1046                expression.args.get("format"),
1047                expression.args.get("precision"),
1048                expression.args.get("scale"),
1049            )
1050
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds argument into the
            nanoseconds slot (1 ms = 1,000,000 ns)."""
            milli = expression.args.get("milli")
            if milli is not None:
                # pop() detaches the node so the value isn't rendered twice
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1058
1059        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1060            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1061                return self.func("TO_GEOGRAPHY", expression.this)
1062            if expression.is_type(exp.DataType.Type.GEOMETRY):
1063                return self.func("TO_GEOMETRY", expression.this)
1064
1065            return super().cast_sql(expression, safe_prefix=safe_prefix)
1066
1067        def trycast_sql(self, expression: exp.TryCast) -> str:
1068            value = expression.this
1069
1070            if value.type is None:
1071                from sqlglot.optimizer.annotate_types import annotate_types
1072
1073                value = annotate_types(value)
1074
1075            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1076                return super().trycast_sql(expression)
1077
1078            # TRY_CAST only works for string values in Snowflake
1079            return self.cast_sql(expression)
1080
1081        def log_sql(self, expression: exp.Log) -> str:
1082            if not expression.expression:
1083                return self.func("LN", expression.this)
1084
1085            return super().log_sql(expression)
1086
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) construct.

            A six-column table alias matching FLATTEN's output columns is always
            attached; an explicit offset replaces the `index` column name and the
            first user-supplied alias column replaces `value`.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # Alias names for FLATTEN's output columns, in positional order
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            # FLATTEN takes its argument as a named INPUT => parameter
            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1114
        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW command from an exp.Show node.

            Clause order follows the grammar: SHOW [TERSE] <kind> [HISTORY]
            [LIKE ...] [IN <scope>] [STARTS WITH ...] [LIMIT ...] [FROM ...].
            """
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1139
1140        def describe_sql(self, expression: exp.Describe) -> str:
1141            # Default to table if kind is unknown
1142            kind_value = expression.args.get("kind") or "TABLE"
1143            kind = f" {kind_value}" if kind_value else ""
1144            this = f" {self.sql(expression, 'this')}"
1145            expressions = self.expressions(expression, flat=True)
1146            expressions = f" {expressions}" if expressions else ""
1147            return f"DESCRIBE{kind}{this}{expressions}"
1148
        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity constraint as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            # NOTE(review): a falsy start/increment (e.g. the int 0) is silently
            # dropped by these truthiness checks -- confirm 0 can't occur here
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"
1157
1158        def cluster_sql(self, expression: exp.Cluster) -> str:
1159            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1160
1161        def struct_sql(self, expression: exp.Struct) -> str:
1162            keys = []
1163            values = []
1164
1165            for i, e in enumerate(expression.expressions):
1166                if isinstance(e, exp.PropertyEQ):
1167                    keys.append(
1168                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1169                    )
1170                    values.append(e.expression)
1171                else:
1172                    keys.append(exp.Literal.string(f"_{i}"))
1173                    values.append(e)
1174
1175            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1176
        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_QUANTILE as APPROX_PERCENTILE; the weight and accuracy
            arguments are flagged via the unsupported_args decorator."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1180
1181        def alterset_sql(self, expression: exp.AlterSet) -> str:
1182            exprs = self.expressions(expression, flat=True)
1183            exprs = f" {exprs}" if exprs else ""
1184            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1185            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1186            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1187            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1188            tag = self.expressions(expression, key="tag", flat=True)
1189            tag = f" TAG {tag}" if tag else ""
1190
1191            return f"SET{exprs}{file_format}{copy_options}{tag}"
1192
1193        def strtotime_sql(self, expression: exp.StrToTime):
1194            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1195            return self.func(
1196                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1197            )
1198
1199        def timestampsub_sql(self, expression: exp.TimestampSub):
1200            return self.sql(
1201                exp.TimestampAdd(
1202                    this=expression.this,
1203                    expression=expression.expression * -1,
1204                    unit=expression.unit,
1205                )
1206            )
1207
1208        def jsonextract_sql(self, expression: exp.JSONExtract):
1209            this = expression.this
1210
1211            # JSON strings are valid coming from other dialects such as BQ
1212            return self.func(
1213                "GET_PATH",
1214                exp.ParseJSON(this=this) if this.is_string else this,
1215                expression.expression,
1216            )
1217
1218        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1219            this = expression.this
1220            if not isinstance(this, exp.TsOrDsToTimestamp):
1221                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1222
1223            return self.func("TO_CHAR", this, self.format_time(expression))
1224
        def datesub_sql(self, expression: exp.DateSub) -> str:
            """Render DATE_SUB as DATEADD with a negated interval amount."""
            value = expression.expression
            if value:
                # Negate the count in place; DATEADD with a negative amount subtracts
                value.replace(value * (-1))
            else:
                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

            return date_delta_sql("DATEADD")(self, expression)
1233
1234        def select_sql(self, expression: exp.Select) -> str:
1235            limit = expression.args.get("limit")
1236            offset = expression.args.get("offset")
1237            if offset and not limit:
1238                expression.limit(exp.Null(), copy=False)
1239            return super().select_sql(expression)
class Snowflake(sqlglot.dialects.dialect.Dialect):
 327class Snowflake(Dialect):
 328    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
 329    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
 330    NULL_ORDERING = "nulls_are_large"
 331    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 332    SUPPORTS_USER_DEFINED_TYPES = False
 333    SUPPORTS_SEMI_ANTI_JOIN = False
 334    PREFER_CTE_ALIAS_COLUMN = True
 335    TABLESAMPLE_SIZE_IS_PERCENT = True
 336    COPY_PARAMS_ARE_CSV = False
 337    ARRAY_AGG_INCLUDES_NULLS = None
 338
 339    TIME_MAPPING = {
 340        "YYYY": "%Y",
 341        "yyyy": "%Y",
 342        "YY": "%y",
 343        "yy": "%y",
 344        "MMMM": "%B",
 345        "mmmm": "%B",
 346        "MON": "%b",
 347        "mon": "%b",
 348        "MM": "%m",
 349        "mm": "%m",
 350        "DD": "%d",
 351        "dd": "%-d",
 352        "DY": "%a",
 353        "dy": "%w",
 354        "HH24": "%H",
 355        "hh24": "%H",
 356        "HH12": "%I",
 357        "hh12": "%I",
 358        "MI": "%M",
 359        "mi": "%M",
 360        "SS": "%S",
 361        "ss": "%S",
 362        "FF6": "%f",
 363        "ff6": "%f",
 364    }
 365
 366    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 367        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 368        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 369        if (
 370            isinstance(expression, exp.Identifier)
 371            and isinstance(expression.parent, exp.Table)
 372            and expression.name.lower() == "dual"
 373        ):
 374            return expression  # type: ignore
 375
 376        return super().quote_identifier(expression, identify=identify)
 377
    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
        # "$" is excluded from JSON path single tokens; it serves as the parameter
        # token in this dialect (see Generator.PARAMETER_TOKEN)
        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("$")
 381
 382    class Parser(parser.Parser):
 383        IDENTIFY_PIVOT_STRINGS = True
 384        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
 385        COLON_IS_VARIANT_EXTRACT = True
 386
 387        ID_VAR_TOKENS = {
 388            *parser.Parser.ID_VAR_TOKENS,
 389            TokenType.MATCH_CONDITION,
 390        }
 391
 392        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
 393        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 394
 395        FUNCTIONS = {
 396            **parser.Parser.FUNCTIONS,
 397            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
 398            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
 399            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
 400                this=seq_get(args, 1), expression=seq_get(args, 0)
 401            ),
 402            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
 403                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
 404                start=seq_get(args, 0),
 405                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
 406                step=seq_get(args, 2),
 407            ),
 408            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
 409            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
 410            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
 411            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
 412            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
 413            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
 414            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
 415            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
 416            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
 417            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
 418            "DATE_TRUNC": _date_trunc_to_time,
 419            "DATEADD": _build_date_time_add(exp.DateAdd),
 420            "DATEDIFF": _build_datediff,
 421            "DIV0": _build_if_from_div0,
 422            "EDITDISTANCE": lambda args: exp.Levenshtein(
 423                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
 424            ),
 425            "FLATTEN": exp.Explode.from_arg_list,
 426            "GET_PATH": lambda args, dialect: exp.JSONExtract(
 427                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
 428            ),
 429            "IFF": exp.If.from_arg_list,
 430            "LAST_DAY": lambda args: exp.LastDay(
 431                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
 432            ),
 433            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
 434            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
 435            "LISTAGG": exp.GroupConcat.from_arg_list,
 436            "NULLIFZERO": _build_if_from_nullifzero,
 437            "OBJECT_CONSTRUCT": _build_object_construct,
 438            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
 439            "REGEXP_REPLACE": _build_regexp_replace,
 440            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
 441            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
 442            "RLIKE": exp.RegexpLike.from_arg_list,
 443            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
 444            "TIMEADD": _build_date_time_add(exp.TimeAdd),
 445            "TIMEDIFF": _build_datediff,
 446            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
 447            "TIMESTAMPDIFF": _build_datediff,
 448            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
 449            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
 450            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
 451            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
 452            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
 453            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
 454            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
 455            "TRY_TO_TIMESTAMP": _build_datetime(
 456                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
 457            ),
 458            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
 459            "TO_NUMBER": lambda args: exp.ToNumber(
 460                this=seq_get(args, 0),
 461                format=seq_get(args, 1),
 462                precision=seq_get(args, 2),
 463                scale=seq_get(args, 3),
 464            ),
 465            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
 466            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
 467            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
 468            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
 469            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
 470            "TO_VARCHAR": exp.ToChar.from_arg_list,
 471            "ZEROIFNULL": _build_if_from_zeroifnull,
 472        }
 473
        # Functions that need bespoke parsing logic instead of generic
        # argument-list parsing.
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM falls back to generic function-call parsing in this dialect.
        FUNCTION_PARSERS.pop("TRIM")

        # TokenType.TIME is excluded from the tokens treated as timestamp starts.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 482
        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            # Snowflake-specific quantified pattern predicates: LIKE ANY / ILIKE ANY
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <name> [, ...] is parsed as a Set with unset=True
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            # SHOW is a fully parsed statement in this dialect (see _parse_show_snowflake)
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            # Snowflake-specific properties: LOCATION = @stage/... and TAG (...)
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 514
        # Maps SHOW <kind> (optionally prefixed with TERSE) to the parser for
        # that object kind; all entries funnel into _parse_show_snowflake.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }
 536
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            # Column-level policy/tag constraints all dispatch through
            # _parse_with_constraint, which re-matches the leading token.
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside a staged file path
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns produced by LATERAL FLATTEN(...) (see _parse_lateral)
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare IN scope refers to a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a plain identifier, not a table reference
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Lambda parameters may be typed and thus parsed as casts; unwrap the
            # casts so only the parameter identifiers remain in `expressions`.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 568
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a parsed range predicate, special-casing `IN (subquery)`."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
 588
 589        def _parse_tag(self) -> exp.Tags:
 590            return self.expression(
 591                exp.Tags,
 592                expressions=self._parse_wrapped_csv(self._parse_property),
 593            )
 594
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # When invoked without a leading WITH keyword, step back one token so
            # the dispatching keyword (MASKING/PROJECTION/TAG) can be re-matched.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # A qualified policy name parses as a Column; normalize to a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 617
 618        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 619            if self._match(TokenType.TAG):
 620                return self._parse_tag()
 621
 622            return super()._parse_with_property()
 623
 624        def _parse_create(self) -> exp.Create | exp.Command:
 625            expression = super()._parse_create()
 626            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 627                # Replace the Table node with the enclosed Identifier
 628                expression.this.replace(expression.this.this)
 629
 630            return expression
 631
 632        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 633        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 634        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 635            this = self._parse_var() or self._parse_type()
 636
 637            if not this:
 638                return None
 639
 640            self._match(TokenType.COMMA)
 641            expression = self._parse_bitwise()
 642            this = map_date_part(this)
 643            name = this.name.upper()
 644
 645            if name.startswith("EPOCH"):
 646                if name == "EPOCH_MILLISECOND":
 647                    scale = 10**3
 648                elif name == "EPOCH_MICROSECOND":
 649                    scale = 10**6
 650                elif name == "EPOCH_NANOSECOND":
 651                    scale = 10**9
 652                else:
 653                    scale = None
 654
 655                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 656                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 657
 658                if scale:
 659                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 660
 661                return to_unix
 662
 663            return self.expression(exp.Extract, this=this, expression=expression)
 664
 665        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 666            if is_map:
 667                # Keys are strings in Snowflake's objects, see also:
 668                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 669                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 670                return self._parse_slice(self._parse_string())
 671
 672            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 673
 674        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 675            lateral = super()._parse_lateral()
 676            if not lateral:
 677                return lateral
 678
 679            if isinstance(lateral.this, exp.Explode):
 680                table_alias = lateral.args.get("alias")
 681                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 682                if table_alias and not table_alias.args.get("columns"):
 683                    table_alias.set("columns", columns)
 684                elif not table_alias:
 685                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 686
 687            return lateral
 688
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            # Staged files (string literals or @stage paths) can appear where a
            # table name is expected: https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # Either a literal format string or a named file-format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 722
 723        def _parse_id_var(
 724            self,
 725            any_token: bool = True,
 726            tokens: t.Optional[t.Collection[TokenType]] = None,
 727        ) -> t.Optional[exp.Expression]:
 728            if self._match_text_seq("IDENTIFIER", "("):
 729                identifier = (
 730                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 731                    or self._parse_string()
 732                )
 733                self._match_r_paren()
 734                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 735
 736            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 737
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW <kind> statement into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # A bare name: schema scope for schema-level kinds, else a table
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 775
 776        def _parse_location_property(self) -> exp.LocationProperty:
 777            self._match(TokenType.EQ)
 778            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 779
 780        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 781            # Parse either a subquery or a staged file
 782            return (
 783                self._parse_select(table=True, parse_subquery_alias=False)
 784                if self._match(TokenType.L_PAREN, advance=False)
 785                else self._parse_table_parts()
 786            )
 787
 788        def _parse_location_path(self) -> exp.Var:
 789            parts = [self._advance_any(ignore_reserved=True)]
 790
 791            # We avoid consuming a comma token because external tables like @foo and @bar
 792            # can be joined in a query with a comma separator, as well as closing paren
 793            # in case of subqueries
 794            while self._is_connected() and not self._match_set(
 795                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 796            ):
 797                parts.append(self._advance_any(ignore_reserved=True))
 798
 799            return exp.var("".join(part.text for part in parts if part))
 800
 801        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 802            this = super()._parse_lambda_arg()
 803
 804            if not this:
 805                return this
 806
 807            typ = self._parse_types()
 808
 809            if typ:
 810                return self.expression(exp.Cast, this=this, to=typ)
 811
 812            return this
 813
 814        def _parse_foreign_key(self) -> exp.ForeignKey:
 815            # inlineFK, the REFERENCES columns are implied
 816            if self._match(TokenType.REFERENCES, advance=False):
 817                return self.expression(exp.ForeignKey)
 818
 819            # outoflineFK, explicitly names the columns
 820            return super()._parse_foreign_key()
 821
    class Tokenizer(tokens.Tokenizer):
        # Strings escape via backslash or a doubled single quote
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # "/*+" (hint-comment opener) is not tokenized specially in this dialect
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed rather than treated as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 864
    class Generator(generator.Generator):
        # Dialect feature flags consumed by the base generator
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
 888
        # Expression -> SQL rendering overrides for Snowflake
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), i.e. reversed argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the + 1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF takes (unit, start, end), hence the argument swap
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }
 996
        # Only key, root and subscript JSON path components are generated
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Nested/struct types are rendered as semi-structured OBJECTs
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot be rendered inside a VALUES clause
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
1021
1022        def with_properties(self, properties: exp.Properties) -> str:
1023            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1024
1025        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1026            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1027                values_as_table = False
1028
1029            return super().values_sql(expression, values_as_table=values_as_table)
1030
1031        def datatype_sql(self, expression: exp.DataType) -> str:
1032            expressions = expression.expressions
1033            if (
1034                expressions
1035                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1036                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1037            ):
1038                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1039                return "OBJECT"
1040
1041            return super().datatype_sql(expression)
1042
1043        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1044            return self.func(
1045                "TO_NUMBER",
1046                expression.this,
1047                expression.args.get("format"),
1048                expression.args.get("precision"),
1049                expression.args.get("scale"),
1050            )
1051
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            # TIMESTAMP_FROM_PARTS takes a nanosecond argument rather than a
            # millisecond one, so convert in place (1 ms = 1_000_000 ns).
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1059
1060        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1061            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1062                return self.func("TO_GEOGRAPHY", expression.this)
1063            if expression.is_type(exp.DataType.Type.GEOMETRY):
1064                return self.func("TO_GEOMETRY", expression.this)
1065
1066            return super().cast_sql(expression, safe_prefix=safe_prefix)
1067
        def trycast_sql(self, expression: exp.TryCast) -> str:
            value = expression.this

            # Annotate the operand's type on demand so string operands can be
            # distinguished from non-string ones below.
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1081
1082        def log_sql(self, expression: exp.Log) -> str:
1083            if not expression.expression:
1084                return self.func("LN", expression.this)
1085
1086            return super().log_sql(expression)
1087
        def unnest_sql(self, expression: exp.Unnest) -> str:
            # UNNEST is rendered as TABLE(FLATTEN(INPUT => ...)), aliased with the
            # full FLATTEN output column list so column references keep resolving.
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit offset alias takes the place of FLATTEN's INDEX column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-provided column name aliases the VALUE column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1115
        def show_sql(self, expression: exp.Show) -> str:
            # Render SHOW [TERSE] <kind> [HISTORY] [LIKE ...] [IN ...]
            # [STARTS WITH ...] [LIMIT ...] [FROM ...], omitting unset clauses.
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1140
1141        def describe_sql(self, expression: exp.Describe) -> str:
1142            # Default to table if kind is unknown
1143            kind_value = expression.args.get("kind") or "TABLE"
1144            kind = f" {kind_value}" if kind_value else ""
1145            this = f" {self.sql(expression, 'this')}"
1146            expressions = self.expressions(expression, flat=True)
1147            expressions = f" {expressions}" if expressions else ""
1148            return f"DESCRIBE{kind}{this}{expressions}"
1149
1150        def generatedasidentitycolumnconstraint_sql(
1151            self, expression: exp.GeneratedAsIdentityColumnConstraint
1152        ) -> str:
1153            start = expression.args.get("start")
1154            start = f" START {start}" if start else ""
1155            increment = expression.args.get("increment")
1156            increment = f" INCREMENT {increment}" if increment else ""
1157            return f"AUTOINCREMENT{start}{increment}"
1158
1159        def cluster_sql(self, expression: exp.Cluster) -> str:
1160            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1161
1162        def struct_sql(self, expression: exp.Struct) -> str:
1163            keys = []
1164            values = []
1165
1166            for i, e in enumerate(expression.expressions):
1167                if isinstance(e, exp.PropertyEQ):
1168                    keys.append(
1169                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1170                    )
1171                    values.append(e.expression)
1172                else:
1173                    keys.append(exp.Literal.string(f"_{i}"))
1174                    values.append(e)
1175
1176            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1177
1178        @unsupported_args("weight", "accuracy")
1179        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1180            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1181
1182        def alterset_sql(self, expression: exp.AlterSet) -> str:
1183            exprs = self.expressions(expression, flat=True)
1184            exprs = f" {exprs}" if exprs else ""
1185            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1186            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1187            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1188            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1189            tag = self.expressions(expression, key="tag", flat=True)
1190            tag = f" TAG {tag}" if tag else ""
1191
1192            return f"SET{exprs}{file_format}{copy_options}{tag}"
1193
1194        def strtotime_sql(self, expression: exp.StrToTime):
1195            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1196            return self.func(
1197                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1198            )
1199
1200        def timestampsub_sql(self, expression: exp.TimestampSub):
1201            return self.sql(
1202                exp.TimestampAdd(
1203                    this=expression.this,
1204                    expression=expression.expression * -1,
1205                    unit=expression.unit,
1206                )
1207            )
1208
1209        def jsonextract_sql(self, expression: exp.JSONExtract):
1210            this = expression.this
1211
1212            # JSON strings are valid coming from other dialects such as BQ
1213            return self.func(
1214                "GET_PATH",
1215                exp.ParseJSON(this=this) if this.is_string else this,
1216                expression.expression,
1217            )
1218
1219        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1220            this = expression.this
1221            if not isinstance(this, exp.TsOrDsToTimestamp):
1222                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1223
1224            return self.func("TO_CHAR", this, self.format_time(expression))
1225
1226        def datesub_sql(self, expression: exp.DateSub) -> str:
1227            value = expression.expression
1228            if value:
1229                value.replace(value * (-1))
1230            else:
1231                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
1232
1233            return date_delta_sql("DATEADD")(self, expression)
1234
1235        def select_sql(self, expression: exp.Select) -> str:
1236            limit = expression.args.get("limit")
1237            offset = expression.args.get("offset")
1238            if offset and not limit:
1239                expression.limit(exp.Null(), copy=False)
1240            return super().select_sql(expression)
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace.

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
366    def quote_identifier(self, expression: E, identify: bool = True) -> E:
367        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
368        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
369        if (
370            isinstance(expression, exp.Identifier)
371            and isinstance(expression.parent, exp.Table)
372            and expression.name.lower() == "dual"
373        ):
374            return expression  # type: ignore
375
376        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character string "\n") to its unescaped version (the corresponding literal character, e.g. an actual newline).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'Snowflake.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {'6': {0: True}}}, 'f': {'f': {'6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.JSONPathTokenizer(sqlglot.jsonpath.JSONPathTokenizer):
378    class JSONPathTokenizer(jsonpath.JSONPathTokenizer):
379        SINGLE_TOKENS = jsonpath.JSONPathTokenizer.SINGLE_TOKENS.copy()
380        SINGLE_TOKENS.pop("$")
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '-': <TokenType.DASH: 'DASH'>, '.': <TokenType.DOT: 'DOT'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '"': <TokenType.QUOTE: 'QUOTE'>, '*': <TokenType.STAR: 'STAR'>}
class Snowflake.Parser(sqlglot.parser.Parser):
382    class Parser(parser.Parser):
383        IDENTIFY_PIVOT_STRINGS = True
384        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
385        COLON_IS_VARIANT_EXTRACT = True
386
387        ID_VAR_TOKENS = {
388            *parser.Parser.ID_VAR_TOKENS,
389            TokenType.MATCH_CONDITION,
390        }
391
392        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
393        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
394
395        FUNCTIONS = {
396            **parser.Parser.FUNCTIONS,
397            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
398            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
399            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
400                this=seq_get(args, 1), expression=seq_get(args, 0)
401            ),
402            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
403            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
404                start=seq_get(args, 0),
405                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
406                step=seq_get(args, 2),
407            ),
408            "BITXOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
409            "BIT_XOR": _build_bitwise(exp.BitwiseXor, "BITXOR"),
410            "BITOR": _build_bitwise(exp.BitwiseOr, "BITOR"),
411            "BIT_OR": _build_bitwise(exp.BitwiseOr, "BITOR"),
412            "BITSHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BITSHIFTLEFT"),
413            "BIT_SHIFTLEFT": _build_bitwise(exp.BitwiseLeftShift, "BIT_SHIFTLEFT"),
414            "BITSHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BITSHIFTRIGHT"),
415            "BIT_SHIFTRIGHT": _build_bitwise(exp.BitwiseRightShift, "BIT_SHIFTRIGHT"),
416            "BOOLXOR": _build_bitwise(exp.Xor, "BOOLXOR"),
417            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
418            "DATE_TRUNC": _date_trunc_to_time,
419            "DATEADD": _build_date_time_add(exp.DateAdd),
420            "DATEDIFF": _build_datediff,
421            "DIV0": _build_if_from_div0,
422            "EDITDISTANCE": lambda args: exp.Levenshtein(
423                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
424            ),
425            "FLATTEN": exp.Explode.from_arg_list,
426            "GET_PATH": lambda args, dialect: exp.JSONExtract(
427                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
428            ),
429            "IFF": exp.If.from_arg_list,
430            "LAST_DAY": lambda args: exp.LastDay(
431                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
432            ),
433            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
434            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
435            "LISTAGG": exp.GroupConcat.from_arg_list,
436            "NULLIFZERO": _build_if_from_nullifzero,
437            "OBJECT_CONSTRUCT": _build_object_construct,
438            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
439            "REGEXP_REPLACE": _build_regexp_replace,
440            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
441            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
442            "RLIKE": exp.RegexpLike.from_arg_list,
443            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
444            "TIMEADD": _build_date_time_add(exp.TimeAdd),
445            "TIMEDIFF": _build_datediff,
446            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
447            "TIMESTAMPDIFF": _build_datediff,
448            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
449            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
450            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
451            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
452            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
453            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
454            "TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
455            "TRY_TO_TIMESTAMP": _build_datetime(
456                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
457            ),
458            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
459            "TO_NUMBER": lambda args: exp.ToNumber(
460                this=seq_get(args, 0),
461                format=seq_get(args, 1),
462                precision=seq_get(args, 2),
463                scale=seq_get(args, 3),
464            ),
465            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
466            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
467            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
468            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
469            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
470            "TO_VARCHAR": exp.ToChar.from_arg_list,
471            "ZEROIFNULL": _build_if_from_zeroifnull,
472        }
473
474        FUNCTION_PARSERS = {
475            **parser.Parser.FUNCTION_PARSERS,
476            "DATE_PART": lambda self: self._parse_date_part(),
477            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
478        }
479        FUNCTION_PARSERS.pop("TRIM")
480
481        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
482
483        RANGE_PARSERS = {
484            **parser.Parser.RANGE_PARSERS,
485            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
486            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
487        }
488
489        ALTER_PARSERS = {
490            **parser.Parser.ALTER_PARSERS,
491            "UNSET": lambda self: self.expression(
492                exp.Set,
493                tag=self._match_text_seq("TAG"),
494                expressions=self._parse_csv(self._parse_id_var),
495                unset=True,
496            ),
497        }
498
499        STATEMENT_PARSERS = {
500            **parser.Parser.STATEMENT_PARSERS,
501            TokenType.SHOW: lambda self: self._parse_show(),
502        }
503
504        PROPERTY_PARSERS = {
505            **parser.Parser.PROPERTY_PARSERS,
506            "LOCATION": lambda self: self._parse_location_property(),
507            "TAG": lambda self: self._parse_tag(),
508        }
509
510        TYPE_CONVERTERS = {
511            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
512            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
513        }
514
515        SHOW_PARSERS = {
516            "SCHEMAS": _show_parser("SCHEMAS"),
517            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
518            "OBJECTS": _show_parser("OBJECTS"),
519            "TERSE OBJECTS": _show_parser("OBJECTS"),
520            "TABLES": _show_parser("TABLES"),
521            "TERSE TABLES": _show_parser("TABLES"),
522            "VIEWS": _show_parser("VIEWS"),
523            "TERSE VIEWS": _show_parser("VIEWS"),
524            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
525            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
526            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
527            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
528            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
529            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
530            "SEQUENCES": _show_parser("SEQUENCES"),
531            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
532            "COLUMNS": _show_parser("COLUMNS"),
533            "USERS": _show_parser("USERS"),
534            "TERSE USERS": _show_parser("USERS"),
535        }
536
537        CONSTRAINT_PARSERS = {
538            **parser.Parser.CONSTRAINT_PARSERS,
539            "WITH": lambda self: self._parse_with_constraint(),
540            "MASKING": lambda self: self._parse_with_constraint(),
541            "PROJECTION": lambda self: self._parse_with_constraint(),
542            "TAG": lambda self: self._parse_with_constraint(),
543        }
544
545        STAGED_FILE_SINGLE_TOKENS = {
546            TokenType.DOT,
547            TokenType.MOD,
548            TokenType.SLASH,
549        }
550
551        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
552
553        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
554
555        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
556
557        LAMBDAS = {
558            **parser.Parser.LAMBDAS,
559            TokenType.ARROW: lambda self, expressions: self.expression(
560                exp.Lambda,
561                this=self._replace_lambda(
562                    self._parse_assignment(),
563                    expressions,
564                ),
565                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
566            ),
567        }
568
569        def _negate_range(
570            self, this: t.Optional[exp.Expression] = None
571        ) -> t.Optional[exp.Expression]:
572            if not this:
573                return this
574
575            query = this.args.get("query")
576            if isinstance(this, exp.In) and isinstance(query, exp.Query):
577                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
578                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
579                # which can produce different results (most likely a Snowflake bug).
580                #
581                # https://docs.snowflake.com/en/sql-reference/functions/in
582                # Context: https://github.com/tobymao/sqlglot/issues/3890
583                return self.expression(
584                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
585                )
586
587            return self.expression(exp.Not, this=this)
588
589        def _parse_tag(self) -> exp.Tags:
590            return self.expression(
591                exp.Tags,
592                expressions=self._parse_wrapped_csv(self._parse_property),
593            )
594
595        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
596            if self._prev.token_type != TokenType.WITH:
597                self._retreat(self._index - 1)
598
599            if self._match_text_seq("MASKING", "POLICY"):
600                policy = self._parse_column()
601                return self.expression(
602                    exp.MaskingPolicyColumnConstraint,
603                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
604                    expressions=self._match(TokenType.USING)
605                    and self._parse_wrapped_csv(self._parse_id_var),
606                )
607            if self._match_text_seq("PROJECTION", "POLICY"):
608                policy = self._parse_column()
609                return self.expression(
610                    exp.ProjectionPolicyColumnConstraint,
611                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
612                )
613            if self._match(TokenType.TAG):
614                return self._parse_tag()
615
616            return None
617
618        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
619            if self._match(TokenType.TAG):
620                return self._parse_tag()
621
622            return super()._parse_with_property()
623
624        def _parse_create(self) -> exp.Create | exp.Command:
625            expression = super()._parse_create()
626            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
627                # Replace the Table node with the enclosed Identifier
628                expression.this.replace(expression.this.this)
629
630            return expression
631
632        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
633        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
634        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
635            this = self._parse_var() or self._parse_type()
636
637            if not this:
638                return None
639
640            self._match(TokenType.COMMA)
641            expression = self._parse_bitwise()
642            this = map_date_part(this)
643            name = this.name.upper()
644
645            if name.startswith("EPOCH"):
646                if name == "EPOCH_MILLISECOND":
647                    scale = 10**3
648                elif name == "EPOCH_MICROSECOND":
649                    scale = 10**6
650                elif name == "EPOCH_NANOSECOND":
651                    scale = 10**9
652                else:
653                    scale = None
654
655                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
656                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
657
658                if scale:
659                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
660
661                return to_unix
662
663            return self.expression(exp.Extract, this=this, expression=expression)
664
665        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
666            if is_map:
667                # Keys are strings in Snowflake's objects, see also:
668                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
669                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
670                return self._parse_slice(self._parse_string())
671
672            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
673
674        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
675            lateral = super()._parse_lateral()
676            if not lateral:
677                return lateral
678
679            if isinstance(lateral.this, exp.Explode):
680                table_alias = lateral.args.get("alias")
681                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
682                if table_alias and not table_alias.args.get("columns"):
683                    table_alias.set("columns", columns)
684                elif not table_alias:
685                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
686
687            return lateral
688
689        def _parse_table_parts(
690            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
691        ) -> exp.Table:
692            # https://docs.snowflake.com/en/user-guide/querying-stage
693            if self._match(TokenType.STRING, advance=False):
694                table = self._parse_string()
695            elif self._match_text_seq("@", advance=False):
696                table = self._parse_location_path()
697            else:
698                table = None
699
700            if table:
701                file_format = None
702                pattern = None
703
704                wrapped = self._match(TokenType.L_PAREN)
705                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
706                    if self._match_text_seq("FILE_FORMAT", "=>"):
707                        file_format = self._parse_string() or super()._parse_table_parts(
708                            is_db_reference=is_db_reference
709                        )
710                    elif self._match_text_seq("PATTERN", "=>"):
711                        pattern = self._parse_string()
712                    else:
713                        break
714
715                    self._match(TokenType.COMMA)
716
717                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
718            else:
719                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
720
721            return table
722
723        def _parse_id_var(
724            self,
725            any_token: bool = True,
726            tokens: t.Optional[t.Collection[TokenType]] = None,
727        ) -> t.Optional[exp.Expression]:
728            if self._match_text_seq("IDENTIFIER", "("):
729                identifier = (
730                    super()._parse_id_var(any_token=any_token, tokens=tokens)
731                    or self._parse_string()
732                )
733                self._match_r_paren()
734                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
735
736            return super()._parse_id_var(any_token=any_token, tokens=tokens)
737
738        def _parse_show_snowflake(self, this: str) -> exp.Show:
739            scope = None
740            scope_kind = None
741
742            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
743            # which is syntactically valid but has no effect on the output
744            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
745
746            history = self._match_text_seq("HISTORY")
747
748            like = self._parse_string() if self._match(TokenType.LIKE) else None
749
750            if self._match(TokenType.IN):
751                if self._match_text_seq("ACCOUNT"):
752                    scope_kind = "ACCOUNT"
753                elif self._match_set(self.DB_CREATABLES):
754                    scope_kind = self._prev.text.upper()
755                    if self._curr:
756                        scope = self._parse_table_parts()
757                elif self._curr:
758                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
759                    scope = self._parse_table_parts()
760
761            return self.expression(
762                exp.Show,
763                **{
764                    "terse": terse,
765                    "this": this,
766                    "history": history,
767                    "like": like,
768                    "scope": scope,
769                    "scope_kind": scope_kind,
770                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
771                    "limit": self._parse_limit(),
772                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
773                },
774            )
775
776        def _parse_location_property(self) -> exp.LocationProperty:
777            self._match(TokenType.EQ)
778            return self.expression(exp.LocationProperty, this=self._parse_location_path())
779
780        def _parse_file_location(self) -> t.Optional[exp.Expression]:
781            # Parse either a subquery or a staged file
782            return (
783                self._parse_select(table=True, parse_subquery_alias=False)
784                if self._match(TokenType.L_PAREN, advance=False)
785                else self._parse_table_parts()
786            )
787
788        def _parse_location_path(self) -> exp.Var:
789            parts = [self._advance_any(ignore_reserved=True)]
790
791            # We avoid consuming a comma token because external tables like @foo and @bar
792            # can be joined in a query with a comma separator, as well as closing paren
793            # in case of subqueries
794            while self._is_connected() and not self._match_set(
795                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
796            ):
797                parts.append(self._advance_any(ignore_reserved=True))
798
799            return exp.var("".join(part.text for part in parts if part))
800
801        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
802            this = super()._parse_lambda_arg()
803
804            if not this:
805                return this
806
807            typ = self._parse_types()
808
809            if typ:
810                return self.expression(exp.Cast, this=this, to=typ)
811
812            return this
813
814        def _parse_foreign_key(self) -> exp.ForeignKey:
815            # inline FK: the REFERENCES columns are implied
816            if self._match(TokenType.REFERENCES, advance=False):
817                return self.expression(exp.ForeignKey)
818
819            # out-of-line FK: explicitly names the columns
820            return super()._parse_foreign_key()

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.NEXT: 'NEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROW: 'ROW'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.POINT: 'POINT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.CASE: 'CASE'>, <TokenType.TIME: 'TIME'>, <TokenType.LOAD: 'LOAD'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ASC: 'ASC'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.SET: 'SET'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INT: 'INT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FULL: 'FULL'>, <TokenType.JSON: 'JSON'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.COPY: 'COPY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.INT128: 'INT128'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TOP: 'TOP'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DELETE: 'DELETE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.UINT256: 'UINT256'>, <TokenType.ROWVERSION: 'ROWVERSION'>, 
<TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.NAME: 'NAME'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.ANY: 'ANY'>, <TokenType.RING: 'RING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.DATE: 'DATE'>, <TokenType.NULL: 'NULL'>, <TokenType.LEFT: 'LEFT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.KILL: 'KILL'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DIV: 'DIV'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.UINT: 'UINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.VAR: 'VAR'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INET: 'INET'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.FALSE: 'FALSE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DETACH: 'DETACH'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.STRUCT: 
'STRUCT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TEXT: 'TEXT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ASOF: 'ASOF'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.USE: 'USE'>, <TokenType.TAG: 'TAG'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.LIST: 'LIST'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.APPLY: 'APPLY'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.NAMESPACE: 
'NAMESPACE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DESC: 'DESC'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.END: 'END'>, <TokenType.XML: 'XML'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.SEMI: 'SEMI'>, <TokenType.IS: 'IS'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BIT: 'BIT'>, <TokenType.INT256: 'INT256'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.SINK: 'SINK'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ALL: 'ALL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.MAP: 'MAP'>}
TABLE_ALIAS_TOKENS = {<TokenType.NEXT: 'NEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROW: 'ROW'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.POINT: 'POINT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.CASE: 'CASE'>, <TokenType.TIME: 'TIME'>, <TokenType.LOAD: 'LOAD'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ASC: 'ASC'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.SET: 'SET'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INT: 'INT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.JSON: 'JSON'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.COPY: 'COPY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.INT128: 'INT128'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TOP: 'TOP'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DELETE: 'DELETE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.UINT256: 'UINT256'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CURRENT_USER: 
'CURRENT_USER'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.NAME: 'NAME'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.ANY: 'ANY'>, <TokenType.RING: 'RING'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.DATE: 'DATE'>, <TokenType.NULL: 'NULL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.KILL: 'KILL'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DIV: 'DIV'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.CUBE: 'CUBE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.UINT: 'UINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.VAR: 'VAR'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SOME: 'SOME'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.ANTI: 'ANTI'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INET: 'INET'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.FALSE: 'FALSE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DETACH: 'DETACH'>, <TokenType.YEAR: 'YEAR'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.ROLLUP: 'ROLLUP'>, 
<TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TEXT: 'TEXT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INDEX: 'INDEX'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.USE: 'USE'>, <TokenType.TAG: 'TAG'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.LIST: 'LIST'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, 
<TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DESC: 'DESC'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.END: 'END'>, <TokenType.XML: 'XML'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.IS: 'IS'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SEMI: 'SEMI'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BIT: 'BIT'>, <TokenType.INT256: 'INT256'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.SUPER: 'SUPER'>, <TokenType.SINK: 'SINK'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ALL: 'ALL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MAP: 'MAP'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function 
_build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsAscii'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'CHAR_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHARACTER_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function 
build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 
'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNICODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unicode'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 
'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'XMLELEMENT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLElement'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 
'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'STRPOS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'CHARINDEX': <function Parser.<lambda>>, 'INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'LOCATE': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function _build_bitwise.<locals>._builder>, 'BIT_XOR': <function _build_bitwise.<locals>._builder>, 'BITOR': <function _build_bitwise.<locals>._builder>, 'BIT_OR': <function _build_bitwise.<locals>._builder>, 'BITSHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTLEFT': <function _build_bitwise.<locals>._builder>, 'BITSHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BIT_SHIFTRIGHT': <function _build_bitwise.<locals>._builder>, 'BOOLXOR': <function _build_bitwise.<locals>._builder>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function 
_build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIME': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CEIL': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'FLOOR': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'XMLELEMENT': <function Parser.<lambda>>, 'XMLTABLE': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.ANALYZE: 'ANALYZE'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UNPIVOT: 'UNPIVOT'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'UNIQUE KEYS', 'SEQUENCES', 'IMPORTED KEYS', 'VIEWS', 'OBJECTS', 'TABLES'}
NON_TABLE_CREATABLES = {'STORAGE INTEGRATION', 'TAG', 'WAREHOUSE', 'STREAMLIT'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
ANALYZE_STYLES
ANALYZE_EXPRESSION_PARSERS
PARTITION_KEYWORDS
AMBIGUOUS_ALIAS_TOKENS
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
OPTIONAL_ALIAS_TOKEN_CTE
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
class Tokenizer(tokens.Tokenizer):
    """Snowflake-specific tokenizer configuration.

    Overrides the base tokenizer's string/comment delimiters and keyword map
    to match Snowflake's lexical rules.
    """

    # Snowflake allows escaping inside single-quoted strings with either a
    # backslash or a doubled single quote.
    STRING_ESCAPES = ["\\", "'"]
    # Hex (binary) string literals: x'...' / X'...'
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # Dollar-quoted raw strings: $$ ... $$
    RAW_STRINGS = ["$$"]
    # Snowflake supports both -- and // line comments plus /* */ blocks.
    COMMENTS = ["--", "//", ("/*", "*/")]
    # Block comments do not nest in Snowflake.
    NESTED_COMMENTS = False

    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "EXCLUDE": TokenType.EXCEPT,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_CONDITION": TokenType.MATCH_CONDITION,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        # File-staging statements are tokenized as opaque commands.
        "PUT": TokenType.COMMAND,
        "REMOVE": TokenType.COMMAND,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TAG": TokenType.TAG,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TOP": TokenType.TOP,
        "WAREHOUSE": TokenType.WAREHOUSE,
        "STREAMLIT": TokenType.STREAMLIT,
    }
    # The base tokenizer treats "/*+" as a hint-comment opener; Snowflake has
    # no optimizer hint comments, so drop it from the keyword trie.
    KEYWORDS.pop("/*+")

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        # "$" introduces parameters / stage references in Snowflake.
        "$": TokenType.PARAMETER,
    }

    # "$" may also appear inside identifiers/variables.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is fully parsed (see Snowflake.Parser.SHOW_PARSERS) rather than
    # being swallowed as a generic command like in the base tokenizer.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': 
<TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 
'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 
'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': 
<TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.RENAME: 'RENAME'>}
class Snowflake.Generator(sqlglot.generator.Generator):
    class Generator(generator.Generator):
        """Converts a sqlglot syntax tree into Snowflake SQL text."""

        # Snowflake session variables/placeholders are prefixed with "$".
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is invoked as a function, not an infix operator.
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells `SELECT * EXCEPT (...)` as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-expression SQL rendering overrides, layered over the base generator's.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Note the argument swap: ARRAY_CONTAINS(value, array) in Snowflake.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseLeftShift: rename_func("BITSHIFTLEFT"),
            exp.BitwiseRightShift: rename_func("BITSHIFTRIGHT"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's upper bound is exclusive, hence the `+ 1`.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: strposition_sql(
                self, e, func_name="CHARINDEX", supports_position=True
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF takes (unit, start, end), so `expression` precedes `this`.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.TsOrDsToTime: lambda self, e: self.func(
                "TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        # JSON path components Snowflake can express natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that force VALUES to be rendered inline rather than
        # as a VALUES-table (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
1021
1022        def with_properties(self, properties: exp.Properties) -> str:
1023            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
1024
1025        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
1026            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
1027                values_as_table = False
1028
1029            return super().values_sql(expression, values_as_table=values_as_table)
1030
1031        def datatype_sql(self, expression: exp.DataType) -> str:
1032            expressions = expression.expressions
1033            if (
1034                expressions
1035                and expression.is_type(*exp.DataType.STRUCT_TYPES)
1036                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
1037            ):
1038                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
1039                return "OBJECT"
1040
1041            return super().datatype_sql(expression)
1042
1043        def tonumber_sql(self, expression: exp.ToNumber) -> str:
1044            return self.func(
1045                "TO_NUMBER",
1046                expression.this,
1047                expression.args.get("format"),
1048                expression.args.get("precision"),
1049                expression.args.get("scale"),
1050            )
1051
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            # TIMESTAMP_FROM_PARTS takes nanoseconds, not milliseconds: pop any
            # `milli` argument off the tree and re-attach it as `nano`
            # (1 ms = 1_000_000 ns). Mutates `expression` in place.
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
1059
1060        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1061            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1062                return self.func("TO_GEOGRAPHY", expression.this)
1063            if expression.is_type(exp.DataType.Type.GEOMETRY):
1064                return self.func("TO_GEOMETRY", expression.this)
1065
1066            return super().cast_sql(expression, safe_prefix=safe_prefix)
1067
1068        def trycast_sql(self, expression: exp.TryCast) -> str:
1069            value = expression.this
1070
1071            if value.type is None:
1072                from sqlglot.optimizer.annotate_types import annotate_types
1073
1074                value = annotate_types(value)
1075
1076            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1077                return super().trycast_sql(expression)
1078
1079            # TRY_CAST only works for string values in Snowflake
1080            return self.cast_sql(expression)
1081
1082        def log_sql(self, expression: exp.Log) -> str:
1083            if not expression.expression:
1084                return self.func("LN", expression.this)
1085
1086            return super().log_sql(expression)
1087
        def unnest_sql(self, expression: exp.Unnest) -> str:
            # Snowflake has no UNNEST; emulate it with TABLE(FLATTEN(INPUT => ...)).
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN yields six positional columns (SEQ, KEY, PATH, INDEX, VALUE,
            # THIS). Alias them positionally so an explicit offset alias lands on
            # INDEX and the caller's first column alias lands on VALUE.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias given by the caller: synthesize one so the columns bind.
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            table_input = self.sql(expression.expressions[0])
            if not table_input.startswith("INPUT =>"):
                table_input = f"INPUT => {table_input}"

            explode = f"TABLE(FLATTEN({table_input}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1115
1116        def show_sql(self, expression: exp.Show) -> str:
1117            terse = "TERSE " if expression.args.get("terse") else ""
1118            history = " HISTORY" if expression.args.get("history") else ""
1119            like = self.sql(expression, "like")
1120            like = f" LIKE {like}" if like else ""
1121
1122            scope = self.sql(expression, "scope")
1123            scope = f" {scope}" if scope else ""
1124
1125            scope_kind = self.sql(expression, "scope_kind")
1126            if scope_kind:
1127                scope_kind = f" IN {scope_kind}"
1128
1129            starts_with = self.sql(expression, "starts_with")
1130            if starts_with:
1131                starts_with = f" STARTS WITH {starts_with}"
1132
1133            limit = self.sql(expression, "limit")
1134
1135            from_ = self.sql(expression, "from")
1136            if from_:
1137                from_ = f" FROM {from_}"
1138
1139            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1140
1141        def describe_sql(self, expression: exp.Describe) -> str:
1142            # Default to table if kind is unknown
1143            kind_value = expression.args.get("kind") or "TABLE"
1144            kind = f" {kind_value}" if kind_value else ""
1145            this = f" {self.sql(expression, 'this')}"
1146            expressions = self.expressions(expression, flat=True)
1147            expressions = f" {expressions}" if expressions else ""
1148            return f"DESCRIBE{kind}{this}{expressions}"
1149
1150        def generatedasidentitycolumnconstraint_sql(
1151            self, expression: exp.GeneratedAsIdentityColumnConstraint
1152        ) -> str:
1153            start = expression.args.get("start")
1154            start = f" START {start}" if start else ""
1155            increment = expression.args.get("increment")
1156            increment = f" INCREMENT {increment}" if increment else ""
1157            return f"AUTOINCREMENT{start}{increment}"
1158
1159        def cluster_sql(self, expression: exp.Cluster) -> str:
1160            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1161
1162        def struct_sql(self, expression: exp.Struct) -> str:
1163            keys = []
1164            values = []
1165
1166            for i, e in enumerate(expression.expressions):
1167                if isinstance(e, exp.PropertyEQ):
1168                    keys.append(
1169                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1170                    )
1171                    values.append(e.expression)
1172                else:
1173                    keys.append(exp.Literal.string(f"_{i}"))
1174                    values.append(e)
1175
1176            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1177
1178        @unsupported_args("weight", "accuracy")
1179        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1180            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1181
1182        def alterset_sql(self, expression: exp.AlterSet) -> str:
1183            exprs = self.expressions(expression, flat=True)
1184            exprs = f" {exprs}" if exprs else ""
1185            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1186            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1187            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1188            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1189            tag = self.expressions(expression, key="tag", flat=True)
1190            tag = f" TAG {tag}" if tag else ""
1191
1192            return f"SET{exprs}{file_format}{copy_options}{tag}"
1193
1194        def strtotime_sql(self, expression: exp.StrToTime):
1195            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1196            return self.func(
1197                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1198            )
1199
1200        def timestampsub_sql(self, expression: exp.TimestampSub):
1201            return self.sql(
1202                exp.TimestampAdd(
1203                    this=expression.this,
1204                    expression=expression.expression * -1,
1205                    unit=expression.unit,
1206                )
1207            )
1208
1209        def jsonextract_sql(self, expression: exp.JSONExtract):
1210            this = expression.this
1211
1212            # JSON strings are valid coming from other dialects such as BQ
1213            return self.func(
1214                "GET_PATH",
1215                exp.ParseJSON(this=this) if this.is_string else this,
1216                expression.expression,
1217            )
1218
1219        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1220            this = expression.this
1221            if not isinstance(this, exp.TsOrDsToTimestamp):
1222                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1223
1224            return self.func("TO_CHAR", this, self.format_time(expression))
1225
        def datesub_sql(self, expression: exp.DateSub) -> str:
            # Snowflake has no DATESUB: negate the interval in place (mutates the
            # tree via `replace`) and emit DATEADD instead. When the subtracted
            # count is absent we can only warn, not transpile.
            value = expression.expression
            if value:
                value.replace(value * (-1))
            else:
                self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")

            return date_delta_sql("DATEADD")(self, expression)
1234
1235        def select_sql(self, expression: exp.Select) -> str:
1236            limit = expression.args.get("limit")
1237            offset = expression.args.get("offset")
1238            if offset and not limit:
1239                expression.limit(exp.Null(), copy=False)
1240            return super().select_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeWith'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingData'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseLeftShift'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseRightShift'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function 
no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: 
<function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Map'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties inline (unwrapped), space-separated, after a leading separator."""
    lead = self.sep("")
    return self.properties(properties, wrapped=False, prefix=lead, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Generate a VALUES clause, falling back to inline VALUES when the expression
    contains constructs Snowflake cannot put inside a VALUES table (maps/structs)."""
    has_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(expression, values_as_table=values_as_table and not has_unsupported)
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing typed STRUCTs to a plain OBJECT.

    The correct Snowflake syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ],
    so a struct whose fields are bare DataType nodes (no key names) cannot be
    rendered faithfully and is emitted as an untyped OBJECT instead.
    """
    fields = expression.expressions
    is_typed_struct = (
        bool(fields)
        and expression.is_type(*exp.DataType.STRUCT_TYPES)
        and any(isinstance(field, exp.DataType) for field in fields)
    )
    return "OBJECT" if is_typed_struct else super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format][, precision][, scale])."""
    optional_args = [expression.args.get(key) for key in ("format", "precision", "scale")]
    return self.func("TO_NUMBER", expression.this, *optional_args)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding a milliseconds argument into nanoseconds.

    Snowflake's TIMESTAMP_FROM_PARTS only accepts nanoseconds, so a `milli`
    argument is converted (1 ms == 1_000_000 ns) and moved to the `nano` slot.
    """
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))
    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a cast, mapping spatial-type casts to TO_GEOGRAPHY / TO_GEOMETRY."""
    spatial_casts = (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    )
    for dtype, converter in spatial_casts:
        if expression.is_type(dtype):
            return self.func(converter, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, which Snowflake only supports for string inputs.

    Values that are not (possibly) strings are rendered as a plain CAST;
    untyped values are annotated first so their type can be inspected.
    """
    value = expression.this

    if value.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; a single-argument LOG is the natural logarithm, i.e. LN."""
    if expression.expression:
        return super().log_sql(expression)
    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

    FLATTEN yields a fixed column set (seq, key, path, index, value, this);
    a caller-supplied offset/alias column replaces the default index/value name.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    # User-provided names win for the index (offset) and value positions.
    index_col = (
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index")
    )
    value_col = seq_get(unnest_alias.columns if unnest_alias else [], 0) or exp.to_identifier(
        "value"
    )
    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_col,
        value_col,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    table_input = self.sql(expression.expressions[0])
    if not table_input.startswith("INPUT =>"):
        table_input = f"INPUT => {table_input}"

    flattened = f"TABLE(FLATTEN({table_input}))"
    alias_sql = self.sql(unnest_alias)
    return f"{flattened} AS {alias_sql}" if alias_sql else flattened
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifiers."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    if scope:
        scope = f" {scope}"

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    # The LIMIT clause renders with its own leading keyword and spacing.
    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unspecified."""
    # The `or "TABLE"` fallback guarantees a non-empty kind.
    kind_value = expression.args.get("kind") or "TABLE"
    target = self.sql(expression, "this")
    extra = self.expressions(expression, flat=True)
    extra = f" {extra}" if extra else ""
    return f"DESCRIBE {kind_value} {target}{extra}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column as Snowflake's AUTOINCREMENT [START n] [INCREMENT n]."""
    start = expression.args.get("start")
    start_sql = f" START {start}" if start else ""
    increment = expression.args.get("increment")
    increment_sql = f" INCREMENT {increment}" if increment else ""
    return f"AUTOINCREMENT{start_sql}{increment_sql}"
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render CLUSTER BY with a parenthesized column list (Snowflake form)."""
    cols = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({cols})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...).

    Named fields (PropertyEQ) keep their names as string keys; positional
    fields are assigned synthetic "_<index>" keys.
    """
    pairs = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            key = (
                exp.Literal.string(field.name)
                if isinstance(field.this, exp.Identifier)
                else field.this
            )
            pairs.append((key, field.expression))
        else:
            pairs.append((exp.Literal.string(f"_{i}"), field))

    return self.func("OBJECT_CONSTRUCT", *flatten(pairs))
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render APPROX_PERCENTILE(value, quantile); weight/accuracy are unsupported."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with stage file-format/copy options and tags."""
    pieces = []

    exprs = self.expressions(expression, flat=True)
    if exprs:
        pieces.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        pieces.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        pieces.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        pieces.append(f" TAG {tag}")

    return "SET" + "".join(pieces)
def strtotime_sql(self, expression: exp.StrToTime):
    """Render STR_TO_TIME as [TRY_]TO_TIMESTAMP(value, format)."""
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Rewrite TIMESTAMP_SUB as TIMESTAMP_ADD with the amount negated."""
    negated = expression.expression * -1
    add = exp.TimestampAdd(this=expression.this, expression=negated, unit=expression.unit)
    return self.sql(add)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """Render JSON extraction as GET_PATH, parsing string input first.

    JSON strings are valid coming from other dialects such as BQ, so a
    string operand is wrapped in PARSE_JSON before the path lookup.
    """
    this = expression.this
    target = exp.ParseJSON(this=this) if this.is_string else this
    return self.func("GET_PATH", target, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """Render TIME_TO_STR as TO_CHAR, casting plain values to TIMESTAMP first."""
    source = expression.this
    if not isinstance(source, exp.TsOrDsToTimestamp):
        source = exp.cast(source, exp.DataType.Type.TIMESTAMP)
    return self.func("TO_CHAR", source, self.format_time(expression))
def datesub_sql(self, expression: exp.DateSub) -> str:
    """Rewrite DATE_SUB as DATEADD with the subtracted count negated in place."""
    count = expression.expression
    if not count:
        self.unsupported("DateSub cannot be transpiled if the subtracted count is unknown")
    else:
        count.replace(count * (-1))

    return date_delta_sql("DATEADD")(self, expression)
def select_sql(self, expression: exp.Select) -> str:
    """Render SELECT, adding LIMIT NULL when only an OFFSET is present,
    since Snowflake requires a LIMIT clause for OFFSET to be valid."""
    has_offset = expression.args.get("offset")
    has_limit = expression.args.get("limit")
    if has_offset and not has_limit:
        expression.limit(exp.Null(), copy=False)
    return super().select_sql(expression)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
safedivide_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
ceil_floor
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
uniquekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonextractquote_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql
xmlelement_sql
partitionbyrangeproperty_sql
partitionbyrangepropertydynamic_sql
unpivotcolumns_sql
analyzesample_sql
analyzestatistics_sql
analyzehistogram_sql
analyzedelete_sql
analyzelistchainedrows_sql
analyzevalidate_sql
analyze_sql
xmltable_sql
xmlnamespace_sql