sqlglot.dialects.snowflake — Snowflake SQL dialect implementation for sqlglot.

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25    no_safe_divide_sql,
  26    no_timestamp_sql,
  27    timestampdiff_sql,
  28    no_make_interval_sql,
  29)
  30from sqlglot.generator import unsupported_args
  31from sqlglot.helper import flatten, is_float, is_int, seq_get
  32from sqlglot.tokens import TokenType
  33
  34if t.TYPE_CHECKING:
  35    from sqlglot._typing import E
  36
  37
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP* family.

    Args:
        name: the Snowflake function name, used for the Anonymous fallback node.
        kind: the target data type the function converts to.
        safe: True for the TRY_* variants, which return NULL instead of erroring.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # Second argument is either a numeric scale or a format string
        scale_or_fmt = seq_get(args, 1)

        int_value = value is not None and is_int(value.name)
        int_scale_or_fmt = scale_or_fmt is not None and scale_or_fmt.is_int

        if isinstance(value, exp.Literal) or (value and scale_or_fmt):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if not safe and (int_value or int_scale_or_fmt):
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=scale_or_fmt)
                if not int_scale_or_fmt and not is_float(value.name):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything else round-trips unchanged as an anonymous function call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  78
  79
  80def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  81    expression = parser.build_var_map(args)
  82
  83    if isinstance(expression, exp.StarMap):
  84        return expression
  85
  86    return exp.Struct(
  87        expressions=[
  88            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  89        ]
  90    )
  91
  92
  93def _build_datediff(args: t.List) -> exp.DateDiff:
  94    return exp.DateDiff(
  95        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  96    )
  97
  98
  99def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 100    def _builder(args: t.List) -> E:
 101        return expr_type(
 102            this=seq_get(args, 2),
 103            expression=seq_get(args, 1),
 104            unit=map_date_part(seq_get(args, 0)),
 105        )
 106
 107    return _builder
 108
 109
 110def _build_bitor(args: t.List) -> exp.BitwiseOr | exp.Anonymous:
 111    if len(args) == 3:
 112        return exp.Anonymous(this="BITOR", expressions=args)
 113
 114    return binary_from_function(exp.BitwiseOr)(args)
 115
 116
 117# https://docs.snowflake.com/en/sql-reference/functions/div0
 118def _build_if_from_div0(args: t.List) -> exp.If:
 119    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 120    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 121
 122    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 123        exp.Is(this=lhs, expression=exp.null()).not_()
 124    )
 125    true = exp.Literal.number(0)
 126    false = exp.Div(this=lhs, expression=rhs)
 127    return exp.If(this=cond, true=true, false=false)
 128
 129
 130# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 131def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 132    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 133    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 134
 135
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    # NULLIFZERO(x) is equivalent to IF(x = 0, NULL, x)
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 140
 141
 142def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 143    flag = expression.text("flag")
 144
 145    if "i" not in flag:
 146        flag += "i"
 147
 148    return self.func(
 149        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 150    )
 151
 152
 153def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 154    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 155
 156    if not regexp_replace.args.get("replacement"):
 157        regexp_replace.set("replacement", exp.Literal.string(""))
 158
 159    return regexp_replace
 160
 161
 162def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 163    def _parse(self: Snowflake.Parser) -> exp.Show:
 164        return self._parse_show_snowflake(*args, **kwargs)
 165
 166    return _parse
 167
 168
 169def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 170    trunc = date_trunc_to_time(args)
 171    trunc.set("unit", map_date_part(trunc.args["unit"]))
 172    return trunc
 173
 174
 175def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 176    """
 177    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 178    so we need to unqualify them.
 179
 180    Example:
 181        >>> from sqlglot import parse_one
 182        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 183        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 184        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 185    """
 186    if isinstance(expression, exp.Pivot) and expression.unpivot:
 187        expression = transforms.unqualify_columns(expression)
 188
 189    return expression
 190
 191
 192def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 193    assert isinstance(expression, exp.Create)
 194
 195    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 196        if expression.this in exp.DataType.NESTED_TYPES:
 197            expression.set("expressions", None)
 198        return expression
 199
 200    props = expression.args.get("properties")
 201    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 202        for schema_expression in expression.this.expressions:
 203            if isinstance(schema_expression, exp.ColumnDef):
 204                column_type = schema_expression.kind
 205                if isinstance(column_type, exp.DataType):
 206                    column_type.transform(_flatten_structured_type, copy=False)
 207
 208    return expression
 209
 210
def _unnest_generate_date_array(unnest: exp.Unnest) -> None:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(start, end, step)) into a Snowflake-friendly
    subquery that projects start + <sequence value> over ARRAY_GENERATE_RANGE.

    Mutates `unnest` in place; no-op unless the step is exactly an interval of 1.
    """
    generate_date_array = unnest.expressions[0]
    start = generate_date_array.args.get("start")
    end = generate_date_array.args.get("end")
    step = generate_date_array.args.get("step")

    # Only a literal step of 1 <unit> can be mapped onto a plain integer sequence
    if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
        return

    unit = step.args.get("unit")

    unnest_alias = unnest.args.get("alias")
    if unnest_alias:
        # Copy so the original alias node isn't shared with the replacement subquery
        unnest_alias = unnest_alias.copy()
        sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
    else:
        sequence_value_name = "value"

    # We'll add the next sequence value to the starting date and project the result
    date_add = _build_date_time_add(exp.DateAdd)(
        [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
    ).as_(sequence_value_name)

    # We use DATEDIFF to compute the number of sequence values needed
    number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
        [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
    )

    unnest.set("expressions", [number_sequence])
    unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))
 241
 242
def _transform_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite GENERATE_DATE_ARRAY usages so they can be generated for Snowflake.

    A call unnested in FROM/JOIN is handled by _unnest_generate_date_array; a bare
    array usage is first wrapped in (SELECT ARRAY_AGG(*) FROM UNNEST(...)) so a
    later pass can unnest it.
    """
    if isinstance(expression, exp.Select):
        for generate_date_array in expression.find_all(exp.GenerateDateArray):
            parent = generate_date_array.parent

            # If GENERATE_DATE_ARRAY is used directly as an array (e.g passed into ARRAY_LENGTH), the transformed Snowflake
            # query is the following (it'll be unnested properly on the next iteration due to copy):
            # SELECT ref(GENERATE_DATE_ARRAY(...)) -> SELECT ref((SELECT ARRAY_AGG(*) FROM UNNEST(GENERATE_DATE_ARRAY(...))))
            if not isinstance(parent, exp.Unnest):
                unnest = exp.Unnest(expressions=[generate_date_array.copy()])
                generate_date_array.replace(
                    exp.select(exp.ArrayAgg(this=exp.Star())).from_(unnest).subquery()
                )

            if (
                isinstance(parent, exp.Unnest)
                and isinstance(parent.parent, (exp.From, exp.Join))
                and len(parent.expressions) == 1
            ):
                _unnest_generate_date_array(parent)

    return expression
 265
 266
 267def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 268    def _builder(args: t.List) -> E:
 269        return expr_type(
 270            this=seq_get(args, 0),
 271            expression=seq_get(args, 1),
 272            position=seq_get(args, 2),
 273            occurrence=seq_get(args, 3),
 274            parameters=seq_get(args, 4),
 275            group=seq_get(args, 5) or exp.Literal.number(0),
 276        )
 277
 278    return _builder
 279
 280
 281def _regexpextract_sql(self, expression: exp.RegexpExtract | exp.RegexpExtractAll) -> str:
 282    # Other dialects don't support all of the following parameters, so we need to
 283    # generate default values as necessary to ensure the transpilation is correct
 284    group = expression.args.get("group")
 285
 286    # To avoid generating all these default values, we set group to None if
 287    # it's 0 (also default value) which doesn't trigger the following chain
 288    if group and group.name == "0":
 289        group = None
 290
 291    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 292    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 293    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 294
 295    return self.func(
 296        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
 297        expression.this,
 298        expression.expression,
 299        position,
 300        occurrence,
 301        parameters,
 302        group,
 303    )
 304
 305
 306def _json_extract_value_array_sql(
 307    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 308) -> str:
 309    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 310    ident = exp.to_identifier("x")
 311
 312    if isinstance(expression, exp.JSONValueArray):
 313        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 314    else:
 315        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 316
 317    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 318
 319    return self.func("TRANSFORM", json_extract, transform_lambda)
 320
 321
class Snowflake(Dialect):
    """Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format elements mapped to the strftime-style tokens sqlglot
    # uses internally; upper- and lower-case variants are both listed
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 362
 363    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 364        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 365        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 366        if (
 367            isinstance(expression, exp.Identifier)
 368            and isinstance(expression.parent, exp.Table)
 369            and expression.name.lower() == "dual"
 370        ):
 371            return expression  # type: ignore
 372
 373        return super().quote_identifier(expression, identify=identify)
 374
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # Snowflake's `:` operator performs VARIANT extraction rather than slicing
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION may also appear as a regular identifier
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW can serve as a table alias, but MATCH_CONDITION cannot
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 387
        # Snowflake function names mapped onto sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (array, value); flipped to match the node
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BITOR": _build_bitor,
            "BIT_OR": _build_bitor,
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes for binary input, hence binary=True
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 461
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Parse TRIM as a regular function instead of with the special TRIM(... FROM ...) parser
        FUNCTION_PARSERS.pop("TRIM")

        # TokenType.TIME is excluded from the timestamp-introducing tokens
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 470
        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <name>, ... — modeled as a Set with unset=True
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 502
        # SHOW <kind> variants; TERSE forms reuse the same parser as the plain ones
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens allowed inside a staged file path (@stage/path%file.ext)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns attached to LATERAL FLATTEN aliases (see _parse_lateral)
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare IN scope is a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a plain identifier, not a table reference
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip Cast wrappers from typed lambda parameters, keeping the identifiers
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 556
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing IN against a subquery."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
 576
 577        def _parse_tag(self) -> exp.Tags:
 578            return self.expression(
 579                exp.Tags,
 580                expressions=self._parse_wrapped_csv(self._parse_property),
 581            )
 582
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # WITH is optional: if the token that dispatched us wasn't WITH, back up
            # so the keyword itself (MASKING/PROJECTION/TAG) is matched below
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # A qualified policy name parses as a Column; normalize it to a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self._parse_tag()

            return None
 605
 606        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 607            if self._match(TokenType.TAG):
 608                return self._parse_tag()
 609
 610            return super()._parse_with_property()
 611
 612        def _parse_create(self) -> exp.Create | exp.Command:
 613            expression = super()._parse_create()
 614            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 615                # Replace the Table node with the enclosed Identifier
 616                expression.this.replace(expression.this.this)
 617
 618            return expression
 619
 620        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 621        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 622        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 623            this = self._parse_var() or self._parse_type()
 624
 625            if not this:
 626                return None
 627
 628            self._match(TokenType.COMMA)
 629            expression = self._parse_bitwise()
 630            this = map_date_part(this)
 631            name = this.name.upper()
 632
 633            if name.startswith("EPOCH"):
 634                if name == "EPOCH_MILLISECOND":
 635                    scale = 10**3
 636                elif name == "EPOCH_MICROSECOND":
 637                    scale = 10**6
 638                elif name == "EPOCH_NANOSECOND":
 639                    scale = 10**9
 640                else:
 641                    scale = None
 642
 643                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 644                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 645
 646                if scale:
 647                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 648
 649                return to_unix
 650
 651            return self.expression(exp.Extract, this=this, expression=expression)
 652
 653        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 654            if is_map:
 655                # Keys are strings in Snowflake's objects, see also:
 656                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 657                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 658                return self._parse_slice(self._parse_string())
 659
 660            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 661
 662        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 663            lateral = super()._parse_lateral()
 664            if not lateral:
 665                return lateral
 666
 667            if isinstance(lateral.this, exp.Explode):
 668                table_alias = lateral.args.get("alias")
 669                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 670                if table_alias and not table_alias.args.get("columns"):
 671                    table_alias.set("columns", columns)
 672                elif not table_alias:
 673                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 674
 675            return lateral
 676
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @stage path can be queried like a table
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options after the location
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # The format is either a string or a named file-format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 710
 711        def _parse_id_var(
 712            self,
 713            any_token: bool = True,
 714            tokens: t.Optional[t.Collection[TokenType]] = None,
 715        ) -> t.Optional[exp.Expression]:
 716            if self._match_text_seq("IDENTIFIER", "("):
 717                identifier = (
 718                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 719                    or self._parse_string()
 720                )
 721                self._match_r_paren()
 722                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 723
 724            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 725
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW <kind> statement.

            Args:
                this: the object kind being shown (e.g. "TABLES", "PRIMARY KEYS").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            # Optional IN <scope>: ACCOUNT, an explicit DB-creatable kind, or a bare name
            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare name: schema-level for SCHEMA_KINDS kinds, table-level otherwise
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 763
 764        def _parse_location_property(self) -> exp.LocationProperty:
 765            self._match(TokenType.EQ)
 766            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 767
 768        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 769            # Parse either a subquery or a staged file
 770            return (
 771                self._parse_select(table=True, parse_subquery_alias=False)
 772                if self._match(TokenType.L_PAREN, advance=False)
 773                else self._parse_table_parts()
 774            )
 775
 776        def _parse_location_path(self) -> exp.Var:
 777            parts = [self._advance_any(ignore_reserved=True)]
 778
 779            # We avoid consuming a comma token because external tables like @foo and @bar
 780            # can be joined in a query with a comma separator, as well as closing paren
 781            # in case of subqueries
 782            while self._is_connected() and not self._match_set(
 783                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 784            ):
 785                parts.append(self._advance_any(ignore_reserved=True))
 786
 787            return exp.var("".join(part.text for part in parts if part))
 788
 789        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 790            this = super()._parse_lambda_arg()
 791
 792            if not this:
 793                return this
 794
 795            typ = self._parse_types()
 796
 797            if typ:
 798                return self.expression(exp.Cast, this=this, to=typ)
 799
 800            return this
 801
 802        def _parse_foreign_key(self) -> exp.ForeignKey:
 803            # inlineFK, the REFERENCES columns are implied
 804            if self._match(TokenType.REFERENCES, advance=False):
 805                return self.expression(exp.ForeignKey)
 806
 807            # outoflineFK, explicitly names the columns
 808            return super()._parse_foreign_key()
 809
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer settings for Snowflake SQL."""

        # A single quote is escaped either with a backslash or by doubling it.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$...$$ dollar-quoted strings are treated as raw strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage commands are passed through verbatim rather than parsed.
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ hint-comment syntax.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a regular statement (see Parser.STATEMENT_PARSERS),
        # not tokenized as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 852
    class Generator(generator.Generator):
        """Generates Snowflake SQL from sqlglot's AST."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-expression rendering overrides on top of the base generator.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Note the argument order swap: ARRAY_CONTAINS(<value>, <array>).
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries has an inclusive end; ARRAY_GENERATE_RANGE's end is
            # exclusive, hence the + 1 (mirrors the parser-side - 1).
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES clause used as a
        # table source (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties space-separated, without a WITH wrapper."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling table-style rendering when it contains
            map/struct expressions Snowflake does not accept there."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render struct types whose fields are bare (unnamed) types as plain OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER(<expr>[, <format>][, <precision>[, <scale>]])."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # Snowflake's function takes nanoseconds, so scale milli by 1e6.
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render geospatial casts as TO_GEOGRAPHY / TO_GEOMETRY calls."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST only for string inputs; otherwise fall back to CAST."""
            value = expression.this

            if value.type is None:
                # Annotate lazily so we only pay for type inference when needed.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render single-argument LOG as the natural log LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with the six
            FLATTEN output columns (seq, key, path, index, value, this) aliased."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A WITH OFFSET alias maps onto FLATTEN's "index" column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The user's column alias (if any) maps onto the "value" column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW command from an exp.Show node."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column constraint as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key as CLUSTER BY (<exprs>)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key, value, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Positional (unnamed) entries get synthetic "_<i>" keys.
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            # OBJECT_CONSTRUCT takes an interleaved key, value, key, value... list.
            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE; weight/accuracy args are unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render the SET clause of an ALTER, including stage options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render StrToTime as [TRY_]TO_TIMESTAMP(<expr>, <format>)."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )

        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render a timestamp subtraction as an addition of the negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )

        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSON extraction as GET_PATH, parsing string inputs first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )

        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
            """Render TimeToStr as TO_CHAR over a timestamp-typed input."""
            this = expression.this
            if not isinstance(this, exp.TsOrDsToTimestamp):
                # TO_CHAR needs a timestamp input, so cast anything else first.
                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

            return self.func("TO_CHAR", this, self.format_time(expression))
# class Snowflake(sqlglot.dialects.dialect.Dialect) — full class listing follows
class Snowflake(Dialect):
    """Dialect definition for Snowflake."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake format elements -> strftime-style directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 363
 364    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 365        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 366        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 367        if (
 368            isinstance(expression, exp.Identifier)
 369            and isinstance(expression.parent, exp.Table)
 370            and expression.name.lower() == "dual"
 371        ):
 372            return expression  # type: ignore
 373
 374        return super().quote_identifier(expression, identify=identify)
 375
    class Parser(parser.Parser):
        """Parser settings and function builders for Snowflake SQL."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `a:b` is Snowflake's VARIANT field access, not a named parameter.
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names -> AST builders.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS(<value>, <array>) — note the swapped argument order.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BITOR": _build_bitor,
            "BIT_OR": _build_bitor,
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes in Snowflake, hence binary=True.
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a plain function call in Snowflake.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
            "TAG": lambda self: self._parse_tag(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> sub-commands, each handled by _parse_show_snowflake.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged file path (@stage/%table paths).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns of the FLATTEN table function, in order.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare `IN name` scope refers to a schema (vs a table).
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Typed lambda params are parsed as casts; strip the casts from the
            # parameter list while keeping them on the body for replacement.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 557
 558        def _negate_range(
 559            self, this: t.Optional[exp.Expression] = None
 560        ) -> t.Optional[exp.Expression]:
 561            if not this:
 562                return this
 563
 564            query = this.args.get("query")
 565            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 566                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 567                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 568                # which can produce different results (most likely a SnowFlake bug).
 569                #
 570                # https://docs.snowflake.com/en/sql-reference/functions/in
 571                # Context: https://github.com/tobymao/sqlglot/issues/3890
 572                return self.expression(
 573                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 574                )
 575
 576            return self.expression(exp.Not, this=this)
 577
 578        def _parse_tag(self) -> exp.Tags:
 579            return self.expression(
 580                exp.Tags,
 581                expressions=self._parse_wrapped_csv(self._parse_property),
 582            )
 583
 584        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
 585            if self._prev.token_type != TokenType.WITH:
 586                self._retreat(self._index - 1)
 587
 588            if self._match_text_seq("MASKING", "POLICY"):
 589                policy = self._parse_column()
 590                return self.expression(
 591                    exp.MaskingPolicyColumnConstraint,
 592                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
 593                    expressions=self._match(TokenType.USING)
 594                    and self._parse_wrapped_csv(self._parse_id_var),
 595                )
 596            if self._match_text_seq("PROJECTION", "POLICY"):
 597                policy = self._parse_column()
 598                return self.expression(
 599                    exp.ProjectionPolicyColumnConstraint,
 600                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
 601                )
 602            if self._match(TokenType.TAG):
 603                return self._parse_tag()
 604
 605            return None
 606
 607        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
 608            if self._match(TokenType.TAG):
 609                return self._parse_tag()
 610
 611            return super()._parse_with_property()
 612
 613        def _parse_create(self) -> exp.Create | exp.Command:
 614            expression = super()._parse_create()
 615            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 616                # Replace the Table node with the enclosed Identifier
 617                expression.this.replace(expression.this.this)
 618
 619            return expression
 620
 621        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 622        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 623        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 624            this = self._parse_var() or self._parse_type()
 625
 626            if not this:
 627                return None
 628
 629            self._match(TokenType.COMMA)
 630            expression = self._parse_bitwise()
 631            this = map_date_part(this)
 632            name = this.name.upper()
 633
 634            if name.startswith("EPOCH"):
 635                if name == "EPOCH_MILLISECOND":
 636                    scale = 10**3
 637                elif name == "EPOCH_MICROSECOND":
 638                    scale = 10**6
 639                elif name == "EPOCH_NANOSECOND":
 640                    scale = 10**9
 641                else:
 642                    scale = None
 643
 644                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 645                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 646
 647                if scale:
 648                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 649
 650                return to_unix
 651
 652            return self.expression(exp.Extract, this=this, expression=expression)
 653
 654        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 655            if is_map:
 656                # Keys are strings in Snowflake's objects, see also:
 657                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 658                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 659                return self._parse_slice(self._parse_string())
 660
 661            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 662
 663        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 664            lateral = super()._parse_lateral()
 665            if not lateral:
 666                return lateral
 667
 668            if isinstance(lateral.this, exp.Explode):
 669                table_alias = lateral.args.get("alias")
 670                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 671                if table_alias and not table_alias.args.get("columns"):
 672                    table_alias.set("columns", columns)
 673                elif not table_alias:
 674                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 675
 676            return lateral
 677
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            A staged file is either a quoted path or an @stage path, optionally
            followed by `(FILE_FORMAT => ..., PATTERN => ...)` options.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string literal or a (qualified) format name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: fall back to ordinary table-name parsing
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 711
 712        def _parse_id_var(
 713            self,
 714            any_token: bool = True,
 715            tokens: t.Optional[t.Collection[TokenType]] = None,
 716        ) -> t.Optional[exp.Expression]:
 717            if self._match_text_seq("IDENTIFIER", "("):
 718                identifier = (
 719                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 720                    or self._parse_string()
 721                )
 722                self._match_r_paren()
 723                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 724
 725            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 726
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a `SHOW <this>` statement: the optional TERSE and
            HISTORY modifiers, LIKE pattern, IN scope, STARTS WITH, LIMIT and FROM.
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # e.g. IN DATABASE my_db / IN SCHEMA my_schema
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Unqualified scope: infer SCHEMA vs TABLE from the object kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 764
 765        def _parse_location_property(self) -> exp.LocationProperty:
 766            self._match(TokenType.EQ)
 767            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 768
 769        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 770            # Parse either a subquery or a staged file
 771            return (
 772                self._parse_select(table=True, parse_subquery_alias=False)
 773                if self._match(TokenType.L_PAREN, advance=False)
 774                else self._parse_table_parts()
 775            )
 776
 777        def _parse_location_path(self) -> exp.Var:
 778            parts = [self._advance_any(ignore_reserved=True)]
 779
 780            # We avoid consuming a comma token because external tables like @foo and @bar
 781            # can be joined in a query with a comma separator, as well as closing paren
 782            # in case of subqueries
 783            while self._is_connected() and not self._match_set(
 784                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 785            ):
 786                parts.append(self._advance_any(ignore_reserved=True))
 787
 788            return exp.var("".join(part.text for part in parts if part))
 789
 790        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 791            this = super()._parse_lambda_arg()
 792
 793            if not this:
 794                return this
 795
 796            typ = self._parse_types()
 797
 798            if typ:
 799                return self.expression(exp.Cast, this=this, to=typ)
 800
 801            return this
 802
 803        def _parse_foreign_key(self) -> exp.ForeignKey:
 804            # inlineFK, the REFERENCES columns are implied
 805            if self._match(TokenType.REFERENCES, advance=False):
 806                return self.expression(exp.ForeignKey)
 807
 808            # outoflineFK, explicitly names the columns
 809            return super()._parse_foreign_key()
 810
    class Tokenizer(tokens.Tokenizer):
        """Snowflake tokenizer configuration."""

        # Quotes are escaped by doubling ('') or with a backslash.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage-management statements are passed through as opaque commands.
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no hint-comment syntax, so drop the base tokenizer's "/*+".
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW has a dedicated parser here, so it must not be tokenized as a command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 853
    class Generator(generator.Generator):
        """Snowflake SQL generator."""

        # ---- dialect feature flags -------------------------------------------
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # AST node -> Snowflake SQL renderers.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (element, array), i.e. reversed args.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.BitwiseOr: rename_func("BITOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's upper bound is exclusive, hence the + 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _transform_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # Snowflake's TIMESTAMPDIFF argument order is (unit, start, end).
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression kinds that cannot appear inside a Snowflake VALUES clause.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated after the main clause."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling table-style rendering when it contains
            expressions Snowflake's VALUES clause does not support."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing typed struct fields to bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER(value[, format][, precision][, scale])."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # Snowflake takes nanoseconds, not milliseconds, so scale by 10^6
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render CAST, using TO_GEOGRAPHY/TO_GEOMETRY for geospatial targets."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to CAST for non-string inputs."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; a single-argument LOG is the natural log, i.e. LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with FLATTEN's fixed
            output columns aliased (seq, key, path, index, value, this)."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A requested offset maps onto FLATTEN's "index" output column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE <kind> <object>."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render CLUSTER BY with its expressions wrapped in parentheses."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a Struct as OBJECT_CONSTRUCT(key1, value1, ...); unnamed
            fields get positional keys '_0', '_1', ..."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE(value, quantile)."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with optional stage file-format/copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render [TRY_]TO_TIMESTAMP(value, format)."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )

        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render timestamp subtraction as a TIMESTAMPADD with a negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )

        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSON extraction as GET_PATH, parsing string inputs first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )

        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
            """Render TO_CHAR(timestamp, format), casting non-timestamp inputs."""
            this = expression.this
            if not isinstance(this, exp.TsOrDsToTimestamp):
                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

            return self.func("TO_CHAR", this, self.format_time(expression))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by commas or by whitespace.

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
364    def quote_identifier(self, expression: E, identify: bool = True) -> E:
365        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
366        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
367        if (
368            isinstance(expression, exp.Identifier)
369            and isinstance(expression.parent, exp.Table)
370            and expression.name.lower() == "dual"
371        ):
372            return expression  # type: ignore
373
374        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character sequence `\n`) to its unescaped version (e.g. an actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
376    class Parser(parser.Parser):
377        IDENTIFY_PIVOT_STRINGS = True
378        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
379        COLON_IS_VARIANT_EXTRACT = True
380
381        ID_VAR_TOKENS = {
382            *parser.Parser.ID_VAR_TOKENS,
383            TokenType.MATCH_CONDITION,
384        }
385
386        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
387        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
388
389        FUNCTIONS = {
390            **parser.Parser.FUNCTIONS,
391            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
392            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
393            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
394                this=seq_get(args, 1), expression=seq_get(args, 0)
395            ),
396            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
397            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
398                start=seq_get(args, 0),
399                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
400                step=seq_get(args, 2),
401            ),
402            "BITXOR": binary_from_function(exp.BitwiseXor),
403            "BIT_XOR": binary_from_function(exp.BitwiseXor),
404            "BITOR": _build_bitor,
405            "BIT_OR": _build_bitor,
406            "BOOLXOR": binary_from_function(exp.Xor),
407            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
408            "DATE_TRUNC": _date_trunc_to_time,
409            "DATEADD": _build_date_time_add(exp.DateAdd),
410            "DATEDIFF": _build_datediff,
411            "DIV0": _build_if_from_div0,
412            "EDITDISTANCE": lambda args: exp.Levenshtein(
413                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
414            ),
415            "FLATTEN": exp.Explode.from_arg_list,
416            "GET_PATH": lambda args, dialect: exp.JSONExtract(
417                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
418            ),
419            "IFF": exp.If.from_arg_list,
420            "LAST_DAY": lambda args: exp.LastDay(
421                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
422            ),
423            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
424            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
425            "LISTAGG": exp.GroupConcat.from_arg_list,
426            "NULLIFZERO": _build_if_from_nullifzero,
427            "OBJECT_CONSTRUCT": _build_object_construct,
428            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
429            "REGEXP_REPLACE": _build_regexp_replace,
430            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
431            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
432            "RLIKE": exp.RegexpLike.from_arg_list,
433            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
434            "TIMEADD": _build_date_time_add(exp.TimeAdd),
435            "TIMEDIFF": _build_datediff,
436            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
437            "TIMESTAMPDIFF": _build_datediff,
438            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
439            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
440            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
441            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
442            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
443            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
444            "TRY_TO_TIMESTAMP": _build_datetime(
445                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
446            ),
447            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
448            "TO_NUMBER": lambda args: exp.ToNumber(
449                this=seq_get(args, 0),
450                format=seq_get(args, 1),
451                precision=seq_get(args, 2),
452                scale=seq_get(args, 3),
453            ),
454            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
455            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
456            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
457            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
458            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
459            "TO_VARCHAR": exp.ToChar.from_arg_list,
460            "ZEROIFNULL": _build_if_from_zeroifnull,
461        }
462
463        FUNCTION_PARSERS = {
464            **parser.Parser.FUNCTION_PARSERS,
465            "DATE_PART": lambda self: self._parse_date_part(),
466            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
467        }
468        FUNCTION_PARSERS.pop("TRIM")
469
470        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
471
472        RANGE_PARSERS = {
473            **parser.Parser.RANGE_PARSERS,
474            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
475            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
476        }
477
478        ALTER_PARSERS = {
479            **parser.Parser.ALTER_PARSERS,
480            "UNSET": lambda self: self.expression(
481                exp.Set,
482                tag=self._match_text_seq("TAG"),
483                expressions=self._parse_csv(self._parse_id_var),
484                unset=True,
485            ),
486        }
487
488        STATEMENT_PARSERS = {
489            **parser.Parser.STATEMENT_PARSERS,
490            TokenType.SHOW: lambda self: self._parse_show(),
491        }
492
493        PROPERTY_PARSERS = {
494            **parser.Parser.PROPERTY_PARSERS,
495            "LOCATION": lambda self: self._parse_location_property(),
496            "TAG": lambda self: self._parse_tag(),
497        }
498
499        TYPE_CONVERTERS = {
500            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
501            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
502        }
503
504        SHOW_PARSERS = {
505            "SCHEMAS": _show_parser("SCHEMAS"),
506            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
507            "OBJECTS": _show_parser("OBJECTS"),
508            "TERSE OBJECTS": _show_parser("OBJECTS"),
509            "TABLES": _show_parser("TABLES"),
510            "TERSE TABLES": _show_parser("TABLES"),
511            "VIEWS": _show_parser("VIEWS"),
512            "TERSE VIEWS": _show_parser("VIEWS"),
513            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
514            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
515            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
516            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
517            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
518            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
519            "SEQUENCES": _show_parser("SEQUENCES"),
520            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
521            "COLUMNS": _show_parser("COLUMNS"),
522            "USERS": _show_parser("USERS"),
523            "TERSE USERS": _show_parser("USERS"),
524        }
525
526        CONSTRAINT_PARSERS = {
527            **parser.Parser.CONSTRAINT_PARSERS,
528            "WITH": lambda self: self._parse_with_constraint(),
529            "MASKING": lambda self: self._parse_with_constraint(),
530            "PROJECTION": lambda self: self._parse_with_constraint(),
531            "TAG": lambda self: self._parse_with_constraint(),
532        }
533
534        STAGED_FILE_SINGLE_TOKENS = {
535            TokenType.DOT,
536            TokenType.MOD,
537            TokenType.SLASH,
538        }
539
540        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
541
542        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
543
544        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
545
546        LAMBDAS = {
547            **parser.Parser.LAMBDAS,
548            TokenType.ARROW: lambda self, expressions: self.expression(
549                exp.Lambda,
550                this=self._replace_lambda(
551                    self._parse_assignment(),
552                    expressions,
553                ),
554                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
555            ),
556        }
557
558        def _negate_range(
559            self, this: t.Optional[exp.Expression] = None
560        ) -> t.Optional[exp.Expression]:
561            if not this:
562                return this
563
564            query = this.args.get("query")
565            if isinstance(this, exp.In) and isinstance(query, exp.Query):
566                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
567                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
568            # which can produce different results (most likely a Snowflake bug).
569                #
570                # https://docs.snowflake.com/en/sql-reference/functions/in
571                # Context: https://github.com/tobymao/sqlglot/issues/3890
572                return self.expression(
573                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
574                )
575
576            return self.expression(exp.Not, this=this)
577
578        def _parse_tag(self) -> exp.Tags:
579            return self.expression(
580                exp.Tags,
581                expressions=self._parse_wrapped_csv(self._parse_property),
582            )
583
584        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
585            if self._prev.token_type != TokenType.WITH:
586                self._retreat(self._index - 1)
587
588            if self._match_text_seq("MASKING", "POLICY"):
589                policy = self._parse_column()
590                return self.expression(
591                    exp.MaskingPolicyColumnConstraint,
592                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
593                    expressions=self._match(TokenType.USING)
594                    and self._parse_wrapped_csv(self._parse_id_var),
595                )
596            if self._match_text_seq("PROJECTION", "POLICY"):
597                policy = self._parse_column()
598                return self.expression(
599                    exp.ProjectionPolicyColumnConstraint,
600                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
601                )
602            if self._match(TokenType.TAG):
603                return self._parse_tag()
604
605            return None
606
607        def _parse_with_property(self) -> t.Optional[exp.Expression] | t.List[exp.Expression]:
608            if self._match(TokenType.TAG):
609                return self._parse_tag()
610
611            return super()._parse_with_property()
612
613        def _parse_create(self) -> exp.Create | exp.Command:
614            expression = super()._parse_create()
615            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
616                # Replace the Table node with the enclosed Identifier
617                expression.this.replace(expression.this.this)
618
619            return expression
620
621        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
622        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
623        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
624            this = self._parse_var() or self._parse_type()
625
626            if not this:
627                return None
628
629            self._match(TokenType.COMMA)
630            expression = self._parse_bitwise()
631            this = map_date_part(this)
632            name = this.name.upper()
633
634            if name.startswith("EPOCH"):
635                if name == "EPOCH_MILLISECOND":
636                    scale = 10**3
637                elif name == "EPOCH_MICROSECOND":
638                    scale = 10**6
639                elif name == "EPOCH_NANOSECOND":
640                    scale = 10**9
641                else:
642                    scale = None
643
644                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
645                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
646
647                if scale:
648                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
649
650                return to_unix
651
652            return self.expression(exp.Extract, this=this, expression=expression)
653
654        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
655            if is_map:
656                # Keys are strings in Snowflake's objects, see also:
657                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
658                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
659                return self._parse_slice(self._parse_string())
660
661            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
662
663        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
664            lateral = super()._parse_lateral()
665            if not lateral:
666                return lateral
667
668            if isinstance(lateral.this, exp.Explode):
669                table_alias = lateral.args.get("alias")
670                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
671                if table_alias and not table_alias.args.get("columns"):
672                    table_alias.set("columns", columns)
673                elif not table_alias:
674                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
675
676            return lateral
677
678        def _parse_table_parts(
679            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
680        ) -> exp.Table:
681            # https://docs.snowflake.com/en/user-guide/querying-stage
682            if self._match(TokenType.STRING, advance=False):
683                table = self._parse_string()
684            elif self._match_text_seq("@", advance=False):
685                table = self._parse_location_path()
686            else:
687                table = None
688
689            if table:
690                file_format = None
691                pattern = None
692
693                wrapped = self._match(TokenType.L_PAREN)
694                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
695                    if self._match_text_seq("FILE_FORMAT", "=>"):
696                        file_format = self._parse_string() or super()._parse_table_parts(
697                            is_db_reference=is_db_reference
698                        )
699                    elif self._match_text_seq("PATTERN", "=>"):
700                        pattern = self._parse_string()
701                    else:
702                        break
703
704                    self._match(TokenType.COMMA)
705
706                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
707            else:
708                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
709
710            return table
711
712        def _parse_id_var(
713            self,
714            any_token: bool = True,
715            tokens: t.Optional[t.Collection[TokenType]] = None,
716        ) -> t.Optional[exp.Expression]:
717            if self._match_text_seq("IDENTIFIER", "("):
718                identifier = (
719                    super()._parse_id_var(any_token=any_token, tokens=tokens)
720                    or self._parse_string()
721                )
722                self._match_r_paren()
723                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
724
725            return super()._parse_id_var(any_token=any_token, tokens=tokens)
726
727        def _parse_show_snowflake(self, this: str) -> exp.Show:
728            scope = None
729            scope_kind = None
730
731        # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
732            # which is syntactically valid but has no effect on the output
733            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
734
735            history = self._match_text_seq("HISTORY")
736
737            like = self._parse_string() if self._match(TokenType.LIKE) else None
738
739            if self._match(TokenType.IN):
740                if self._match_text_seq("ACCOUNT"):
741                    scope_kind = "ACCOUNT"
742                elif self._match_set(self.DB_CREATABLES):
743                    scope_kind = self._prev.text.upper()
744                    if self._curr:
745                        scope = self._parse_table_parts()
746                elif self._curr:
747                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
748                    scope = self._parse_table_parts()
749
750            return self.expression(
751                exp.Show,
752                **{
753                    "terse": terse,
754                    "this": this,
755                    "history": history,
756                    "like": like,
757                    "scope": scope,
758                    "scope_kind": scope_kind,
759                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
760                    "limit": self._parse_limit(),
761                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
762                },
763            )
764
765        def _parse_location_property(self) -> exp.LocationProperty:
766            self._match(TokenType.EQ)
767            return self.expression(exp.LocationProperty, this=self._parse_location_path())
768
769        def _parse_file_location(self) -> t.Optional[exp.Expression]:
770            # Parse either a subquery or a staged file
771            return (
772                self._parse_select(table=True, parse_subquery_alias=False)
773                if self._match(TokenType.L_PAREN, advance=False)
774                else self._parse_table_parts()
775            )
776
777        def _parse_location_path(self) -> exp.Var:
778            parts = [self._advance_any(ignore_reserved=True)]
779
780            # We avoid consuming a comma token because external tables like @foo and @bar
781            # can be joined in a query with a comma separator, as well as closing paren
782            # in case of subqueries
783            while self._is_connected() and not self._match_set(
784                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
785            ):
786                parts.append(self._advance_any(ignore_reserved=True))
787
788            return exp.var("".join(part.text for part in parts if part))
789
790        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
791            this = super()._parse_lambda_arg()
792
793            if not this:
794                return this
795
796            typ = self._parse_types()
797
798            if typ:
799                return self.expression(exp.Cast, this=this, to=typ)
800
801            return this
802
803        def _parse_foreign_key(self) -> exp.ForeignKey:
804            # inlineFK, the REFERENCES columns are implied
805            if self._match(TokenType.REFERENCES, advance=False):
806                return self.expression(exp.ForeignKey)
807
808            # outoflineFK, explicitly names the columns
809            return super()._parse_foreign_key()

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.ASOF: 'ASOF'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.UINT128: 'UINT128'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.FILTER: 'FILTER'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.DESC: 'DESC'>, <TokenType.SET: 'SET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.FINAL: 'FINAL'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SUPER: 'SUPER'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DATE32: 'DATE32'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.END: 'END'>, <TokenType.NULL: 'NULL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.CACHE: 'CACHE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.LEFT: 'LEFT'>, <TokenType.FIXEDSTRING: 
'FIXEDSTRING'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.KILL: 'KILL'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UINT: 'UINT'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.USE: 'USE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.INET: 'INET'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MAP: 'MAP'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.FULL: 'FULL'>, <TokenType.ROW: 'ROW'>, <TokenType.CHAR: 'CHAR'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TAG: 'TAG'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.INT256: 'INT256'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.RANGE: 'RANGE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.IS: 'IS'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.APPLY: 'APPLY'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INT: 'INT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.RING: 'RING'>, <TokenType.NAME: 'NAME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.JSON: 'JSON'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VAR: 
'VAR'>, <TokenType.DIV: 'DIV'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TABLE: 'TABLE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.ALL: 'ALL'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ASC: 'ASC'>, <TokenType.UUID: 'UUID'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.SOME: 'SOME'>, <TokenType.POINT: 'POINT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ANY: 'ANY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.MODEL: 'MODEL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.SHOW: 'SHOW'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.ENUM: 'ENUM'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DETACH: 'DETACH'>, <TokenType.TOP: 'TOP'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.INT128: 'INT128'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.COPY: 
'COPY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LIST: 'LIST'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.DATE: 'DATE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.XML: 'XML'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SINK: 'SINK'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.CASE: 'CASE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>}
TABLE_ALIAS_TOKENS = {<TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.UINT128: 'UINT128'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.FILTER: 'FILTER'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.DESC: 'DESC'>, <TokenType.SET: 'SET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.IPV4: 'IPV4'>, <TokenType.FINAL: 'FINAL'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SUPER: 'SUPER'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DATE32: 'DATE32'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DELETE: 'DELETE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.END: 'END'>, <TokenType.NULL: 'NULL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.CACHE: 'CACHE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UINT256: 'UINT256'>, 
<TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UINT: 'UINT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.USE: 'USE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.INET: 'INET'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MAP: 'MAP'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.CHAR: 'CHAR'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TAG: 'TAG'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.INT256: 'INT256'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.RANGE: 'RANGE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.IS: 'IS'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INT: 'INT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TIME: 'TIME'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.RING: 'RING'>, <TokenType.NAME: 'NAME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.JSON: 'JSON'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.VAR: 'VAR'>, <TokenType.DIV: 'DIV'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TABLE: 'TABLE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.ALL: 'ALL'>, 
<TokenType.STRUCT: 'STRUCT'>, <TokenType.ASC: 'ASC'>, <TokenType.UUID: 'UUID'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.SOME: 'SOME'>, <TokenType.POINT: 'POINT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ANY: 'ANY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.CUBE: 'CUBE'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.MODEL: 'MODEL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.SHOW: 'SHOW'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.ENUM: 'ENUM'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DETACH: 'DETACH'>, <TokenType.TOP: 'TOP'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.INT128: 'INT128'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.COPY: 'COPY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.BEGIN: 'BEGIN'>, 
<TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.LIST: 'LIST'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.DATE: 'DATE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.XML: 'XML'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SINK: 'SINK'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.CASE: 'CASE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.KILL: 'KILL'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function 
_build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 
'JSON_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 
'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 
'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function 
Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BITOR': <function _build_bitor>, 'BIT_OR': <function _build_bitor>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function 
_build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'IMPORTED KEYS', 'SEQUENCES', 'UNIQUE KEYS', 'TABLES', 'VIEWS'}
NON_TABLE_CREATABLES = {'STORAGE INTEGRATION', 'TAG', 'STREAMLIT', 'WAREHOUSE'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
811    class Tokenizer(tokens.Tokenizer):
812        STRING_ESCAPES = ["\\", "'"]
813        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
814        RAW_STRINGS = ["$$"]
815        COMMENTS = ["--", "//", ("/*", "*/")]
816        NESTED_COMMENTS = False
817
818        KEYWORDS = {
819            **tokens.Tokenizer.KEYWORDS,
820            "BYTEINT": TokenType.INT,
821            "CHAR VARYING": TokenType.VARCHAR,
822            "CHARACTER VARYING": TokenType.VARCHAR,
823            "EXCLUDE": TokenType.EXCEPT,
824            "ILIKE ANY": TokenType.ILIKE_ANY,
825            "LIKE ANY": TokenType.LIKE_ANY,
826            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
827            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
828            "MINUS": TokenType.EXCEPT,
829            "NCHAR VARYING": TokenType.VARCHAR,
830            "PUT": TokenType.COMMAND,
831            "REMOVE": TokenType.COMMAND,
832            "RM": TokenType.COMMAND,
833            "SAMPLE": TokenType.TABLE_SAMPLE,
834            "SQL_DOUBLE": TokenType.DOUBLE,
835            "SQL_VARCHAR": TokenType.VARCHAR,
836            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
837            "TAG": TokenType.TAG,
838            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
839            "TOP": TokenType.TOP,
840            "WAREHOUSE": TokenType.WAREHOUSE,
841            "STREAMLIT": TokenType.STREAMLIT,
842        }
843        KEYWORDS.pop("/*+")
844
845        SINGLE_TOKENS = {
846            **tokens.Tokenizer.SINGLE_TOKENS,
847            "$": TokenType.PARAMETER,
848        }
849
850        VAR_SINGLE_TOKENS = {"$"}
851
852        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 
'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': 
<TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>}
class Snowflake.Generator(sqlglot.generator.Generator):
class Generator(generator.Generator):
    """SQL generator for the Snowflake dialect.

    Specializes the base ``generator.Generator`` with Snowflake feature
    flags, an expression -> SQL ``TRANSFORMS`` table, type/property
    mappings, and method overrides (SHOW/DESCRIBE, FLATTEN-based UNNEST,
    TO_GEOGRAPHY/TO_GEOMETRY casts, etc.).
    """

    # --- Dialect capability flags and syntax settings -----------------
    PARAMETER_TOKEN = "$"  # bind/session parameters render as $name
    MATCHED_BY_SOURCE = False
    SINGLE_STRING_INTERVAL = True  # e.g. INTERVAL '1 day' (one quoted string)
    JOIN_HINTS = False
    TABLE_HINTS = False
    QUERY_HINTS = False
    AGGREGATE_FILTER_SUPPORTED = False
    SUPPORTS_TABLE_COPY = False
    COLLATE_IS_FUNC = True  # COLLATE(expr, 'spec') function-call form
    LIMIT_ONLY_LITERALS = True
    JSON_KEY_VALUE_PAIR_SEP = ","
    INSERT_OVERWRITE = " OVERWRITE INTO"
    STRUCT_DELIMITER = ("(", ")")
    COPY_PARAMS_ARE_WRAPPED = False
    COPY_PARAMS_EQ_REQUIRED = True
    STAR_EXCEPT = "EXCLUDE"  # SELECT * EXCLUDE (...) instead of EXCEPT
    SUPPORTS_EXPLODING_PROJECTIONS = False
    ARRAY_CONCAT_IS_VAR_LEN = False
    SUPPORTS_CONVERT_TIMEZONE = True
    EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
    SUPPORTS_MEDIAN = True
    ARRAY_SIZE_NAME = "ARRAY_SIZE"

    # Expression node -> Snowflake SQL renderer. Entries here extend or
    # override the inherited base-generator transforms.
    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArgMax: rename_func("MAX_BY"),
        exp.ArgMin: rename_func("MIN_BY"),
        exp.Array: inline_array_sql,
        exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
        # ARRAY_CONTAINS takes (value, array), i.e. the reverse of the AST order.
        exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
        # CONVERT_TIMEZONE takes the target zone first, then the timestamp.
        exp.AtTimeZone: lambda self, e: self.func(
            "CONVERT_TIMEZONE", e.args.get("zone"), e.this
        ),
        exp.BitwiseXor: rename_func("BITXOR"),
        exp.BitwiseOr: rename_func("BITOR"),
        exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
        exp.DateAdd: date_delta_sql("DATEADD"),
        exp.DateDiff: date_delta_sql("DATEDIFF"),
        exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
        exp.DatetimeDiff: timestampdiff_sql,
        exp.DateStrToDate: datestrtodate_sql,
        exp.DayOfMonth: rename_func("DAYOFMONTH"),
        exp.DayOfWeek: rename_func("DAYOFWEEK"),
        exp.DayOfYear: rename_func("DAYOFYEAR"),
        exp.Explode: rename_func("FLATTEN"),
        exp.Extract: rename_func("DATE_PART"),
        # Inverse of AtTimeZone: interpret the value as UTC, then convert.
        exp.FromTimeZone: lambda self, e: self.func(
            "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
        ),
        # ARRAY_GENERATE_RANGE's upper bound is exclusive, hence the +1.
        exp.GenerateSeries: lambda self, e: self.func(
            "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
        ),
        exp.GroupConcat: rename_func("LISTAGG"),
        exp.If: if_sql(name="IFF", false_value="NULL"),
        exp.JSONExtractArray: _json_extract_value_array_sql,
        exp.JSONExtractScalar: lambda self, e: self.func(
            "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
        ),
        exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
        exp.JSONPathRoot: lambda *_: "",
        exp.JSONValueArray: _json_extract_value_array_sql,
        exp.LogicalAnd: rename_func("BOOLAND_AGG"),
        exp.LogicalOr: rename_func("BOOLOR_AGG"),
        exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        exp.MakeInterval: no_make_interval_sql,
        exp.Max: max_or_greatest,
        exp.Min: min_or_least,
        exp.ParseJSON: lambda self, e: self.func(
            "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
        ),
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.PercentileCont: transforms.preprocess(
            [transforms.add_within_group_for_percentiles]
        ),
        exp.PercentileDisc: transforms.preprocess(
            [transforms.add_within_group_for_percentiles]
        ),
        exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
        exp.RegexpExtract: _regexpextract_sql,
        exp.RegexpExtractAll: _regexpextract_sql,
        exp.RegexpILike: _regexpilike_sql,
        exp.Rand: rename_func("RANDOM"),
        exp.Select: transforms.preprocess(
            [
                transforms.eliminate_distinct_on,
                transforms.explode_to_unnest(),
                transforms.eliminate_semi_and_anti_joins,
                _transform_generate_date_array,
            ]
        ),
        exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
        exp.SHA: rename_func("SHA1"),
        exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
        exp.StartsWith: rename_func("STARTSWITH"),
        exp.StrPosition: lambda self, e: self.func(
            "POSITION", e.args.get("substr"), e.this, e.args.get("position")
        ),
        exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
        exp.Stuff: rename_func("INSERT"),
        exp.TimeAdd: date_delta_sql("TIMEADD"),
        exp.Timestamp: no_timestamp_sql,
        exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
        # TIMESTAMPDIFF(unit, start, end): AST stores (end, start), so swap.
        exp.TimestampDiff: lambda self, e: self.func(
            "TIMESTAMPDIFF", e.unit, e.expression, e.this
        ),
        exp.TimestampTrunc: timestamptrunc_sql(),
        exp.TimeStrToTime: timestrtotime_sql,
        exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
        exp.ToArray: rename_func("TO_ARRAY"),
        exp.ToChar: lambda self, e: self.function_fallback_sql(e),
        exp.ToDouble: rename_func("TO_DOUBLE"),
        exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
        exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
        exp.TsOrDsToDate: lambda self, e: self.func(
            "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
        ),
        exp.UnixToTime: rename_func("TO_TIMESTAMP"),
        exp.Uuid: rename_func("UUID_STRING"),
        exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        exp.WeekOfYear: rename_func("WEEKOFYEAR"),
        exp.Xor: rename_func("BOOLXOR"),
        exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
            rename_func("EDITDISTANCE")
        ),
    }

    # JSON path components Snowflake's GET_PATH syntax can represent.
    SUPPORTED_JSON_PATH_PARTS = {
        exp.JSONPathKey,
        exp.JSONPathRoot,
        exp.JSONPathSubscript,
    }

    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        # Snowflake models nested/struct data as the semi-structured OBJECT type.
        exp.DataType.Type.NESTED: "OBJECT",
        exp.DataType.Type.STRUCT: "OBJECT",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # Expressions that cannot appear inside a VALUES (...) table literal;
    # their presence forces values_sql to fall back to UNION-style generation.
    UNSUPPORTED_VALUES_EXPRESSIONS = {
        exp.Map,
        exp.StarMap,
        exp.Struct,
        exp.VarMap,
    }

    def with_properties(self, properties: exp.Properties) -> str:
        """Render properties space-separated and unwrapped (no WITH (...))."""
        return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

    def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
        """Generate VALUES, disabling the table form when it contains
        expressions Snowflake does not allow there (maps/structs)."""
        if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
            values_as_table = False

        return super().values_sql(expression, values_as_table=values_as_table)

    def datatype_sql(self, expression: exp.DataType) -> str:
        """Render a data type, collapsing typed struct fields to bare OBJECT."""
        expressions = expression.expressions
        if (
            expressions
            and expression.is_type(*exp.DataType.STRUCT_TYPES)
            and any(isinstance(field_type, exp.DataType) for field_type in expressions)
        ):
            # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
            return "OBJECT"

        return super().datatype_sql(expression)

    def tonumber_sql(self, expression: exp.ToNumber) -> str:
        """Render TO_NUMBER(value[, format][, precision][, scale])."""
        return self.func(
            "TO_NUMBER",
            expression.this,
            expression.args.get("format"),
            expression.args.get("precision"),
            expression.args.get("scale"),
        )

    def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
        """Render TIMESTAMP_FROM_PARTS, converting a milliseconds argument
        into the nanoseconds argument Snowflake expects (ms * 1_000_000)."""
        milli = expression.args.get("milli")
        if milli is not None:
            # Mutates the node: drop "milli" and set the equivalent "nano".
            milli_to_nano = milli.pop() * exp.Literal.number(1000000)
            expression.set("nano", milli_to_nano)

        return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

    def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
        """Render casts; GEOGRAPHY/GEOMETRY use conversion functions instead
        of CAST syntax."""
        if expression.is_type(exp.DataType.Type.GEOGRAPHY):
            return self.func("TO_GEOGRAPHY", expression.this)
        if expression.is_type(exp.DataType.Type.GEOMETRY):
            return self.func("TO_GEOMETRY", expression.this)

        return super().cast_sql(expression, safe_prefix=safe_prefix)

    def trycast_sql(self, expression: exp.TryCast) -> str:
        """Render TRY_CAST, but only for string-typed values; otherwise fall
        back to a plain CAST."""
        value = expression.this

        if value.type is None:
            # Infer the value's type lazily; annotate_types is imported here
            # to avoid a module-level import cycle with the optimizer.
            from sqlglot.optimizer.annotate_types import annotate_types

            value = annotate_types(value)

        if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
            return super().trycast_sql(expression)

        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    def log_sql(self, expression: exp.Log) -> str:
        """Single-argument LOG means natural log, which Snowflake spells LN."""
        if not expression.expression:
            return self.func("LN", expression.this)

        return super().log_sql(expression)

    def unnest_sql(self, expression: exp.Unnest) -> str:
        """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a 6-column
        alias matching FLATTEN's output: SEQ, KEY, PATH, INDEX, VALUE, THIS."""
        unnest_alias = expression.args.get("alias")
        offset = expression.args.get("offset")

        columns = [
            exp.to_identifier("seq"),
            exp.to_identifier("key"),
            exp.to_identifier("path"),
            # If WITH OFFSET named a column, reuse it for FLATTEN's INDEX.
            offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
            # The first user alias column (if any) labels FLATTEN's VALUE.
            seq_get(unnest_alias.columns if unnest_alias else [], 0)
            or exp.to_identifier("value"),
            exp.to_identifier("this"),
        ]

        if unnest_alias:
            unnest_alias.set("columns", columns)
        else:
            unnest_alias = exp.TableAlias(this="_u", columns=columns)

        explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
        alias = self.sql(unnest_alias)
        alias = f" AS {alias}" if alias else ""
        return f"{explode}{alias}"

    def show_sql(self, expression: exp.Show) -> str:
        """Render a SHOW command with its optional TERSE/HISTORY/LIKE/IN/
        STARTS WITH/LIMIT/FROM clauses."""
        terse = "TERSE " if expression.args.get("terse") else ""
        history = " HISTORY" if expression.args.get("history") else ""
        like = self.sql(expression, "like")
        like = f" LIKE {like}" if like else ""

        scope = self.sql(expression, "scope")
        scope = f" {scope}" if scope else ""

        scope_kind = self.sql(expression, "scope_kind")
        if scope_kind:
            scope_kind = f" IN {scope_kind}"

        starts_with = self.sql(expression, "starts_with")
        if starts_with:
            starts_with = f" STARTS WITH {starts_with}"

        limit = self.sql(expression, "limit")

        from_ = self.sql(expression, "from")
        if from_:
            from_ = f" FROM {from_}"

        return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

    def describe_sql(self, expression: exp.Describe) -> str:
        """Render DESCRIBE <kind> <name>, defaulting the object kind to TABLE."""
        # Default to table if kind is unknown
        kind_value = expression.args.get("kind") or "TABLE"
        kind = f" {kind_value}" if kind_value else ""
        this = f" {self.sql(expression, 'this')}"
        expressions = self.expressions(expression, flat=True)
        expressions = f" {expressions}" if expressions else ""
        return f"DESCRIBE{kind}{this}{expressions}"

    def generatedasidentitycolumnconstraint_sql(
        self, expression: exp.GeneratedAsIdentityColumnConstraint
    ) -> str:
        """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
        start = expression.args.get("start")
        start = f" START {start}" if start else ""
        increment = expression.args.get("increment")
        increment = f" INCREMENT {increment}" if increment else ""
        return f"AUTOINCREMENT{start}{increment}"

    def cluster_sql(self, expression: exp.Cluster) -> str:
        """Render clustering keys as CLUSTER BY (col, ...)."""
        return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

    def struct_sql(self, expression: exp.Struct) -> str:
        """Render a struct literal as OBJECT_CONSTRUCT(key, value, ...).

        Named fields (PropertyEQ) use their names as string keys; positional
        fields get synthetic "_<index>" keys.
        """
        keys = []
        values = []

        for i, e in enumerate(expression.expressions):
            if isinstance(e, exp.PropertyEQ):
                keys.append(
                    exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                )
                values.append(e.expression)
            else:
                keys.append(exp.Literal.string(f"_{i}"))
                values.append(e)

        # Interleave keys and values: OBJECT_CONSTRUCT(k1, v1, k2, v2, ...).
        return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

    @unsupported_args("weight", "accuracy")
    def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
        """Render APPROX_PERCENTILE(value, quantile); weight/accuracy args
        are flagged as unsupported."""
        return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

    def alterset_sql(self, expression: exp.AlterSet) -> str:
        """Render ALTER ... SET with optional STAGE_FILE_FORMAT,
        STAGE_COPY_OPTIONS and TAG clauses."""
        exprs = self.expressions(expression, flat=True)
        exprs = f" {exprs}" if exprs else ""
        file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
        file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
        copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
        copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
        tag = self.expressions(expression, key="tag", flat=True)
        tag = f" TAG {tag}" if tag else ""

        return f"SET{exprs}{file_format}{copy_options}{tag}"

    def strtotime_sql(self, expression: exp.StrToTime) -> str:
        """Render TO_TIMESTAMP / TRY_TO_TIMESTAMP (when the safe flag is set)."""
        safe_prefix = "TRY_" if expression.args.get("safe") else ""
        return self.func(
            f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
        )

    def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
        """Render timestamp subtraction as an addition of the negated delta."""
        return self.sql(
            exp.TimestampAdd(
                this=expression.this,
                expression=expression.expression * -1,
                unit=expression.unit,
            )
        )

    def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
        """Render JSON extraction as GET_PATH, parsing string operands first."""
        this = expression.this

        # JSON strings are valid coming from other dialects such as BQ
        return self.func(
            "GET_PATH",
            exp.ParseJSON(this=this) if this.is_string else this,
            expression.expression,
        )

    def timetostr_sql(self, expression: exp.TimeToStr) -> str:
        """Render TO_CHAR(ts, fmt), casting non-timestamp inputs first."""
        this = expression.this
        if not isinstance(this, exp.TsOrDsToTimestamp):
            this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

        return self.func("TO_CHAR", this, self.format_time(expression))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 
'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function 
preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, 
<class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.StarMap'>}
def with_properties(self, properties: exp.Properties) -> str:
    # Snowflake emits CREATE properties unwrapped and space-separated,
    # preceded by a single separator rather than a WITH (...) block.
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    # Snowflake's VALUES clause can't carry structural expressions
    # (maps/structs); when any are present, fall back to the non-table
    # rendering provided by the base generator.
    has_unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS)
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    fields = expression.expressions
    is_struct = expression.is_type(*exp.DataType.STRUCT_TYPES)
    typed_fields = any(isinstance(field, exp.DataType) for field in fields)

    if fields and is_struct and typed_fields:
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    # TO_NUMBER(<expr> [, <format>] [, <precision> [, <scale>]]);
    # self.func drops trailing None arguments.
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    # Snowflake's TIMESTAMP_FROM_PARTS has no milliseconds argument,
    # so fold milli into the nanoseconds slot (1 ms = 1_000_000 ns).
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    # Snowflake spells geospatial casts as conversion functions.
    this = expression.this
    if expression.is_type(exp.DataType.Type.GEOGRAPHY):
        return self.func("TO_GEOGRAPHY", this)
    if expression.is_type(exp.DataType.Type.GEOMETRY):
        return self.func("TO_GEOMETRY", this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    value = expression.this

    if value.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    # TRY_CAST only works for string values in Snowflake, so anything
    # that is provably non-text degrades to a plain CAST.
    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    # A single-argument LOG means natural log, which Snowflake calls LN.
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

    FLATTEN always yields six columns (SEQ, KEY, PATH, INDEX, VALUE, THIS),
    so a six-entry column alias list is synthesized to map any caller-supplied
    aliases onto the matching positions.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        # a WITH OFFSET alias (popped so it isn't rendered twice) lands on INDEX
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        # the first user-provided column alias names the VALUE column
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value"),
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        # no alias supplied: fabricate one so the six columns are addressable
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    return f"{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifiers."""

    def fragment(key: str, prefix: str) -> str:
        # Render an optional sub-expression with a leading keyword,
        # or nothing when the argument is absent.
        text = self.sql(expression, key)
        return f"{prefix}{text}" if text else ""

    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""
    like = fragment("like", " LIKE ")
    scope = fragment("scope", " ")
    scope_kind = fragment("scope_kind", " IN ")
    starts_with = fragment("starts_with", " STARTS WITH ")
    limit = self.sql(expression, "limit")
    from_ = fragment("from", " FROM ")

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def describe_sql(self, expression: exp.Describe) -> str:
    # Default to table if kind is unknown
    kind = f" {expression.args.get('kind') or 'TABLE'}"
    this = f" {self.sql(expression, 'this')}"
    exprs = self.expressions(expression, flat=True)
    tail = f" {exprs}" if exprs else ""
    return f"DESCRIBE{kind}{this}{tail}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    # Snowflake expresses identity columns as AUTOINCREMENT [START n] [INCREMENT n].
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    # Snowflake clustering keys are parenthesized: CLUSTER BY (a, b, ...).
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...)."""
    args: t.List[exp.Expression] = []

    for idx, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            # identifier keys become string literals; other key expressions pass through
            key = (
                exp.Literal.string(field.name)
                if isinstance(field.this, exp.Identifier)
                else field.this
            )
            args.extend((key, field.expression))
        else:
            # positional entries get synthetic "_<index>" keys
            args.extend((exp.Literal.string(f"_{idx}"), field))

    return self.func("OBJECT_CONSTRUCT", *args)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    # Snowflake's equivalent is APPROX_PERCENTILE(expr, quantile);
    # weight/accuracy have no counterpart and are flagged as unsupported.
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET options, including stage file-format/copy options."""
    exprs = self.expressions(expression, flat=True)
    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    tag = self.expressions(expression, key="tag", flat=True)

    pieces = [
        f" {exprs}" if exprs else "",
        f" STAGE_FILE_FORMAT = ({file_format})" if file_format else "",
        f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else "",
        f" TAG {tag}" if tag else "",
    ]
    return "SET" + "".join(pieces)
def strtotime_sql(self, expression: exp.StrToTime):
    # Honor the "safe" flag by switching to Snowflake's TRY_ variant.
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    # Snowflake has no TIMESTAMP_SUB; negate the delta and emit TIMESTAMP_ADD.
    negated = expression.expression * -1
    added = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(added)
def jsonextract_sql(self, expression: exp.JSONExtract):
    this = expression.this

    # JSON strings are valid coming from other dialects such as BQ
    target = exp.ParseJSON(this=this) if this.is_string else this
    return self.func("GET_PATH", target, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    # TO_CHAR needs a timestamp operand, so cast unless the input is
    # already being converted to one.
    operand = expression.this
    if not isinstance(operand, exp.TsOrDsToTimestamp):
        operand = exp.cast(operand, exp.DataType.Type.TIMESTAMP)

    return self.func("TO_CHAR", operand, self.format_time(expression))
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql