Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    date_delta_sql,
  12    date_trunc_to_time,
  13    datestrtodate_sql,
  14    build_formatted_time,
  15    if_sql,
  16    inline_array_sql,
  17    max_or_greatest,
  18    min_or_least,
  19    rename_func,
  20    timestamptrunc_sql,
  21    timestrtotime_sql,
  22    var_map_sql,
  23)
  24from sqlglot.helper import flatten, is_float, is_int, seq_get
  25from sqlglot.tokens import TokenType
  26
  27if t.TYPE_CHECKING:
  28    from sqlglot._typing import E
  29
  30
  31# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder that parses Snowflake TO_DATE / TO_TIME / TO_TIMESTAMP-style
    calls into the most specific sqlglot expression available.

    Args:
        name: original Snowflake function name, used for the ``Anonymous`` fallback.
        kind: target data type the Snowflake function converts to.
        safe: True for the TRY_* variants (propagated to ``TsOrDsToDate``).
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # True when the first argument is an integer literal, e.g. TO_TIMESTAMP(0)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    # Numeric input counts units since the epoch; the optional second
                    # argument is the scale (seconds / millis / ...)
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't confidently transpile is kept verbatim
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  60
  61
  62def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  63    expression = parser.build_var_map(args)
  64
  65    if isinstance(expression, exp.StarMap):
  66        return expression
  67
  68    return exp.Struct(
  69        expressions=[
  70            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  71        ]
  72    )
  73
  74
  75def _build_datediff(args: t.List) -> exp.DateDiff:
  76    return exp.DateDiff(
  77        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
  78    )
  79
  80
  81def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  82    def _builder(args: t.List) -> E:
  83        return expr_type(
  84            this=seq_get(args, 2),
  85            expression=seq_get(args, 1),
  86            unit=_map_date_part(seq_get(args, 0)),
  87        )
  88
  89    return _builder
  90
  91
  92# https://docs.snowflake.com/en/sql-reference/functions/div0
  93def _build_if_from_div0(args: t.List) -> exp.If:
  94    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  95    true = exp.Literal.number(0)
  96    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  97    return exp.If(this=cond, true=true, false=false)
  98
  99
 100# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 101def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 102    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 103    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 104
 105
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 110
 111
 112def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 113    flag = expression.text("flag")
 114
 115    if "i" not in flag:
 116        flag += "i"
 117
 118    return self.func(
 119        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 120    )
 121
 122
 123def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 124    if len(args) == 3:
 125        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 126    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 127
 128
 129def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 130    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 131
 132    if not regexp_replace.args.get("replacement"):
 133        regexp_replace.set("replacement", exp.Literal.string(""))
 134
 135    return regexp_replace
 136
 137
 138def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 139    def _parse(self: Snowflake.Parser) -> exp.Show:
 140        return self._parse_show_snowflake(*args, **kwargs)
 141
 142    return _parse
 143
 144
 145DATE_PART_MAPPING = {
 146    "Y": "YEAR",
 147    "YY": "YEAR",
 148    "YYY": "YEAR",
 149    "YYYY": "YEAR",
 150    "YR": "YEAR",
 151    "YEARS": "YEAR",
 152    "YRS": "YEAR",
 153    "MM": "MONTH",
 154    "MON": "MONTH",
 155    "MONS": "MONTH",
 156    "MONTHS": "MONTH",
 157    "D": "DAY",
 158    "DD": "DAY",
 159    "DAYS": "DAY",
 160    "DAYOFMONTH": "DAY",
 161    "WEEKDAY": "DAYOFWEEK",
 162    "DOW": "DAYOFWEEK",
 163    "DW": "DAYOFWEEK",
 164    "WEEKDAY_ISO": "DAYOFWEEKISO",
 165    "DOW_ISO": "DAYOFWEEKISO",
 166    "DW_ISO": "DAYOFWEEKISO",
 167    "YEARDAY": "DAYOFYEAR",
 168    "DOY": "DAYOFYEAR",
 169    "DY": "DAYOFYEAR",
 170    "W": "WEEK",
 171    "WK": "WEEK",
 172    "WEEKOFYEAR": "WEEK",
 173    "WOY": "WEEK",
 174    "WY": "WEEK",
 175    "WEEK_ISO": "WEEKISO",
 176    "WEEKOFYEARISO": "WEEKISO",
 177    "WEEKOFYEAR_ISO": "WEEKISO",
 178    "Q": "QUARTER",
 179    "QTR": "QUARTER",
 180    "QTRS": "QUARTER",
 181    "QUARTERS": "QUARTER",
 182    "H": "HOUR",
 183    "HH": "HOUR",
 184    "HR": "HOUR",
 185    "HOURS": "HOUR",
 186    "HRS": "HOUR",
 187    "M": "MINUTE",
 188    "MI": "MINUTE",
 189    "MIN": "MINUTE",
 190    "MINUTES": "MINUTE",
 191    "MINS": "MINUTE",
 192    "S": "SECOND",
 193    "SEC": "SECOND",
 194    "SECONDS": "SECOND",
 195    "SECS": "SECOND",
 196    "MS": "MILLISECOND",
 197    "MSEC": "MILLISECOND",
 198    "MILLISECONDS": "MILLISECOND",
 199    "US": "MICROSECOND",
 200    "USEC": "MICROSECOND",
 201    "MICROSECONDS": "MICROSECOND",
 202    "NS": "NANOSECOND",
 203    "NSEC": "NANOSECOND",
 204    "NANOSEC": "NANOSECOND",
 205    "NSECOND": "NANOSECOND",
 206    "NSECONDS": "NANOSECOND",
 207    "NANOSECS": "NANOSECOND",
 208    "EPOCH": "EPOCH_SECOND",
 209    "EPOCH_SECONDS": "EPOCH_SECOND",
 210    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
 211    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
 212    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
 213    "TZH": "TIMEZONE_HOUR",
 214    "TZM": "TIMEZONE_MINUTE",
 215}
 216
 217
 218@t.overload
 219def _map_date_part(part: exp.Expression) -> exp.Var:
 220    pass
 221
 222
 223@t.overload
 224def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
 225    pass
 226
 227
 228def _map_date_part(part):
 229    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
 230    return exp.var(mapped) if mapped else part
 231
 232
 233def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 234    trunc = date_trunc_to_time(args)
 235    trunc.set("unit", _map_date_part(trunc.args["unit"]))
 236    return trunc
 237
 238
 239def _build_timestamp_from_parts(args: t.List) -> exp.Func:
 240    if len(args) == 2:
 241        # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
 242        # so we parse this into Anonymous for now instead of introducing complexity
 243        return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
 244
 245    return exp.TimestampFromParts.from_arg_list(args)
 246
 247
 248def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 249    """
 250    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 251    so we need to unqualify them.
 252
 253    Example:
 254        >>> from sqlglot import parse_one
 255        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 256        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 257        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 258    """
 259    if isinstance(expression, exp.Pivot) and expression.unpivot:
 260        expression = transforms.unqualify_columns(expression)
 261
 262    return expression
 263
 264
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Drop the inner type parameters of nested column types (e.g. ARRAY&lt;INT&gt; -> ARRAY)
    in a CREATE statement's schema, unless the table is an Iceberg table.
    """
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # Clearing "expressions" turns a parameterized nested type into its bare form
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    # Iceberg tables keep structured types, so only flatten when no IcebergProperty is set
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    # Mutates the column's type tree in place (copy=False)
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 282
 283
 284class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers are resolved as uppercase in Snowflake
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Snowflake format-model token -> strftime-style token, used when transpiling
    # format strings in TO_DATE/TO_TIMESTAMP-style calls
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 323
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Quote an identifier, except for the special unquoted DUAL table reference."""
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        return super().quote_identifier(expression, identify=identify)
 335
    class Parser(parser.Parser):
        """Parser overrides for Snowflake-specific syntax: staged file references,
        SHOW commands, AT|BEFORE time travel, LATERAL FLATTEN, IDENTIFIER(...) and
        Snowflake's function/constraint vocabulary.
        """

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_JSON_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Drop the base TRIM special-casing so Snowflake's TRIM parses as a plain function
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTER = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output column names of LATERAL FLATTEN(...)
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_conjunction(),
                    expressions,
                ),
                # Typed lambda params are parsed as Casts (see _parse_lambda_arg);
                # only the underlying identifier is kept as the parameter
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }

        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=self._parse_id_var(),
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint, this=self._parse_id_var()
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None

        def _parse_create(self) -> exp.Create | exp.Command:
            """Parse CREATE; non-table creatables (WAREHOUSE, TAG, ...) keep a bare Identifier."""
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier
                expression.this.replace(expression.this.this)

            return expression

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr); EPOCH* parts become unix-time arithmetic."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one entry of a bracketed/OBJECT construct; map keys must be strings."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL; FLATTEN laterals get Snowflake's default output columns."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse AT|BEFORE time-travel clauses and attach them to the table."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT|BEFORE clause; rewind and let other rules handle it
                    self._retreat(index)

            return table

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file (@stage / 'path') forms."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options after a staged file
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, handling Snowflake's IDENTIFIER(...) indirection."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW statement into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP WITH &lt;table&gt;."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location_property(self) -> exp.LocationProperty:
            """Parse LOCATION [=] &lt;stage path&gt;."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_file_location(self) -> t.Optional[exp.Expression]:
            """Parse a COPY file location: either a subquery or a staged file reference."""
            # Parse either a subquery or a staged file
            return (
                self._parse_select(table=True)
                if self._match(TokenType.L_PAREN, advance=False)
                else self._parse_table_parts()
            )

        def _parse_location_path(self) -> exp.Var:
            """Consume a stage path (e.g. @db.schema.stage/dir) into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))

        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
            """Parse a lambda parameter; an optional inline type is kept as a Cast."""
            this = super()._parse_lambda_arg()

            if not this:
                return this

            typ = self._parse_types()

            if typ:
                return self.expression(exp.Cast, this=this, to=typ)

            return this
 747
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer overrides for Snowflake's lexical quirks."""

        # Both a backslash and a doubled quote escape characters inside strings
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ dollar-quoted strings are taken verbatim
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage manipulation statements are passed through as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW gets a real parser (Parser.STATEMENT_PARSERS) instead of being
        # tokenized as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 788
    class Generator(generator.Generator):
        """Snowflake-specific SQL generator: dialect flags, node renderers and
        method overrides for constructs Snowflake spells differently."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"

        # AST node type -> Snowflake SQL renderer
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) — reversed args
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is end-inclusive but ARRAY_GENERATE_RANGE's end is
            # exclusive, hence the + 1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that disable table-style VALUES rendering (see values_sql)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties space-separated, without wrapping parentheses."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, falling back to non-table rendering when the clause
            contains any expression listed in UNSUPPORTED_VALUES_EXPRESSIONS."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render struct-like types with typed fields as a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format, precision and scale."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanos."""
            milli = expression.args.get("milli")
            if milli is not None:
                # TIMESTAMP_FROM_PARTS has no milliseconds slot: 1 ms = 1_000_000 ns
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to CAST for non-string operands."""
            value = expression.this

            # Annotate lazily so we only pay for type inference when needed
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render single-argument LOG as LN (natural logarithm)."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a column alias.

            FLATTEN's output columns are seq, key, path, index, value, this; a
            user-supplied offset/alias column is spliced into the matching slot.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            # (each earlier positional arg must be present once a later one is).
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            """Render EXCEPT; EXCEPT ALL is flagged as unsupported."""
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            """Render INTERSECT; INTERSECT ALL is flagged as unsupported."""
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns using Snowflake's AUTOINCREMENT syntax."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render ALTER TABLE ... SWAP WITH <table>."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key: CLUSTER BY (<exprs>)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, val1, ...).

            Unnamed fields get positional keys "_0", "_1", ... so every value
            has a string key.
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE; weight/accuracy args are unsupported."""
            if expression.args.get("weight") or expression.args.get("accuracy"):
                self.unsupported(
                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
                )

            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with optional stage file format, copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
DATE_PART_MAPPING = {'Y': 'YEAR', 'YY': 'YEAR', 'YYY': 'YEAR', 'YYYY': 'YEAR', 'YR': 'YEAR', 'YEARS': 'YEAR', 'YRS': 'YEAR', 'MM': 'MONTH', 'MON': 'MONTH', 'MONS': 'MONTH', 'MONTHS': 'MONTH', 'D': 'DAY', 'DD': 'DAY', 'DAYS': 'DAY', 'DAYOFMONTH': 'DAY', 'WEEKDAY': 'DAYOFWEEK', 'DOW': 'DAYOFWEEK', 'DW': 'DAYOFWEEK', 'WEEKDAY_ISO': 'DAYOFWEEKISO', 'DOW_ISO': 'DAYOFWEEKISO', 'DW_ISO': 'DAYOFWEEKISO', 'YEARDAY': 'DAYOFYEAR', 'DOY': 'DAYOFYEAR', 'DY': 'DAYOFYEAR', 'W': 'WEEK', 'WK': 'WEEK', 'WEEKOFYEAR': 'WEEK', 'WOY': 'WEEK', 'WY': 'WEEK', 'WEEK_ISO': 'WEEKISO', 'WEEKOFYEARISO': 'WEEKISO', 'WEEKOFYEAR_ISO': 'WEEKISO', 'Q': 'QUARTER', 'QTR': 'QUARTER', 'QTRS': 'QUARTER', 'QUARTERS': 'QUARTER', 'H': 'HOUR', 'HH': 'HOUR', 'HR': 'HOUR', 'HOURS': 'HOUR', 'HRS': 'HOUR', 'M': 'MINUTE', 'MI': 'MINUTE', 'MIN': 'MINUTE', 'MINUTES': 'MINUTE', 'MINS': 'MINUTE', 'S': 'SECOND', 'SEC': 'SECOND', 'SECONDS': 'SECOND', 'SECS': 'SECOND', 'MS': 'MILLISECOND', 'MSEC': 'MILLISECOND', 'MILLISECONDS': 'MILLISECOND', 'US': 'MICROSECOND', 'USEC': 'MICROSECOND', 'MICROSECONDS': 'MICROSECOND', 'NS': 'NANOSECOND', 'NSEC': 'NANOSECOND', 'NANOSEC': 'NANOSECOND', 'NSECOND': 'NANOSECOND', 'NSECONDS': 'NANOSECOND', 'NANOSECS': 'NANOSECOND', 'EPOCH': 'EPOCH_SECOND', 'EPOCH_SECONDS': 'EPOCH_SECOND', 'EPOCH_MILLISECONDS': 'EPOCH_MILLISECOND', 'EPOCH_MICROSECONDS': 'EPOCH_MICROSECOND', 'EPOCH_NANOSECONDS': 'EPOCH_NANOSECOND', 'TZH': 'TIMEZONE_HOUR', 'TZM': 'TIMEZONE_MINUTE'}
class Snowflake(sqlglot.dialects.dialect.Dialect):
 285class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Snowflake time-format tokens -> strftime-style tokens; both upper- and
    # lower-case spellings are accepted on input.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 324
 325    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 326        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 327        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 328        if (
 329            isinstance(expression, exp.Identifier)
 330            and isinstance(expression.parent, exp.Table)
 331            and expression.name.lower() == "dual"
 332        ):
 333            return expression  # type: ignore
 334
 335        return super().quote_identifier(expression, identify=identify)
 336
 337    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_JSON_EXTRACT = True

        # MATCH_CONDITION is a soft keyword: it may still name an identifier
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names -> sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # ARRAY_CONTAINS(value, array) -> ArrayContains(this=array, expression=value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is the 0.5 continuous percentile
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM takes plain arguments, not the ANSI TRIM(x FROM y) form
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTER = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <object kind> parsers; TERSE variants share the same parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged file path (see _parse_location_path)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output columns of the FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose "name" is an identifier, not a table (see _parse_create)
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Lambda params may carry inline types; strip the Cast from the
            # parameter list (the body sees the bare identifier)
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_conjunction(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 508
 509        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
 510            if self._prev.token_type != TokenType.WITH:
 511                self._retreat(self._index - 1)
 512
 513            if self._match_text_seq("MASKING", "POLICY"):
 514                return self.expression(
 515                    exp.MaskingPolicyColumnConstraint,
 516                    this=self._parse_id_var(),
 517                    expressions=self._match(TokenType.USING)
 518                    and self._parse_wrapped_csv(self._parse_id_var),
 519                )
 520            if self._match_text_seq("PROJECTION", "POLICY"):
 521                return self.expression(
 522                    exp.ProjectionPolicyColumnConstraint, this=self._parse_id_var()
 523                )
 524            if self._match(TokenType.TAG):
 525                return self.expression(
 526                    exp.TagColumnConstraint,
 527                    expressions=self._parse_wrapped_csv(self._parse_property),
 528                )
 529
 530            return None
 531
 532        def _parse_create(self) -> exp.Create | exp.Command:
 533            expression = super()._parse_create()
 534            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 535                # Replace the Table node with the enclosed Identifier
 536                expression.this.replace(expression.this.this)
 537
 538            return expression
 539
 540        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 541        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 542        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 543            this = self._parse_var() or self._parse_type()
 544
 545            if not this:
 546                return None
 547
 548            self._match(TokenType.COMMA)
 549            expression = self._parse_bitwise()
 550            this = _map_date_part(this)
 551            name = this.name.upper()
 552
 553            if name.startswith("EPOCH"):
 554                if name == "EPOCH_MILLISECOND":
 555                    scale = 10**3
 556                elif name == "EPOCH_MICROSECOND":
 557                    scale = 10**6
 558                elif name == "EPOCH_NANOSECOND":
 559                    scale = 10**9
 560                else:
 561                    scale = None
 562
 563                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 564                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 565
 566                if scale:
 567                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 568
 569                return to_unix
 570
 571            return self.expression(exp.Extract, this=this, expression=expression)
 572
 573        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 574            if is_map:
 575                # Keys are strings in Snowflake's objects, see also:
 576                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 577                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 578                return self._parse_slice(self._parse_string())
 579
 580            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
 581
 582        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 583            lateral = super()._parse_lateral()
 584            if not lateral:
 585                return lateral
 586
 587            if isinstance(lateral.this, exp.Explode):
 588                table_alias = lateral.args.get("alias")
 589                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 590                if table_alias and not table_alias.args.get("columns"):
 591                    table_alias.set("columns", columns)
 592                elif not table_alias:
 593                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 594
 595            return lateral
 596
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE time-travel clause and attach it to ``table``.

            https://docs.snowflake.com/en/sql-reference/constructs/at-before
            """
            # Remember the token position so we can backtrack on a partial match.
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # kind is the historical-data keyword inside the parens (e.g. TIMESTAMP);
                # the chained `and`s leave it falsy if the parenthesized form is absent.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                # The value only parses after a `=>` arrow.
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause - rewind the token stream.
                    self._retreat(index)

            return table
 619
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file references.

            A staged file can appear where a table does, either as a string literal
            or an @-prefixed stage path, optionally followed by a parenthesized
            FILE_FORMAT / PATTERN option list.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Consume the optional (FILE_FORMAT => ..., PATTERN => ...) list.
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (db-qualified) name.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            # Either form may carry a trailing AT/BEFORE time-travel clause.
            return self._parse_at_before(table)
 653
 654        def _parse_id_var(
 655            self,
 656            any_token: bool = True,
 657            tokens: t.Optional[t.Collection[TokenType]] = None,
 658        ) -> t.Optional[exp.Expression]:
 659            if self._match_text_seq("IDENTIFIER", "("):
 660                identifier = (
 661                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 662                    or self._parse_string()
 663                )
 664                self._match_r_paren()
 665                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 666
 667            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 668
 669        def _parse_show_snowflake(self, this: str) -> exp.Show:
 670            scope = None
 671            scope_kind = None
 672
 673            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
 674            # which is syntactically valid but has no effect on the output
 675            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
 676
 677            history = self._match_text_seq("HISTORY")
 678
 679            like = self._parse_string() if self._match(TokenType.LIKE) else None
 680
 681            if self._match(TokenType.IN):
 682                if self._match_text_seq("ACCOUNT"):
 683                    scope_kind = "ACCOUNT"
 684                elif self._match_set(self.DB_CREATABLES):
 685                    scope_kind = self._prev.text.upper()
 686                    if self._curr:
 687                        scope = self._parse_table_parts()
 688                elif self._curr:
 689                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
 690                    scope = self._parse_table_parts()
 691
 692            return self.expression(
 693                exp.Show,
 694                **{
 695                    "terse": terse,
 696                    "this": this,
 697                    "history": history,
 698                    "like": like,
 699                    "scope": scope,
 700                    "scope_kind": scope_kind,
 701                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
 702                    "limit": self._parse_limit(),
 703                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
 704                },
 705            )
 706
 707        def _parse_alter_table_swap(self) -> exp.SwapTable:
 708            self._match_text_seq("WITH")
 709            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 710
 711        def _parse_location_property(self) -> exp.LocationProperty:
 712            self._match(TokenType.EQ)
 713            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 714
 715        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 716            # Parse either a subquery or a staged file
 717            return (
 718                self._parse_select(table=True)
 719                if self._match(TokenType.L_PAREN, advance=False)
 720                else self._parse_table_parts()
 721            )
 722
 723        def _parse_location_path(self) -> exp.Var:
 724            parts = [self._advance_any(ignore_reserved=True)]
 725
 726            # We avoid consuming a comma token because external tables like @foo and @bar
 727            # can be joined in a query with a comma separator, as well as closing paren
 728            # in case of subqueries
 729            while self._is_connected() and not self._match_set(
 730                (TokenType.COMMA, TokenType.R_PAREN), advance=False
 731            ):
 732                parts.append(self._advance_any(ignore_reserved=True))
 733
 734            return exp.var("".join(part.text for part in parts if part))
 735
 736        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 737            this = super()._parse_lambda_arg()
 738
 739            if not this:
 740                return this
 741
 742            typ = self._parse_types()
 743
 744            if typ:
 745                return self.expression(exp.Cast, this=this, to=typ)
 746
 747            return this
 748
    class Tokenizer(tokens.Tokenizer):
        # Snowflake escapes quotes inside strings with a backslash or a doubled quote.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # Dollar-quoted ($$...$$) strings are taken verbatim.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        # Snowflake-specific keyword-to-token overrides on top of the base tokenizer.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces session/bind parameters (e.g. $1).
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement (see _parse_show_snowflake) rather
        # than being swallowed as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 789
    class Generator(generator.Generator):
        # Snowflake renders bind parameters as $1, $2, ...
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT(...) as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"

        # Per-expression SQL generation overrides for Snowflake syntax.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (element, array), i.e. the reverse argument order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end, so add 1 to the inclusive bound.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Snowflake has no NESTED/STRUCT types; both map to OBJECT.
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot appear inside a VALUES clause (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 921
 922        def with_properties(self, properties: exp.Properties) -> str:
 923            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 924
 925        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 926            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 927                values_as_table = False
 928
 929            return super().values_sql(expression, values_as_table=values_as_table)
 930
 931        def datatype_sql(self, expression: exp.DataType) -> str:
 932            expressions = expression.expressions
 933            if (
 934                expressions
 935                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 936                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 937            ):
 938                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 939                return "OBJECT"
 940
 941            return super().datatype_sql(expression)
 942
 943        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 944            return self.func(
 945                "TO_NUMBER",
 946                expression.this,
 947                expression.args.get("format"),
 948                expression.args.get("precision"),
 949                expression.args.get("scale"),
 950            )
 951
 952        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 953            milli = expression.args.get("milli")
 954            if milli is not None:
 955                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 956                expression.set("nano", milli_to_nano)
 957
 958            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 959
 960        def trycast_sql(self, expression: exp.TryCast) -> str:
 961            value = expression.this
 962
 963            if value.type is None:
 964                from sqlglot.optimizer.annotate_types import annotate_types
 965
 966                value = annotate_types(value)
 967
 968            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 969                return super().trycast_sql(expression)
 970
 971            # TRY_CAST only works for string values in Snowflake
 972            return self.cast_sql(expression)
 973
 974        def log_sql(self, expression: exp.Log) -> str:
 975            if not expression.expression:
 976                return self.func("LN", expression.this)
 977
 978            return super().log_sql(expression)
 979
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

            FLATTEN emits a fixed set of output columns (seq, key, path, index,
            value, this), so the table alias is padded with identifiers for each
            of them, slotting a requested offset alias into the "index" position
            and the first user-provided column alias into the "value" position.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET maps onto FLATTEN's "index" output column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied column alias names the "value" column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1003
1004        def show_sql(self, expression: exp.Show) -> str:
1005            terse = "TERSE " if expression.args.get("terse") else ""
1006            history = " HISTORY" if expression.args.get("history") else ""
1007            like = self.sql(expression, "like")
1008            like = f" LIKE {like}" if like else ""
1009
1010            scope = self.sql(expression, "scope")
1011            scope = f" {scope}" if scope else ""
1012
1013            scope_kind = self.sql(expression, "scope_kind")
1014            if scope_kind:
1015                scope_kind = f" IN {scope_kind}"
1016
1017            starts_with = self.sql(expression, "starts_with")
1018            if starts_with:
1019                starts_with = f" STARTS WITH {starts_with}"
1020
1021            limit = self.sql(expression, "limit")
1022
1023            from_ = self.sql(expression, "from")
1024            if from_:
1025                from_ = f" FROM {from_}"
1026
1027            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1028
1029        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
1030            # Other dialects don't support all of the following parameters, so we need to
1031            # generate default values as necessary to ensure the transpilation is correct
1032            group = expression.args.get("group")
1033            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
1034            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
1035            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
1036
1037            return self.func(
1038                "REGEXP_SUBSTR",
1039                expression.this,
1040                expression.expression,
1041                position,
1042                occurrence,
1043                parameters,
1044                group,
1045            )
1046
1047        def except_op(self, expression: exp.Except) -> str:
1048            if not expression.args.get("distinct"):
1049                self.unsupported("EXCEPT with All is not supported in Snowflake")
1050            return super().except_op(expression)
1051
1052        def intersect_op(self, expression: exp.Intersect) -> str:
1053            if not expression.args.get("distinct"):
1054                self.unsupported("INTERSECT with All is not supported in Snowflake")
1055            return super().intersect_op(expression)
1056
1057        def describe_sql(self, expression: exp.Describe) -> str:
1058            # Default to table if kind is unknown
1059            kind_value = expression.args.get("kind") or "TABLE"
1060            kind = f" {kind_value}" if kind_value else ""
1061            this = f" {self.sql(expression, 'this')}"
1062            expressions = self.expressions(expression, flat=True)
1063            expressions = f" {expressions}" if expressions else ""
1064            return f"DESCRIBE{kind}{this}{expressions}"
1065
1066        def generatedasidentitycolumnconstraint_sql(
1067            self, expression: exp.GeneratedAsIdentityColumnConstraint
1068        ) -> str:
1069            start = expression.args.get("start")
1070            start = f" START {start}" if start else ""
1071            increment = expression.args.get("increment")
1072            increment = f" INCREMENT {increment}" if increment else ""
1073            return f"AUTOINCREMENT{start}{increment}"
1074
1075        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1076            this = self.sql(expression, "this")
1077            return f"SWAP WITH {this}"
1078
1079        def cluster_sql(self, expression: exp.Cluster) -> str:
1080            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1081
1082        def struct_sql(self, expression: exp.Struct) -> str:
1083            keys = []
1084            values = []
1085
1086            for i, e in enumerate(expression.expressions):
1087                if isinstance(e, exp.PropertyEQ):
1088                    keys.append(
1089                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1090                    )
1091                    values.append(e.expression)
1092                else:
1093                    keys.append(exp.Literal.string(f"_{i}"))
1094                    values.append(e)
1095
1096            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1097
1098        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1099            if expression.args.get("weight") or expression.args.get("accuracy"):
1100                self.unsupported(
1101                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1102                )
1103
1104            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1105
1106        def alterset_sql(self, expression: exp.AlterSet) -> str:
1107            exprs = self.expressions(expression, flat=True)
1108            exprs = f" {exprs}" if exprs else ""
1109            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1110            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1111            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1112            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1113            tag = self.expressions(expression, key="tag", flat=True)
1114            tag = f" TAG {tag}" if tag else ""
1115
1116            return f"SET{exprs}{file_format}{copy_options}{tag}"
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False
TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
325    def quote_identifier(self, expression: E, identify: bool = True) -> E:
326        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
327        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
328        if (
329            isinstance(expression, exp.Identifier)
330            and isinstance(expression.parent, exp.Table)
331            and expression.name.lower() == "dual"
332        ):
333            return expression  # type: ignore
334
335        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\\n`) to its unescaped version (e.g. a literal newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
           # Snowflake-specific SQL parser, extending the generic sqlglot parser.
337    class Parser(parser.Parser):
338        IDENTIFY_PIVOT_STRINGS = True
339        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
340        COLON_IS_JSON_EXTRACT = True
341
           # MATCH_CONDITION is a keyword token but may still appear as an identifier.
342        ID_VAR_TOKENS = {
343            *parser.Parser.ID_VAR_TOKENS,
344            TokenType.MATCH_CONDITION,
345        }
346
           # WINDOW is a valid table alias in Snowflake; MATCH_CONDITION is not.
347        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
348        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
349
           # Maps Snowflake function names to builders for the equivalent sqlglot
           # expression nodes, on top of the generic parser's function table.
350        FUNCTIONS = {
351            **parser.Parser.FUNCTIONS,
352            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
353            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
354            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
               # Snowflake's ARRAY_CONTAINS takes (value, array); exp.ArrayContains
               # stores the array first, so the arguments are swapped here.
355            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
356                this=seq_get(args, 1), expression=seq_get(args, 0)
357            ),
358            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
359                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
360                start=seq_get(args, 0),
361                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
362                step=seq_get(args, 2),
363            ),
364            "BITXOR": binary_from_function(exp.BitwiseXor),
365            "BIT_XOR": binary_from_function(exp.BitwiseXor),
366            "BOOLXOR": binary_from_function(exp.Xor),
367            "CONVERT_TIMEZONE": _build_convert_timezone,
368            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
369            "DATE_TRUNC": _date_trunc_to_time,
370            "DATEADD": _build_date_time_add(exp.DateAdd),
371            "DATEDIFF": _build_datediff,
372            "DIV0": _build_if_from_div0,
373            "FLATTEN": exp.Explode.from_arg_list,
374            "GET_PATH": lambda args, dialect: exp.JSONExtract(
375                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
376            ),
377            "IFF": exp.If.from_arg_list,
378            "LAST_DAY": lambda args: exp.LastDay(
379                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
380            ),
381            "LISTAGG": exp.GroupConcat.from_arg_list,
               # MEDIAN(x) is equivalent to PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY x)
382            "MEDIAN": lambda args: exp.PercentileCont(
383                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
384            ),
385            "NULLIFZERO": _build_if_from_nullifzero,
386            "OBJECT_CONSTRUCT": _build_object_construct,
387            "REGEXP_REPLACE": _build_regexp_replace,
388            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
389            "RLIKE": exp.RegexpLike.from_arg_list,
390            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
391            "TIMEADD": _build_date_time_add(exp.TimeAdd),
392            "TIMEDIFF": _build_datediff,
393            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
394            "TIMESTAMPDIFF": _build_datediff,
395            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
396            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
397            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
398            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
399            "TO_NUMBER": lambda args: exp.ToNumber(
400                this=seq_get(args, 0),
401                format=seq_get(args, 1),
402                precision=seq_get(args, 2),
403                scale=seq_get(args, 3),
404            ),
405            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
406            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
407            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
408            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
409            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
410            "TO_VARCHAR": exp.ToChar.from_arg_list,
411            "ZEROIFNULL": _build_if_from_zeroifnull,
412        }
413
           # Functions whose argument lists need bespoke parsing.
414        FUNCTION_PARSERS = {
415            **parser.Parser.FUNCTION_PARSERS,
416            "DATE_PART": lambda self: self._parse_date_part(),
417            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
418        }
           # Drop the base parser's special-cased TRIM handling for Snowflake.
419        FUNCTION_PARSERS.pop("TRIM")
420
           # Unlike the base parser, TokenType.TIME is not treated as a timestamp token.
421        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
422
423        RANGE_PARSERS = {
424            **parser.Parser.RANGE_PARSERS,
425            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
426            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
427        }
428
429        ALTER_PARSERS = {
430            **parser.Parser.ALTER_PARSERS,
431            "UNSET": lambda self: self.expression(
432                exp.Set,
433                tag=self._match_text_seq("TAG"),
434                expressions=self._parse_csv(self._parse_id_var),
435                unset=True,
436            ),
437            "SWAP": lambda self: self._parse_alter_table_swap(),
438        }
439
440        STATEMENT_PARSERS = {
441            **parser.Parser.STATEMENT_PARSERS,
442            TokenType.SHOW: lambda self: self._parse_show(),
443        }
444
445        PROPERTY_PARSERS = {
446            **parser.Parser.PROPERTY_PARSERS,
447            "LOCATION": lambda self: self._parse_location_property(),
448        }
449
450        TYPE_CONVERTER = {
451            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
452            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
453        }
454
           # SHOW <object kind> variants we understand; the TERSE prefix is accepted
           # and detected separately in _parse_show_snowflake.
455        SHOW_PARSERS = {
456            "SCHEMAS": _show_parser("SCHEMAS"),
457            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
458            "OBJECTS": _show_parser("OBJECTS"),
459            "TERSE OBJECTS": _show_parser("OBJECTS"),
460            "TABLES": _show_parser("TABLES"),
461            "TERSE TABLES": _show_parser("TABLES"),
462            "VIEWS": _show_parser("VIEWS"),
463            "TERSE VIEWS": _show_parser("VIEWS"),
464            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
465            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
466            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
467            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
468            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
469            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
470            "SEQUENCES": _show_parser("SEQUENCES"),
471            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
472            "COLUMNS": _show_parser("COLUMNS"),
473            "USERS": _show_parser("USERS"),
474            "TERSE USERS": _show_parser("USERS"),
475        }
476
477        CONSTRAINT_PARSERS = {
478            **parser.Parser.CONSTRAINT_PARSERS,
479            "WITH": lambda self: self._parse_with_constraint(),
480            "MASKING": lambda self: self._parse_with_constraint(),
481            "PROJECTION": lambda self: self._parse_with_constraint(),
482            "TAG": lambda self: self._parse_with_constraint(),
483        }
484
           # Single-character tokens that may appear inside a staged file path.
485        STAGED_FILE_SINGLE_TOKENS = {
486            TokenType.DOT,
487            TokenType.MOD,
488            TokenType.SLASH,
489        }
490
           # Implicit output columns of LATERAL FLATTEN(...); see _parse_lateral.
491        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
492
493        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
494
495        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
496
497        LAMBDAS = {
498            **parser.Parser.LAMBDAS,
499            TokenType.ARROW: lambda self, expressions: self.expression(
500                exp.Lambda,
501                this=self._replace_lambda(
502                    self._parse_conjunction(),
503                    expressions,
504                ),
                   # Typed lambda params are parsed as casts (see _parse_lambda_arg);
                   # unwrap them so the parameter list holds plain identifiers.
505                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
506            ),
507        }
508
509        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
510            if self._prev.token_type != TokenType.WITH:
511                self._retreat(self._index - 1)
512
513            if self._match_text_seq("MASKING", "POLICY"):
514                return self.expression(
515                    exp.MaskingPolicyColumnConstraint,
516                    this=self._parse_id_var(),
517                    expressions=self._match(TokenType.USING)
518                    and self._parse_wrapped_csv(self._parse_id_var),
519                )
520            if self._match_text_seq("PROJECTION", "POLICY"):
521                return self.expression(
522                    exp.ProjectionPolicyColumnConstraint, this=self._parse_id_var()
523                )
524            if self._match(TokenType.TAG):
525                return self.expression(
526                    exp.TagColumnConstraint,
527                    expressions=self._parse_wrapped_csv(self._parse_property),
528                )
529
530            return None
531
532        def _parse_create(self) -> exp.Create | exp.Command:
533            expression = super()._parse_create()
534            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
535                # Replace the Table node with the enclosed Identifier
536                expression.this.replace(expression.this.this)
537
538            return expression
539
540        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
541        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
542        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
543            this = self._parse_var() or self._parse_type()
544
545            if not this:
546                return None
547
548            self._match(TokenType.COMMA)
549            expression = self._parse_bitwise()
550            this = _map_date_part(this)
551            name = this.name.upper()
552
553            if name.startswith("EPOCH"):
554                if name == "EPOCH_MILLISECOND":
555                    scale = 10**3
556                elif name == "EPOCH_MICROSECOND":
557                    scale = 10**6
558                elif name == "EPOCH_NANOSECOND":
559                    scale = 10**9
560                else:
561                    scale = None
562
563                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
564                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
565
566                if scale:
567                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
568
569                return to_unix
570
571            return self.expression(exp.Extract, this=this, expression=expression)
572
573        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
574            if is_map:
575                # Keys are strings in Snowflake's objects, see also:
576                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
577                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
578                return self._parse_slice(self._parse_string())
579
580            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
581
582        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
583            lateral = super()._parse_lateral()
584            if not lateral:
585                return lateral
586
587            if isinstance(lateral.this, exp.Explode):
588                table_alias = lateral.args.get("alias")
589                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
590                if table_alias and not table_alias.args.get("columns"):
591                    table_alias.set("columns", columns)
592                elif not table_alias:
593                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
594
595            return lateral
596
597        def _parse_at_before(self, table: exp.Table) -> exp.Table:
598            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
599            index = self._index
600            if self._match_texts(("AT", "BEFORE")):
601                this = self._prev.text.upper()
602                kind = (
603                    self._match(TokenType.L_PAREN)
604                    and self._match_texts(self.HISTORICAL_DATA_KIND)
605                    and self._prev.text.upper()
606                )
607                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
608
609                if expression:
610                    self._match_r_paren()
611                    when = self.expression(
612                        exp.HistoricalData, this=this, kind=kind, expression=expression
613                    )
614                    table.set("when", when)
615                else:
616                    self._retreat(index)
617
618            return table
619
620        def _parse_table_parts(
621            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
622        ) -> exp.Table:
               """Parse a table reference, which may be a staged-file location.

               Staged files (a string or an @stage path) can carry optional
               FILE_FORMAT => ... and PATTERN => ... options in parentheses.
               Falls back to the generic table parser otherwise, and finishes by
               consuming any trailing AT/BEFORE time-travel clause.
               """
623            # https://docs.snowflake.com/en/user-guide/querying-stage
624            if self._match(TokenType.STRING, advance=False):
625                table = self._parse_string()
626            elif self._match_text_seq("@", advance=False):
627                table = self._parse_location_path()
628            else:
629                table = None
630
631            if table:
632                file_format = None
633                pattern = None
634
                   # Options only apply when wrapped in parens; loop until ')'.
635                wrapped = self._match(TokenType.L_PAREN)
636                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
637                    if self._match_text_seq("FILE_FORMAT", "=>"):
                           # FILE_FORMAT may be a string literal or a format object name.
638                        file_format = self._parse_string() or super()._parse_table_parts(
639                            is_db_reference=is_db_reference
640                        )
641                    elif self._match_text_seq("PATTERN", "=>"):
642                        pattern = self._parse_string()
643                    else:
644                        break
645
646                    self._match(TokenType.COMMA)
647
648                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
649            else:
650                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
651
652            return self._parse_at_before(table)
653
654        def _parse_id_var(
655            self,
656            any_token: bool = True,
657            tokens: t.Optional[t.Collection[TokenType]] = None,
658        ) -> t.Optional[exp.Expression]:
659            if self._match_text_seq("IDENTIFIER", "("):
660                identifier = (
661                    super()._parse_id_var(any_token=any_token, tokens=tokens)
662                    or self._parse_string()
663                )
664                self._match_r_paren()
665                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
666
667            return super()._parse_id_var(any_token=any_token, tokens=tokens)
668
669        def _parse_show_snowflake(self, this: str) -> exp.Show:
               """Parse the tail of a SHOW statement into an exp.Show node.

               `this` is the object kind already consumed (e.g. "TABLES").
               """
670            scope = None
671            scope_kind = None
672
673            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
674            # which is syntactically valid but has no effect on the output
675            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
676
677            history = self._match_text_seq("HISTORY")
678
679            like = self._parse_string() if self._match(TokenType.LIKE) else None
680
681            if self._match(TokenType.IN):
682                if self._match_text_seq("ACCOUNT"):
683                    scope_kind = "ACCOUNT"
684                elif self._match_set(self.DB_CREATABLES):
685                    scope_kind = self._prev.text.upper()
686                    if self._curr:
687                        scope = self._parse_table_parts()
688                elif self._curr:
                       # No explicit scope keyword: infer SCHEMA vs TABLE from the
                       # object kind being shown.
689                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
690                    scope = self._parse_table_parts()
691
692            return self.expression(
693                exp.Show,
694                **{
695                    "terse": terse,
696                    "this": this,
697                    "history": history,
698                    "like": like,
699                    "scope": scope,
700                    "scope_kind": scope_kind,
701                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
702                    "limit": self._parse_limit(),
703                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
704                },
705            )
706
707        def _parse_alter_table_swap(self) -> exp.SwapTable:
708            self._match_text_seq("WITH")
709            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
710
711        def _parse_location_property(self) -> exp.LocationProperty:
712            self._match(TokenType.EQ)
713            return self.expression(exp.LocationProperty, this=self._parse_location_path())
714
715        def _parse_file_location(self) -> t.Optional[exp.Expression]:
716            # Parse either a subquery or a staged file
717            return (
718                self._parse_select(table=True)
719                if self._match(TokenType.L_PAREN, advance=False)
720                else self._parse_table_parts()
721            )
722
723        def _parse_location_path(self) -> exp.Var:
724            parts = [self._advance_any(ignore_reserved=True)]
725
726            # We avoid consuming a comma token because external tables like @foo and @bar
727            # can be joined in a query with a comma separator, as well as closing paren
728            # in case of subqueries
729            while self._is_connected() and not self._match_set(
730                (TokenType.COMMA, TokenType.R_PAREN), advance=False
731            ):
732                parts.append(self._advance_any(ignore_reserved=True))
733
734            return exp.var("".join(part.text for part in parts if part))
735
736        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
737            this = super()._parse_lambda_arg()
738
739            if not this:
740                return this
741
742            typ = self._parse_types()
743
744            if typ:
745                return self.expression(exp.Cast, this=this, to=typ)
746
747            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_JSON_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.DATETIME64: 'DATETIME64'>, <TokenType.CASE: 'CASE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.IS: 'IS'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DESC: 'DESC'>, <TokenType.UINT: 'UINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.IPV4: 'IPV4'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SHOW: 'SHOW'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.USE: 'USE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.XML: 'XML'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.MAP: 'MAP'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT256: 'INT256'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.COPY: 'COPY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.NEXT: 'NEXT'>, 
<TokenType.WINDOW: 'WINDOW'>, <TokenType.TOP: 'TOP'>, <TokenType.LOAD: 'LOAD'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.BIT: 'BIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ALL: 'ALL'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.YEAR: 'YEAR'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ROW: 'ROW'>, <TokenType.UINT256: 'UINT256'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.APPLY: 'APPLY'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.ASOF: 'ASOF'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.TAG: 'TAG'>, 
<TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SET: 'SET'>, <TokenType.ROWS: 'ROWS'>, <TokenType.TEXT: 'TEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.FINAL: 'FINAL'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.END: 'END'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NULL: 'NULL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.NAME: 'NAME'>, <TokenType.ASC: 'ASC'>, <TokenType.INT: 'INT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.FULL: 'FULL'>, <TokenType.KILL: 'KILL'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.DATE: 'DATE'>, <TokenType.SOME: 'SOME'>, <TokenType.DELETE: 'DELETE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TIME: 'TIME'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, 
<TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.VAR: 'VAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.LEFT: 'LEFT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INET: 'INET'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INT4RANGE: 'INT4RANGE'>}
TABLE_ALIAS_TOKENS = {<TokenType.DATETIME64: 'DATETIME64'>, <TokenType.CASE: 'CASE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.IS: 'IS'>, <TokenType.SUPER: 'SUPER'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DESC: 'DESC'>, <TokenType.UINT: 'UINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.IPV4: 'IPV4'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SHOW: 'SHOW'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.TABLE: 'TABLE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.USE: 'USE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.XML: 'XML'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.MAP: 'MAP'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT256: 'INT256'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.COPY: 'COPY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TOP: 'TOP'>, 
<TokenType.LOAD: 'LOAD'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.BIT: 'BIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ALL: 'ALL'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.YEAR: 'YEAR'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ROW: 'ROW'>, <TokenType.UINT256: 'UINT256'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TAG: 'TAG'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.PERCENT: 
'PERCENT'>, <TokenType.SET: 'SET'>, <TokenType.ROWS: 'ROWS'>, <TokenType.TEXT: 'TEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.FINAL: 'FINAL'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.END: 'END'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NULL: 'NULL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UINT128: 'UINT128'>, <TokenType.NAME: 'NAME'>, <TokenType.ASC: 'ASC'>, <TokenType.INT: 'INT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.KILL: 'KILL'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.DATE: 'DATE'>, <TokenType.SOME: 'SOME'>, <TokenType.DELETE: 'DELETE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TIME: 'TIME'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.VAR: 'VAR'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, 
<TokenType.ANTI: 'ANTI'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.INET: 'INET'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.INT4RANGE: 'INT4RANGE'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 
'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 
'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function 
_build_date_time_add.<locals>._builder>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': 
<function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTER = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'UNIQUE KEYS', 'IMPORTED KEYS', 'SEQUENCES', 'VIEWS', 'TABLES'}
NON_TABLE_CREATABLES = {'TAG', 'WAREHOUSE', 'STREAMLIT', 'STORAGE INTEGRATION'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
INTERVAL_VARS
ALIAS_TOKENS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
749    class Tokenizer(tokens.Tokenizer):
           # String literals may escape with backslash or a doubled single quote.
750        STRING_ESCAPES = ["\\", "'"]
751        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
           # $$ ... $$ delimits raw (no-escape) string literals.
752        RAW_STRINGS = ["$$"]
753        COMMENTS = ["--", "//", ("/*", "*/")]
754
           # Snowflake-specific keyword -> token overrides merged over the base map.
755        KEYWORDS = {
756            **tokens.Tokenizer.KEYWORDS,
757            "BYTEINT": TokenType.INT,
758            "CHAR VARYING": TokenType.VARCHAR,
759            "CHARACTER VARYING": TokenType.VARCHAR,
760            "EXCLUDE": TokenType.EXCEPT,
761            "ILIKE ANY": TokenType.ILIKE_ANY,
762            "LIKE ANY": TokenType.LIKE_ANY,
763            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
764            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
765            "MINUS": TokenType.EXCEPT,
766            "NCHAR VARYING": TokenType.VARCHAR,
           # PUT / REMOVE / RM are tokenized as opaque commands rather than parsed.
767            "PUT": TokenType.COMMAND,
768            "REMOVE": TokenType.COMMAND,
769            "RM": TokenType.COMMAND,
770            "SAMPLE": TokenType.TABLE_SAMPLE,
771            "SQL_DOUBLE": TokenType.DOUBLE,
772            "SQL_VARCHAR": TokenType.VARCHAR,
773            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
774            "TAG": TokenType.TAG,
775            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
776            "TOP": TokenType.TOP,
777            "WAREHOUSE": TokenType.WAREHOUSE,
778            "STREAMLIT": TokenType.STREAMLIT,
779        }
780
781        SINGLE_TOKENS = {
782            **tokens.Tokenizer.SINGLE_TOKENS,
           # $ introduces parameters (e.g. session variables / $1 references).
783            "$": TokenType.PARAMETER,
784        }
785
           # $ is also allowed as a character inside identifiers/variables.
786        VAR_SINGLE_TOKENS = {"$"}
787
           # SHOW is removed from the opaque-command set so it gets parsed properly
           # (see Snowflake.Parser.SHOW_PARSERS).
788        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 
'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 
'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 
'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 
'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER 
VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 790    class Generator(generator.Generator):
            # Flags tuning Snowflake-specific SQL rendering.
 791        PARAMETER_TOKEN = "$"
 792        MATCHED_BY_SOURCE = False
 793        SINGLE_STRING_INTERVAL = True
 794        JOIN_HINTS = False
 795        TABLE_HINTS = False
 796        QUERY_HINTS = False
 797        AGGREGATE_FILTER_SUPPORTED = False
 798        SUPPORTS_TABLE_COPY = False
 799        COLLATE_IS_FUNC = True
 800        LIMIT_ONLY_LITERALS = True
 801        JSON_KEY_VALUE_PAIR_SEP = ","
 802        INSERT_OVERWRITE = " OVERWRITE INTO"
 803        STRUCT_DELIMITER = ("(", ")")
 804        COPY_PARAMS_ARE_WRAPPED = False
 805        COPY_PARAMS_EQ_REQUIRED = True
            # Snowflake spells SELECT * EXCEPT(...) as EXCLUDE.
 806        STAR_EXCEPT = "EXCLUDE"
 807
 808        TRANSFORMS = {
            # Expression-node -> Snowflake rendering overrides, merged over the base
            # generator's transforms. Mostly plain function renames plus a few
            # argument-order / preprocessing adjustments.
 809            **generator.Generator.TRANSFORMS,
 810            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 811            exp.ArgMax: rename_func("MAX_BY"),
 812            exp.ArgMin: rename_func("MIN_BY"),
 813            exp.Array: inline_array_sql,
 814            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (element, array) — reversed vs. the canonical node.
 815            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 816            exp.AtTimeZone: lambda self, e: self.func(
 817                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 818            ),
 819            exp.BitwiseXor: rename_func("BITXOR"),
 820            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 821            exp.DateAdd: date_delta_sql("DATEADD"),
 822            exp.DateDiff: date_delta_sql("DATEDIFF"),
 823            exp.DateStrToDate: datestrtodate_sql,
 824            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 825            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 826            exp.DayOfYear: rename_func("DAYOFYEAR"),
 827            exp.Explode: rename_func("FLATTEN"),
 828            exp.Extract: rename_func("DATE_PART"),
 829            exp.FromTimeZone: lambda self, e: self.func(
 830                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 831            ),
            # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the + 1.
 832            exp.GenerateSeries: lambda self, e: self.func(
 833                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 834            ),
 835            exp.GroupConcat: rename_func("LISTAGG"),
 836            exp.If: if_sql(name="IFF", false_value="NULL"),
 837            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 838            exp.JSONExtractScalar: lambda self, e: self.func(
 839                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 840            ),
 841            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root ($) is implicit in Snowflake paths — render nothing.
 842            exp.JSONPathRoot: lambda *_: "",
 843            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 844            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 845            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 846            exp.Max: max_or_greatest,
 847            exp.Min: min_or_least,
 848            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 849            exp.PercentileCont: transforms.preprocess(
 850                [transforms.add_within_group_for_percentiles]
 851            ),
 852            exp.PercentileDisc: transforms.preprocess(
 853                [transforms.add_within_group_for_percentiles]
 854            ),
 855            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 856            exp.RegexpILike: _regexpilike_sql,
 857            exp.Rand: rename_func("RANDOM"),
 858            exp.Select: transforms.preprocess(
 859                [
 860                    transforms.eliminate_distinct_on,
 861                    transforms.explode_to_unnest(),
 862                    transforms.eliminate_semi_and_anti_joins,
 863                ]
 864            ),
 865            exp.SHA: rename_func("SHA1"),
 866            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 867            exp.StartsWith: rename_func("STARTSWITH"),
            # POSITION(substr, str[, position]) — substring comes first.
 868            exp.StrPosition: lambda self, e: self.func(
 869                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 870            ),
 871            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 872            exp.Stuff: rename_func("INSERT"),
 873            exp.TimeAdd: date_delta_sql("TIMEADD"),
            # TIMESTAMPDIFF(unit, start, end) — operand order swapped vs. the node.
 874            exp.TimestampDiff: lambda self, e: self.func(
 875                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 876            ),
 877            exp.TimestampTrunc: timestamptrunc_sql(),
 878            exp.TimeStrToTime: timestrtotime_sql,
 879            exp.TimeToStr: lambda self, e: self.func(
 880                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 881            ),
 882            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 883            exp.ToArray: rename_func("TO_ARRAY"),
 884            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 885            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 886            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 887            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            # Use the non-raising TRY_TO_DATE variant when the node is marked safe.
 888            exp.TsOrDsToDate: lambda self, e: self.func(
 889                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 890            ),
 891            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 892            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 893            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 894            exp.Xor: rename_func("BOOLXOR"),
 895        }
 896
            # Only these JSON path parts can be rendered natively in this dialect.
 897        SUPPORTED_JSON_PATH_PARTS = {
 898            exp.JSONPathKey,
 899            exp.JSONPathRoot,
 900            exp.JSONPathSubscript,
 901        }
 902
            # NESTED/STRUCT both surface as Snowflake's semi-structured OBJECT type.
 903        TYPE_MAPPING = {
 904            **generator.Generator.TYPE_MAPPING,
 905            exp.DataType.Type.NESTED: "OBJECT",
 906            exp.DataType.Type.STRUCT: "OBJECT",
 907        }
 908
            # SET/VOLATILE table properties have no Snowflake equivalent.
 909        PROPERTIES_LOCATION = {
 910            **generator.Generator.PROPERTIES_LOCATION,
 911            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 912            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 913        }
 914
            # Node types that may not appear inside a VALUES clause (see values_sql).
 915        UNSUPPORTED_VALUES_EXPRESSIONS = {
 916            exp.Map,
 917            exp.StarMap,
 918            exp.Struct,
 919            exp.VarMap,
 920        }
 921
 922        def with_properties(self, properties: exp.Properties) -> str:
                # Render properties unwrapped and space-separated (no WITH (...) wrapper).
 923            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 924
 925        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
                # Constructs in UNSUPPORTED_VALUES_EXPRESSIONS can't appear inside a
                # Snowflake VALUES clause — force values_as_table=False and delegate so
                # the base generator picks an alternative representation.
 926            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 927                values_as_table = False
 928
 929            return super().values_sql(expression, values_as_table=values_as_table)
 930
 931        def datatype_sql(self, expression: exp.DataType) -> str:
 932            expressions = expression.expressions
 933            if (
 934                expressions
 935                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 936                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 937            ):
 938                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                    # A bare DataType entry carries no key, so the typed form can't be
                    # produced — fall back to an untyped OBJECT.
 939                return "OBJECT"
 940
 941            return super().datatype_sql(expression)
 942
 943        def tonumber_sql(self, expression: exp.ToNumber) -> str:
                # TO_NUMBER(expr [, format] [, precision [, scale]]); self.func drops
                # the trailing arguments that are absent (None).
 944            return self.func(
 945                "TO_NUMBER",
 946                expression.this,
 947                expression.args.get("format"),
 948                expression.args.get("precision"),
 949                expression.args.get("scale"),
 950            )
 951
 952        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
                # Snowflake's TIMESTAMP_FROM_PARTS takes nanoseconds rather than
                # milliseconds: pop "milli", scale by 1e6, and store it as "nano".
 953            milli = expression.args.get("milli")
 954            if milli is not None:
 955                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 956                expression.set("nano", milli_to_nano)
 957
 958            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 959
 960        def trycast_sql(self, expression: exp.TryCast) -> str:
 961            value = expression.this
 962
                # If the operand's type hasn't been inferred yet, annotate it first so
                # the TEXT/UNKNOWN check below is meaningful.
 963            if value.type is None:
 964                from sqlglot.optimizer.annotate_types import annotate_types
 965
 966                value = annotate_types(value)
 967
 968            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 969                return super().trycast_sql(expression)
 970
 971            # TRY_CAST only works for string values in Snowflake
 972            return self.cast_sql(expression)
 973
 974        def log_sql(self, expression: exp.Log) -> str:
                # Single-argument LOG is rendered as LN; the two-argument (base, value)
                # form falls through to the base generator.
 975            if not expression.expression:
 976                return self.func("LN", expression.this)
 977
 978            return super().log_sql(expression)
 979
 980        def unnest_sql(self, expression: exp.Unnest) -> str:
                # UNNEST is emulated with TABLE(FLATTEN(INPUT => ...)), which exposes
                # the fixed column set SEQ, KEY, PATH, INDEX, VALUE, THIS.
 981            unnest_alias = expression.args.get("alias")
 982            offset = expression.args.get("offset")
 983
                # Positional alias columns: a user-supplied alias column maps onto
                # VALUE, and an explicit offset alias replaces the default INDEX name.
 984            columns = [
 985                exp.to_identifier("seq"),
 986                exp.to_identifier("key"),
 987                exp.to_identifier("path"),
 988                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 989                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 990                or exp.to_identifier("value"),
 991                exp.to_identifier("this"),
 992            ]
 993
 994            if unnest_alias:
 995                unnest_alias.set("columns", columns)
 996            else:
                    # No alias given: synthesize one so the FLATTEN columns are addressable.
 997                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 998
 999            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
1000            alias = self.sql(unnest_alias)
1001            alias = f" AS {alias}" if alias else ""
1002            return f"{explode}{alias}"
1003
1004        def show_sql(self, expression: exp.Show) -> str:
1005            terse = "TERSE " if expression.args.get("terse") else ""
1006            history = " HISTORY" if expression.args.get("history") else ""
1007            like = self.sql(expression, "like")
1008            like = f" LIKE {like}" if like else ""
1009
1010            scope = self.sql(expression, "scope")
1011            scope = f" {scope}" if scope else ""
1012
1013            scope_kind = self.sql(expression, "scope_kind")
1014            if scope_kind:
1015                scope_kind = f" IN {scope_kind}"
1016
1017            starts_with = self.sql(expression, "starts_with")
1018            if starts_with:
1019                starts_with = f" STARTS WITH {starts_with}"
1020
1021            limit = self.sql(expression, "limit")
1022
1023            from_ = self.sql(expression, "from")
1024            if from_:
1025                from_ = f" FROM {from_}"
1026
1027            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1028
1029        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
1030            # Other dialects don't support all of the following parameters, so we need to
1031            # generate default values as necessary to ensure the transpilation is correct
1032            group = expression.args.get("group")
1033            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
1034            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
1035            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
1036
1037            return self.func(
1038                "REGEXP_SUBSTR",
1039                expression.this,
1040                expression.expression,
1041                position,
1042                occurrence,
1043                parameters,
1044                group,
1045            )
1046
1047        def except_op(self, expression: exp.Except) -> str:
1048            if not expression.args.get("distinct"):
1049                self.unsupported("EXCEPT with All is not supported in Snowflake")
1050            return super().except_op(expression)
1051
1052        def intersect_op(self, expression: exp.Intersect) -> str:
1053            if not expression.args.get("distinct"):
1054                self.unsupported("INTERSECT with All is not supported in Snowflake")
1055            return super().intersect_op(expression)
1056
1057        def describe_sql(self, expression: exp.Describe) -> str:
1058            # Default to table if kind is unknown
1059            kind_value = expression.args.get("kind") or "TABLE"
1060            kind = f" {kind_value}" if kind_value else ""
1061            this = f" {self.sql(expression, 'this')}"
1062            expressions = self.expressions(expression, flat=True)
1063            expressions = f" {expressions}" if expressions else ""
1064            return f"DESCRIBE{kind}{this}{expressions}"
1065
1066        def generatedasidentitycolumnconstraint_sql(
1067            self, expression: exp.GeneratedAsIdentityColumnConstraint
1068        ) -> str:
1069            start = expression.args.get("start")
1070            start = f" START {start}" if start else ""
1071            increment = expression.args.get("increment")
1072            increment = f" INCREMENT {increment}" if increment else ""
1073            return f"AUTOINCREMENT{start}{increment}"
1074
1075        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1076            this = self.sql(expression, "this")
1077            return f"SWAP WITH {this}"
1078
1079        def cluster_sql(self, expression: exp.Cluster) -> str:
1080            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1081
1082        def struct_sql(self, expression: exp.Struct) -> str:
1083            keys = []
1084            values = []
1085
1086            for i, e in enumerate(expression.expressions):
1087                if isinstance(e, exp.PropertyEQ):
1088                    keys.append(
1089                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1090                    )
1091                    values.append(e.expression)
1092                else:
1093                    keys.append(exp.Literal.string(f"_{i}"))
1094                    values.append(e)
1095
1096            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1097
1098        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1099            if expression.args.get("weight") or expression.args.get("accuracy"):
1100                self.unsupported(
1101                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1102                )
1103
1104            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1105
1106        def alterset_sql(self, expression: exp.AlterSet) -> str:
1107            exprs = self.expressions(expression, flat=True)
1108            exprs = f" {exprs}" if exprs else ""
1109            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1110            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1111            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1112            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1113            tag = self.expressions(expression, key="tag", flat=True)
1114            tag = f" TAG {tag}" if tag else ""
1115
1116            return f"SET{exprs}{file_format}{copy_options}{tag}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function 
preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: 
<Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: 
<Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 
'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.Map'>}
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
922        def with_properties(self, properties: exp.Properties) -> str:
923            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
def values_sql( self, expression: sqlglot.expressions.Values, values_as_table: bool = True) -> str:
925        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
926            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
927                values_as_table = False
928
929            return super().values_sql(expression, values_as_table=values_as_table)
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
931        def datatype_sql(self, expression: exp.DataType) -> str:
932            expressions = expression.expressions
933            if (
934                expressions
935                and expression.is_type(*exp.DataType.STRUCT_TYPES)
936                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
937            ):
938                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
939                return "OBJECT"
940
941            return super().datatype_sql(expression)
def tonumber_sql(self, expression: sqlglot.expressions.ToNumber) -> str:
943        def tonumber_sql(self, expression: exp.ToNumber) -> str:
944            return self.func(
945                "TO_NUMBER",
946                expression.this,
947                expression.args.get("format"),
948                expression.args.get("precision"),
949                expression.args.get("scale"),
950            )
def timestampfromparts_sql(self, expression: sqlglot.expressions.TimestampFromParts) -> str:
952        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
953            milli = expression.args.get("milli")
954            if milli is not None:
955                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
956                expression.set("nano", milli_to_nano)
957
958            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
960        def trycast_sql(self, expression: exp.TryCast) -> str:
961            value = expression.this
962
963            if value.type is None:
964                from sqlglot.optimizer.annotate_types import annotate_types
965
966                value = annotate_types(value)
967
968            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
969                return super().trycast_sql(expression)
970
971            # TRY_CAST only works for string values in Snowflake
972            return self.cast_sql(expression)
def log_sql(self, expression: sqlglot.expressions.Log) -> str:
974        def log_sql(self, expression: exp.Log) -> str:
975            if not expression.expression:
976                return self.func("LN", expression.this)
977
978            return super().log_sql(expression)
def unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str:
 980        def unnest_sql(self, expression: exp.Unnest) -> str:
 981            unnest_alias = expression.args.get("alias")
 982            offset = expression.args.get("offset")
 983
 984            columns = [
 985                exp.to_identifier("seq"),
 986                exp.to_identifier("key"),
 987                exp.to_identifier("path"),
 988                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 989                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 990                or exp.to_identifier("value"),
 991                exp.to_identifier("this"),
 992            ]
 993
 994            if unnest_alias:
 995                unnest_alias.set("columns", columns)
 996            else:
 997                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 998
 999            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
1000            alias = self.sql(unnest_alias)
1001            alias = f" AS {alias}" if alias else ""
1002            return f"{explode}{alias}"
def show_sql(self, expression: sqlglot.expressions.Show) -> str:
1004        def show_sql(self, expression: exp.Show) -> str:
1005            terse = "TERSE " if expression.args.get("terse") else ""
1006            history = " HISTORY" if expression.args.get("history") else ""
1007            like = self.sql(expression, "like")
1008            like = f" LIKE {like}" if like else ""
1009
1010            scope = self.sql(expression, "scope")
1011            scope = f" {scope}" if scope else ""
1012
1013            scope_kind = self.sql(expression, "scope_kind")
1014            if scope_kind:
1015                scope_kind = f" IN {scope_kind}"
1016
1017            starts_with = self.sql(expression, "starts_with")
1018            if starts_with:
1019                starts_with = f" STARTS WITH {starts_with}"
1020
1021            limit = self.sql(expression, "limit")
1022
1023            from_ = self.sql(expression, "from")
1024            if from_:
1025                from_ = f" FROM {from_}"
1026
1027            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def regexpextract_sql(self, expression: sqlglot.expressions.RegexpExtract) -> str:
1029        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
1030            # Other dialects don't support all of the following parameters, so we need to
1031            # generate default values as necessary to ensure the transpilation is correct
1032            group = expression.args.get("group")
1033            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
1034            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
1035            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
1036
1037            return self.func(
1038                "REGEXP_SUBSTR",
1039                expression.this,
1040                expression.expression,
1041                position,
1042                occurrence,
1043                parameters,
1044                group,
1045            )
def except_op(self, expression: sqlglot.expressions.Except) -> str:
1047        def except_op(self, expression: exp.Except) -> str:
1048            if not expression.args.get("distinct"):
1049                self.unsupported("EXCEPT with All is not supported in Snowflake")
1050            return super().except_op(expression)
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
1052        def intersect_op(self, expression: exp.Intersect) -> str:
1053            if not expression.args.get("distinct"):
1054                self.unsupported("INTERSECT with All is not supported in Snowflake")
1055            return super().intersect_op(expression)
def describe_sql(self, expression: sqlglot.expressions.Describe) -> str:
1057        def describe_sql(self, expression: exp.Describe) -> str:
1058            # Default to table if kind is unknown
1059            kind_value = expression.args.get("kind") or "TABLE"
1060            kind = f" {kind_value}" if kind_value else ""
1061            this = f" {self.sql(expression, 'this')}"
1062            expressions = self.expressions(expression, flat=True)
1063            expressions = f" {expressions}" if expressions else ""
1064            return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql( self, expression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:
1066        def generatedasidentitycolumnconstraint_sql(
1067            self, expression: exp.GeneratedAsIdentityColumnConstraint
1068        ) -> str:
1069            start = expression.args.get("start")
1070            start = f" START {start}" if start else ""
1071            increment = expression.args.get("increment")
1072            increment = f" INCREMENT {increment}" if increment else ""
1073            return f"AUTOINCREMENT{start}{increment}"
def swaptable_sql(self, expression: sqlglot.expressions.SwapTable) -> str:
1075        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1076            this = self.sql(expression, "this")
1077            return f"SWAP WITH {this}"
def cluster_sql(self, expression: sqlglot.expressions.Cluster) -> str:
1079        def cluster_sql(self, expression: exp.Cluster) -> str:
1080            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
def struct_sql(self, expression: sqlglot.expressions.Struct) -> str:
1082        def struct_sql(self, expression: exp.Struct) -> str:
1083            keys = []
1084            values = []
1085
1086            for i, e in enumerate(expression.expressions):
1087                if isinstance(e, exp.PropertyEQ):
1088                    keys.append(
1089                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1090                    )
1091                    values.append(e.expression)
1092                else:
1093                    keys.append(exp.Literal.string(f"_{i}"))
1094                    values.append(e)
1095
1096            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
def approxquantile_sql(self, expression: sqlglot.expressions.ApproxQuantile) -> str:
1098        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1099            if expression.args.get("weight") or expression.args.get("accuracy"):
1100                self.unsupported(
1101                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1102                )
1103
1104            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
def alterset_sql(self, expression: sqlglot.expressions.AlterSet) -> str:
1106        def alterset_sql(self, expression: exp.AlterSet) -> str:
1107            exprs = self.expressions(expression, flat=True)
1108            exprs = f" {exprs}" if exprs else ""
1109            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1110            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1111            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1112            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1113            tag = self.expressions(expression, key="tag", flat=True)
1114            tag = f" TAG {tag}" if tag else ""
1115
1116            return f"SET{exprs}{file_format}{copy_options}{tag}"
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
OUTER_UNION_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql