Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    date_delta_sql,
  11    date_trunc_to_time,
  12    datestrtodate_sql,
  13    build_formatted_time,
  14    if_sql,
  15    inline_array_sql,
  16    max_or_greatest,
  17    min_or_least,
  18    rename_func,
  19    timestamptrunc_sql,
  20    timestrtotime_sql,
  21    var_map_sql,
  22)
  23from sqlglot.helper import flatten, is_float, is_int, seq_get
  24from sqlglot.tokens import TokenType
  25
  26if t.TYPE_CHECKING:
  27    from sqlglot._typing import E
  28
  29
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP-style functions.

    Args:
        name: the original Snowflake function name, used for the Anonymous fallback.
        kind: the target data type the function converts to.
        safe: True for the TRY_* variants (propagated onto TsOrDsToDate as "safe").
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    # Integer literal argument means a unix epoch conversion
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # No transpilation rule applies - keep the call opaque
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  59
  60
  61def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  62    expression = parser.build_var_map(args)
  63
  64    if isinstance(expression, exp.StarMap):
  65        return expression
  66
  67    return exp.Struct(
  68        expressions=[
  69            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  70        ]
  71    )
  72
  73
  74def _build_datediff(args: t.List) -> exp.DateDiff:
  75    return exp.DateDiff(
  76        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
  77    )
  78
  79
  80def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  81    def _builder(args: t.List) -> E:
  82        return expr_type(
  83            this=seq_get(args, 2),
  84            expression=seq_get(args, 1),
  85            unit=_map_date_part(seq_get(args, 0)),
  86        )
  87
  88    return _builder
  89
  90
  91# https://docs.snowflake.com/en/sql-reference/functions/div0
  92def _build_if_from_div0(args: t.List) -> exp.If:
  93    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  94    true = exp.Literal.number(0)
  95    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  96    return exp.If(this=cond, true=true, false=false)
  97
  98
  99# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 100def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 101    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 102    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 103
 104
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 109
 110
 111def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 112    flag = expression.text("flag")
 113
 114    if "i" not in flag:
 115        flag += "i"
 116
 117    return self.func(
 118        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 119    )
 120
 121
 122def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 123    if len(args) == 3:
 124        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 125    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 126
 127
 128def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 129    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 130
 131    if not regexp_replace.args.get("replacement"):
 132        regexp_replace.set("replacement", exp.Literal.string(""))
 133
 134    return regexp_replace
 135
 136
 137def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 138    def _parse(self: Snowflake.Parser) -> exp.Show:
 139        return self._parse_show_snowflake(*args, **kwargs)
 140
 141    return _parse
 142
 143
# Maps Snowflake date/time-part aliases to their canonical part names,
# per https://docs.snowflake.com/en/sql-reference/functions-date-time
DATE_PART_MAPPING = {
    # Year / month / day
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    # Week / quarter
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    # Time-of-day parts
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    # Epoch and timezone parts
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
 215
 216
 217@t.overload
 218def _map_date_part(part: exp.Expression) -> exp.Var:
 219    pass
 220
 221
 222@t.overload
 223def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
 224    pass
 225
 226
 227def _map_date_part(part):
 228    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
 229    return exp.var(mapped) if mapped else part
 230
 231
 232def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 233    trunc = date_trunc_to_time(args)
 234    trunc.set("unit", _map_date_part(trunc.args["unit"]))
 235    return trunc
 236
 237
 238def _build_timestamp_from_parts(args: t.List) -> exp.Func:
 239    if len(args) == 2:
 240        # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
 241        # so we parse this into Anonymous for now instead of introducing complexity
 242        return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
 243
 244    return exp.TimestampFromParts.from_arg_list(args)
 245
 246
 247def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 248    """
 249    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 250    so we need to unqualify them.
 251
 252    Example:
 253        >>> from sqlglot import parse_one
 254        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 255        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 256        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 257    """
 258    if isinstance(expression, exp.Pivot) and expression.unpivot:
 259        expression = transforms.unqualify_columns(expression)
 260
 261    return expression
 262
 263
 264def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 265    assert isinstance(expression, exp.Create)
 266
 267    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 268        if expression.this in exp.DataType.NESTED_TYPES:
 269            expression.set("expressions", None)
 270        return expression
 271
 272    props = expression.args.get("properties")
 273    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 274        for schema_expression in expression.this.expressions:
 275            if isinstance(schema_expression, exp.ColumnDef):
 276                column_type = schema_expression.kind
 277                if isinstance(column_type, exp.DataType):
 278                    column_type.transform(_flatten_structured_type, copy=False)
 279
 280    return expression
 281
 282
class Snowflake(Dialect):
    """The Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Maps Snowflake date/time format tokens to strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 321
 322    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 323        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 324        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 325        if (
 326            isinstance(expression, exp.Identifier)
 327            and isinstance(expression.parent, exp.Table)
 328            and expression.name.lower() == "dual"
 329        ):
 330            return expression  # type: ignore
 331
 332        return super().quote_identifier(expression, identify=identify)
 333
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True

        # Allow MATCH_CONDITION to be used as an identifier...
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # ...but not as a table alias, while WINDOW is allowed as one
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 344
        # Snowflake-specific overrides mapping function names to expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 407
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Drop the dedicated TRIM parser so TRIM is parsed as a regular function call
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is not treated as a timestamp-like token in this dialect
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        # Snowflake supports the LIKE ANY / ILIKE ANY range operators
        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }
 422
        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER TABLE ... SET/UNSET [TAG] ...
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        # SHOW is a full statement in Snowflake, not a command
        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }
 444
        # Maps the object kind after SHOW [TERSE] to a parser for the statement tail
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Tokens that may appear inside a staged-file path like @stage/dir/file
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns produced by FLATTEN, used as default lateral alias columns
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose implicit IN scope is a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 476
        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding Snowflake's `:` path extraction into JSONExtract."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            # Collect every `:`-separated path segment after the column
            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                # Join the segments into a single dotted JSON path expression
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the casts that were peeled off above, innermost first
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
 506
 507        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 508        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), mapping EPOCH_* parts to unix-time arithmetic."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Multiplier from seconds to the requested epoch precision
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                # EPOCH_* is expressed as CAST(expr AS TIMESTAMP) -> unix time [* scale]
                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            # All other parts are a plain EXTRACT
            return self.expression(exp.Extract, this=this, expression=expression)
 539
 540        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 541            if is_map:
 542                # Keys are strings in Snowflake's objects, see also:
 543                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 544                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 545                return self._parse_slice(self._parse_string())
 546
 547            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
 548
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, filling in FLATTEN's output column names when none are aliased."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                # FLATTEN's fixed output columns (SEQ, KEY, PATH, INDEX, VALUE, THIS)
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one so the columns are addressable
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
 563
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse a trailing AT/BEFORE time-travel clause and attach it to *table*."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # e.g. AT(TIMESTAMP => ...): kind is the historical-data keyword, if any
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause - rewind and leave the table untouched
                    self._retreat(index)

            return table
 586
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, including Snowflake staged-file references like @stage/path."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options after a staged file
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Regular (possibly qualified) table name
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            # Either form may be followed by an AT/BEFORE time-travel clause
            return self._parse_at_before(table)
 620
 621        def _parse_id_var(
 622            self,
 623            any_token: bool = True,
 624            tokens: t.Optional[t.Collection[TokenType]] = None,
 625        ) -> t.Optional[exp.Expression]:
 626            if self._match_text_seq("IDENTIFIER", "("):
 627                identifier = (
 628                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 629                    or self._parse_string()
 630                )
 631                self._match_r_paren()
 632                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 633
 634            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 635
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a `SHOW [TERSE] <this>` statement into an exp.Show."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: infer it from the kind of object shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 673
 674        def _parse_alter_table_swap(self) -> exp.SwapTable:
 675            self._match_text_seq("WITH")
 676            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 677
 678        def _parse_location(self) -> exp.LocationProperty:
 679            self._match(TokenType.EQ)
 680            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 681
        def _parse_location_path(self) -> exp.Var:
            """Greedily consume tokens to build a stage/location path such as @stage/dir/file."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 691
    class Tokenizer(tokens.Tokenizer):
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        # Snowflake-specific keyword-to-token overrides
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        # $ introduces parameters (e.g. $1) and may appear in variable names
        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement here, not as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 733
    class Generator(generator.Generator):
        # Flags describing the SQL surface Snowflake accepts when generating
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
 748
 749        TRANSFORMS = {
 750            **generator.Generator.TRANSFORMS,
 751            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 752            exp.ArgMax: rename_func("MAX_BY"),
 753            exp.ArgMin: rename_func("MIN_BY"),
 754            exp.Array: inline_array_sql,
 755            exp.ArrayConcat: rename_func("ARRAY_CAT"),
 756            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 757            exp.AtTimeZone: lambda self, e: self.func(
 758                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 759            ),
 760            exp.BitwiseXor: rename_func("BITXOR"),
 761            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 762            exp.DateAdd: date_delta_sql("DATEADD"),
 763            exp.DateDiff: date_delta_sql("DATEDIFF"),
 764            exp.DateStrToDate: datestrtodate_sql,
 765            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 766            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 767            exp.DayOfYear: rename_func("DAYOFYEAR"),
 768            exp.Explode: rename_func("FLATTEN"),
 769            exp.Extract: rename_func("DATE_PART"),
 770            exp.FromTimeZone: lambda self, e: self.func(
 771                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 772            ),
 773            exp.GenerateSeries: lambda self, e: self.func(
 774                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 775            ),
 776            exp.GroupConcat: rename_func("LISTAGG"),
 777            exp.If: if_sql(name="IFF", false_value="NULL"),
 778            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 779            exp.JSONExtractScalar: lambda self, e: self.func(
 780                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 781            ),
 782            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 783            exp.JSONPathRoot: lambda *_: "",
 784            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 785            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 786            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 787            exp.Max: max_or_greatest,
 788            exp.Min: min_or_least,
 789            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 790            exp.PercentileCont: transforms.preprocess(
 791                [transforms.add_within_group_for_percentiles]
 792            ),
 793            exp.PercentileDisc: transforms.preprocess(
 794                [transforms.add_within_group_for_percentiles]
 795            ),
 796            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 797            exp.RegexpILike: _regexpilike_sql,
 798            exp.Rand: rename_func("RANDOM"),
 799            exp.Select: transforms.preprocess(
 800                [
 801                    transforms.eliminate_distinct_on,
 802                    transforms.explode_to_unnest(),
 803                    transforms.eliminate_semi_and_anti_joins,
 804                ]
 805            ),
 806            exp.SHA: rename_func("SHA1"),
 807            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 808            exp.StartsWith: rename_func("STARTSWITH"),
 809            exp.StrPosition: lambda self, e: self.func(
 810                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 811            ),
 812            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 813            exp.Stuff: rename_func("INSERT"),
 814            exp.TimeAdd: date_delta_sql("TIMEADD"),
 815            exp.TimestampDiff: lambda self, e: self.func(
 816                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 817            ),
 818            exp.TimestampTrunc: timestamptrunc_sql,
 819            exp.TimeStrToTime: timestrtotime_sql,
 820            exp.TimeToStr: lambda self, e: self.func(
 821                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 822            ),
 823            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 824            exp.ToArray: rename_func("TO_ARRAY"),
 825            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 826            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 827            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 828            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 829            exp.TsOrDsToDate: lambda self, e: self.func(
 830                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 831            ),
 832            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 833            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 834            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 835            exp.Xor: rename_func("BOOLXOR"),
 836        }
 837
 838        SUPPORTED_JSON_PATH_PARTS = {
 839            exp.JSONPathKey,
 840            exp.JSONPathRoot,
 841            exp.JSONPathSubscript,
 842        }
 843
 844        TYPE_MAPPING = {
 845            **generator.Generator.TYPE_MAPPING,
 846            exp.DataType.Type.NESTED: "OBJECT",
 847            exp.DataType.Type.STRUCT: "OBJECT",
 848            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
 849        }
 850
 851        STAR_MAPPING = {
 852            "except": "EXCLUDE",
 853            "replace": "RENAME",
 854        }
 855
 856        PROPERTIES_LOCATION = {
 857            **generator.Generator.PROPERTIES_LOCATION,
 858            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 859            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 860        }
 861
 862        UNSUPPORTED_VALUES_EXPRESSIONS = {
 863            exp.Struct,
 864        }
 865
 866        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 867            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 868                values_as_table = False
 869
 870            return super().values_sql(expression, values_as_table=values_as_table)
 871
 872        def datatype_sql(self, expression: exp.DataType) -> str:
 873            expressions = expression.expressions
 874            if (
 875                expressions
 876                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 877                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 878            ):
 879                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 880                return "OBJECT"
 881
 882            return super().datatype_sql(expression)
 883
 884        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 885            return self.func(
 886                "TO_NUMBER",
 887                expression.this,
 888                expression.args.get("format"),
 889                expression.args.get("precision"),
 890                expression.args.get("scale"),
 891            )
 892
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS.

            Snowflake's TIMESTAMP_FROM_PARTS has no millisecond argument, so a
            ``milli`` value is converted to nanoseconds and moved into the
            ``nano`` slot before rendering.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                # 1 millisecond == 1_000_000 nanoseconds; the pop() removes the
                # milli arg so it is not rendered twice.
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 900
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, which Snowflake only supports for string inputs;
            for other (known, non-text) input types a plain CAST is emitted."""
            value = expression.this

            if value.type is None:
                # Deferred import — presumably to avoid an import cycle with the
                # optimizer package; TODO confirm.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
 914
 915        def log_sql(self, expression: exp.Log) -> str:
 916            if not expression.expression:
 917                return self.func("LN", expression.this)
 918
 919            return super().log_sql(expression)
 920
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's ``TABLE(FLATTEN(INPUT => ...))``.

            FLATTEN produces a fixed set of output columns, so a column alias
            list (seq, key, path, index, value, this) is always attached,
            reusing any user-provided aliases where they fit.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A WITH OFFSET alias maps onto FLATTEN's "index" column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied column alias names the "value" column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 944
        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW command with its optional TERSE, HISTORY, LIKE,
            IN <scope>, STARTS WITH, LIMIT and FROM modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            # e.g. "IN DATABASE" / "IN SCHEMA"; the scope name follows the kind.
            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 969
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR(subject, pattern[, position[, occurrence[,
            parameters[, group]]]]).

            The optional arguments are positional, so whenever a later argument
            is present, defaults are synthesized for the earlier ones (chained
            right-to-left below) to keep each value in its correct slot.
            """
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
 987
 988        def except_op(self, expression: exp.Except) -> str:
 989            if not expression.args.get("distinct"):
 990                self.unsupported("EXCEPT with All is not supported in Snowflake")
 991            return super().except_op(expression)
 992
 993        def intersect_op(self, expression: exp.Intersect) -> str:
 994            if not expression.args.get("distinct"):
 995                self.unsupported("INTERSECT with All is not supported in Snowflake")
 996            return super().intersect_op(expression)
 997
 998        def describe_sql(self, expression: exp.Describe) -> str:
 999            # Default to table if kind is unknown
1000            kind_value = expression.args.get("kind") or "TABLE"
1001            kind = f" {kind_value}" if kind_value else ""
1002            this = f" {self.sql(expression, 'this')}"
1003            expressions = self.expressions(expression, flat=True)
1004            expressions = f" {expressions}" if expressions else ""
1005            return f"DESCRIBE{kind}{this}{expressions}"
1006
1007        def generatedasidentitycolumnconstraint_sql(
1008            self, expression: exp.GeneratedAsIdentityColumnConstraint
1009        ) -> str:
1010            start = expression.args.get("start")
1011            start = f" START {start}" if start else ""
1012            increment = expression.args.get("increment")
1013            increment = f" INCREMENT {increment}" if increment else ""
1014            return f"AUTOINCREMENT{start}{increment}"
1015
1016        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1017            this = self.sql(expression, "this")
1018            return f"SWAP WITH {this}"
1019
1020        def with_properties(self, properties: exp.Properties) -> str:
1021            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
1022
1023        def cluster_sql(self, expression: exp.Cluster) -> str:
1024            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1025
        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(k1, v1, k2, v2, ...).

            Named fields (PropertyEQ) keep their names as string keys; unnamed
            positional fields get synthetic "_<index>" keys.
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    # An Identifier key is turned into a string literal; any other
                    # key expression is passed through as-is.
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            # Interleave keys and values into OBJECT_CONSTRUCT's flat argument list.
            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
# Maps Snowflake date-part aliases to their canonical date-part names,
# grouped by the canonical part they resolve to.
DATE_PART_MAPPING = {
    # YEAR
    "Y": "YEAR", "YY": "YEAR", "YYY": "YEAR", "YYYY": "YEAR", "YR": "YEAR",
    "YEARS": "YEAR", "YRS": "YEAR",
    # MONTH
    "MM": "MONTH", "MON": "MONTH", "MONS": "MONTH", "MONTHS": "MONTH",
    # DAY
    "D": "DAY", "DD": "DAY", "DAYS": "DAY", "DAYOFMONTH": "DAY",
    # DAY OF WEEK
    "WEEKDAY": "DAYOFWEEK", "DOW": "DAYOFWEEK", "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO", "DOW_ISO": "DAYOFWEEKISO", "DW_ISO": "DAYOFWEEKISO",
    # DAY OF YEAR
    "YEARDAY": "DAYOFYEAR", "DOY": "DAYOFYEAR", "DY": "DAYOFYEAR",
    # WEEK
    "W": "WEEK", "WK": "WEEK", "WEEKOFYEAR": "WEEK", "WOY": "WEEK", "WY": "WEEK",
    "WEEK_ISO": "WEEKISO", "WEEKOFYEARISO": "WEEKISO", "WEEKOFYEAR_ISO": "WEEKISO",
    # QUARTER
    "Q": "QUARTER", "QTR": "QUARTER", "QTRS": "QUARTER", "QUARTERS": "QUARTER",
    # HOUR
    "H": "HOUR", "HH": "HOUR", "HR": "HOUR", "HOURS": "HOUR", "HRS": "HOUR",
    # MINUTE
    "M": "MINUTE", "MI": "MINUTE", "MIN": "MINUTE", "MINUTES": "MINUTE", "MINS": "MINUTE",
    # SECOND
    "S": "SECOND", "SEC": "SECOND", "SECONDS": "SECOND", "SECS": "SECOND",
    # SUB-SECOND
    "MS": "MILLISECOND", "MSEC": "MILLISECOND", "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND", "USEC": "MICROSECOND", "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND", "NSEC": "NANOSECOND", "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND", "NSECONDS": "NANOSECOND", "NANOSECS": "NANOSECOND",
    # EPOCH
    "EPOCH": "EPOCH_SECOND", "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    # TIMEZONE
    "TZH": "TIMEZONE_HOUR", "TZM": "TIMEZONE_MINUTE",
}
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    """Snowflake dialect: identifier normalization, time formats, and the
    Snowflake-specific Parser, Tokenizer and Generator."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Maps Snowflake time-format tokens to Python strftime directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 322
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Quote an identifier, except for the special table name DUAL.

        This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake
        treats an unquoted DUAL keyword in a special way and does not map it to
        a user-defined table.
        """
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        return super().quote_identifier(expression, identify=identify)
 334
    class Parser(parser.Parser):
        """Parses Snowflake SQL text into a sqlglot expression tree."""

        IDENTIFY_PIVOT_STRINGS = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake-specific function-name -> AST builder overrides.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's ARRAY_CONTAINS takes (value, array): swap into our shape.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is the 50th percentile.
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM takes plain positional args, not the ANSI TRIM syntax.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # SHOW <object kind> commands supported by _parse_show_snowflake.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Tokens allowed inside a staged-file path such as @stage/dir%2Ffile.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Fixed output columns of Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare "IN <name>" scope refers to a schema, not a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, extending the base parser with Snowflake's
            ``:`` semi-structured access operator (col:a.b::int -> GET_PATH + cast)."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                # All ":"-separated segments collapse into a single dotted JSON path.
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the casts that were peeled off, innermost first.
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
 507
 508        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 509        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(<part>, <expr>), normalizing part aliases and
            converting EPOCH* parts into a unix-time extraction."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            # Normalize aliases like "YY" -> "YEAR" via DATE_PART_MAPPING.
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH parts become TimeToUnix (seconds), scaled up for the
                # milli/micro/nano variants.
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
 540
        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key-value entry inside brackets/braces; map keys must be
            string literals in Snowflake."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
 549
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL; when it wraps FLATTEN (parsed as Explode), attach
            FLATTEN's fixed output column names to the table alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    # User gave a table alias but no column aliases: add the defaults.
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one so the columns are addressable.
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
 564
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional time-travel clause, e.g.
            ``AT(TIMESTAMP => ...)`` / ``BEFORE(STATEMENT => ...)``, onto ``table``."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete time-travel clause: rewind so the tokens can
                    # be parsed as something else (e.g. a column named AT).
                    self._retreat(index)

            return table
 587
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, extending the base parser with Snowflake
            staged-file locations ('path' or @stage) and their optional
            (FILE_FORMAT => ..., PATTERN => ...) argument list."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a quoted name or a dotted object reference.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            # Either form may be followed by an AT/BEFORE time-travel clause.
            return self._parse_at_before(table)
 621
        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, additionally supporting Snowflake's
            IDENTIFIER(<name or string>) indirection, which is preserved as an
            anonymous IDENTIFIER(...) call."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 636
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW command into an exp.Show node.

            ``this`` is the object kind registered in SHOW_PARSERS (e.g. "TABLES").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare "IN <name>": the scope kind depends on what kind of
                    # objects are being listed (see SCHEMA_KINDS).
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 674
 675        def _parse_alter_table_swap(self) -> exp.SwapTable:
 676            self._match_text_seq("WITH")
 677            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 678
 679        def _parse_location(self) -> exp.LocationProperty:
 680            self._match(TokenType.EQ)
 681            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 682
 683        def _parse_location_path(self) -> exp.Var:
 684            parts = [self._advance_any(ignore_reserved=True)]
 685
 686            # We avoid consuming a comma token because external tables like @foo and @bar
 687            # can be joined in a query with a comma separator.
 688            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
 689                parts.append(self._advance_any(ignore_reserved=True))
 690
 691            return exp.var("".join(part.text for part in parts if part))
 692
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer tuned for Snowflake's lexical syntax."""

        # Single quotes can be escaped either with a backslash or by doubling.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits a raw (dollar-quoted) string.
        RAW_STRINGS = ["$$"]
        # Snowflake additionally supports `//` single-line comments.
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            # EXCLUDE / RENAME are Snowflake's star modifiers; they map to the
            # generic EXCEPT / REPLACE tokens (mirrored by Generator.STAR_MAPPING).
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage file operations are treated as opaque commands.
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        # '$' may also appear inside identifiers / variable names.
        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed into a proper expression (see Parser.SHOW_PARSERS)
        # rather than being swallowed as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
    class Generator(generator.Generator):
        """Generator that renders sqlglot ASTs as Snowflake SQL."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")

        # Per-expression overrides: how each sqlglot expression type is
        # rendered in Snowflake syntax.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # Inverse of the Parser mapping: the inclusive end becomes exclusive again.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root ($) is implicit in Snowflake paths.
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # Snowflake's star modifiers use EXCLUDE/RENAME instead of EXCEPT/REPLACE.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Struct,
        }

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, falling back to non-table form when it contains
            expressions Snowflake does not allow inside a VALUES table."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, degrading typed struct fields to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format/precision/scale arguments."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a milliseconds argument
            into the nanoseconds argument Snowflake expects."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST only for string-typed operands; otherwise use CAST."""
            value = expression.this

            # Infer the operand's type if it hasn't been annotated yet.
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render single-argument LOG as LN (natural logarithm)."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a column alias
            matching FLATTEN's fixed output columns."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN always produces these six columns; slot any user-provided
            # offset/value aliases into the right positions.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW command with its optional clauses in Snowflake's order."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling positional defaults for arguments
            other dialects omit."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            """Render EXCEPT, warning that EXCEPT ALL is unsupported."""
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            """Render INTERSECT, warning that INTERSECT ALL is unsupported."""
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column constraint as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render the SWAP WITH clause of ALTER TABLE."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Render table properties space-separated and unwrapped."""
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render clustering keys as CLUSTER BY (...)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys: "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Quote an identifier, except for the special DUAL table name.

        Args:
            expression: The expression of interest; non-Identifiers pass through.
            identify: Whether to force quoting (delegated to the base class).
        """
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (e.g. an actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Parser for Snowflake-specific SQL syntax."""

        IDENTIFY_PIVOT_STRINGS = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped to builders that normalize them into
        # dialect-agnostic sqlglot expressions.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's ARRAY_CONTAINS takes (value, array); sqlglot's node is (array, value).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is the 50th percentile.
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM uses plain function-call syntax in Snowflake, so the special parser is dropped.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # SHOW object kinds handled by _parse_show_snowflake; TERSE variants
        # map to the same kind (the TERSE flag is detected from the token stream).
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Fixed output columns of the FLATTEN table function (see unnest handling).
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare IN <name> scope refers to a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
477
        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding Snowflake's `:` semi-structured
            access chains (e.g. col:a.b::INT) into a single GET_PATH-style
            JSONExtract with any casts re-applied on the outside."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            # Each `:` introduces one path segment; collect them all.
            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                # Join the segments into one dotted path and build the extraction.
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the peeled-off casts around the whole extraction.
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
507
508        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
509        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
510        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
511            this = self._parse_var() or self._parse_type()
512
513            if not this:
514                return None
515
516            self._match(TokenType.COMMA)
517            expression = self._parse_bitwise()
518            this = _map_date_part(this)
519            name = this.name.upper()
520
521            if name.startswith("EPOCH"):
522                if name == "EPOCH_MILLISECOND":
523                    scale = 10**3
524                elif name == "EPOCH_MICROSECOND":
525                    scale = 10**6
526                elif name == "EPOCH_NANOSECOND":
527                    scale = 10**9
528                else:
529                    scale = None
530
531                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
532                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
533
534                if scale:
535                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
536
537                return to_unix
538
539            return self.expression(exp.Extract, this=this, expression=expression)
540
541        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
542            if is_map:
543                # Keys are strings in Snowflake's objects, see also:
544                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
545                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
546                return self._parse_slice(self._parse_string())
547
548            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
549
550        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
551            lateral = super()._parse_lateral()
552            if not lateral:
553                return lateral
554
555            if isinstance(lateral.this, exp.Explode):
556                table_alias = lateral.args.get("alias")
557                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
558                if table_alias and not table_alias.args.get("columns"):
559                    table_alias.set("columns", columns)
560                elif not table_alias:
561                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
562
563            return lateral
564
565        def _parse_at_before(self, table: exp.Table) -> exp.Table:
566            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
567            index = self._index
568            if self._match_texts(("AT", "BEFORE")):
569                this = self._prev.text.upper()
570                kind = (
571                    self._match(TokenType.L_PAREN)
572                    and self._match_texts(self.HISTORICAL_DATA_KIND)
573                    and self._prev.text.upper()
574                )
575                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
576
577                if expression:
578                    self._match_r_paren()
579                    when = self.expression(
580                        exp.HistoricalData, this=this, kind=kind, expression=expression
581                    )
582                    table.set("when", when)
583                else:
584                    self._retreat(index)
585
586            return table
587
588        def _parse_table_parts(
589            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
590        ) -> exp.Table:
591            # https://docs.snowflake.com/en/user-guide/querying-stage
592            if self._match(TokenType.STRING, advance=False):
593                table = self._parse_string()
594            elif self._match_text_seq("@", advance=False):
595                table = self._parse_location_path()
596            else:
597                table = None
598
599            if table:
600                file_format = None
601                pattern = None
602
603                self._match(TokenType.L_PAREN)
604                while self._curr and not self._match(TokenType.R_PAREN):
605                    if self._match_text_seq("FILE_FORMAT", "=>"):
606                        file_format = self._parse_string() or super()._parse_table_parts(
607                            is_db_reference=is_db_reference
608                        )
609                    elif self._match_text_seq("PATTERN", "=>"):
610                        pattern = self._parse_string()
611                    else:
612                        break
613
614                    self._match(TokenType.COMMA)
615
616                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
617            else:
618                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
619
620            return self._parse_at_before(table)
621
622        def _parse_id_var(
623            self,
624            any_token: bool = True,
625            tokens: t.Optional[t.Collection[TokenType]] = None,
626        ) -> t.Optional[exp.Expression]:
627            if self._match_text_seq("IDENTIFIER", "("):
628                identifier = (
629                    super()._parse_id_var(any_token=any_token, tokens=tokens)
630                    or self._parse_string()
631                )
632                self._match_r_paren()
633                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
634
635            return super()._parse_id_var(any_token=any_token, tokens=tokens)
636
637        def _parse_show_snowflake(self, this: str) -> exp.Show:
638            scope = None
639            scope_kind = None
640
641            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
642            # which is syntactically valid but has no effect on the output
643            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
644
645            history = self._match_text_seq("HISTORY")
646
647            like = self._parse_string() if self._match(TokenType.LIKE) else None
648
649            if self._match(TokenType.IN):
650                if self._match_text_seq("ACCOUNT"):
651                    scope_kind = "ACCOUNT"
652                elif self._match_set(self.DB_CREATABLES):
653                    scope_kind = self._prev.text.upper()
654                    if self._curr:
655                        scope = self._parse_table_parts()
656                elif self._curr:
657                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
658                    scope = self._parse_table_parts()
659
660            return self.expression(
661                exp.Show,
662                **{
663                    "terse": terse,
664                    "this": this,
665                    "history": history,
666                    "like": like,
667                    "scope": scope,
668                    "scope_kind": scope_kind,
669                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
670                    "limit": self._parse_limit(),
671                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
672                },
673            )
674
675        def _parse_alter_table_swap(self) -> exp.SwapTable:
676            self._match_text_seq("WITH")
677            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
678
679        def _parse_location(self) -> exp.LocationProperty:
680            self._match(TokenType.EQ)
681            return self.expression(exp.LocationProperty, this=self._parse_location_path())
682
683        def _parse_location_path(self) -> exp.Var:
684            parts = [self._advance_any(ignore_reserved=True)]
685
686            # We avoid consuming a comma token because external tables like @foo and @bar
687            # can be joined in a query with a comma separator.
688            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
689                parts.append(self._advance_any(ignore_reserved=True))
690
691            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
ID_VAR_TOKENS = {<TokenType.BIGINT: 'BIGINT'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ROW: 'ROW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.MONEY: 'MONEY'>, <TokenType.IPV4: 'IPV4'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.NAME: 'NAME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CASE: 'CASE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.ANTI: 'ANTI'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.FINAL: 'FINAL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.MODEL: 'MODEL'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DESC: 'DESC'>, <TokenType.IS: 'IS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.FULL: 'FULL'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USE: 'USE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.KILL: 'KILL'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.SOME: 'SOME'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, 
<TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ROWS: 'ROWS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.INT128: 'INT128'>, <TokenType.FIRST: 'FIRST'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UINT: 'UINT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.XML: 'XML'>, <TokenType.ASC: 'ASC'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ASOF: 'ASOF'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.TIME: 'TIME'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.OBJECT: 'OBJECT'>, 
<TokenType.COLUMN: 'COLUMN'>, <TokenType.INDEX: 'INDEX'>, <TokenType.INT256: 'INT256'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.VAR: 'VAR'>, <TokenType.APPLY: 'APPLY'>, <TokenType.BIT: 'BIT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.NULL: 'NULL'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.MAP: 'MAP'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DIV: 'DIV'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SET: 'SET'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT: 'INT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SUPER: 'SUPER'>, <TokenType.FILTER: 'FILTER'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ANY: 'ANY'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.DATE: 'DATE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.ALL: 'ALL'>, 
<TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UUID: 'UUID'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.END: 'END'>, <TokenType.JSON: 'JSON'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.LEFT: 'LEFT'>}
TABLE_ALIAS_TOKENS = {<TokenType.BIGINT: 'BIGINT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.ROW: 'ROW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.MONEY: 'MONEY'>, <TokenType.IPV4: 'IPV4'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.NAME: 'NAME'>, <TokenType.UINT128: 'UINT128'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CASE: 'CASE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.ANTI: 'ANTI'>, <TokenType.FINAL: 'FINAL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.MODEL: 'MODEL'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DESC: 'DESC'>, <TokenType.IS: 'IS'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USE: 'USE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.KILL: 'KILL'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.SOME: 'SOME'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.DATEMULTIRANGE: 
'DATEMULTIRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ROWS: 'ROWS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.INT128: 'INT128'>, <TokenType.FIRST: 'FIRST'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UINT: 'UINT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.XML: 'XML'>, <TokenType.ASC: 'ASC'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TIME: 'TIME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.INDEX: 'INDEX'>, <TokenType.INT256: 'INT256'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.VARBINARY: 'VARBINARY'>, 
<TokenType.VAR: 'VAR'>, <TokenType.BIT: 'BIT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.NULL: 'NULL'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.MAP: 'MAP'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DIV: 'DIV'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.SET: 'SET'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.INT: 'INT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SUPER: 'SUPER'>, <TokenType.FILTER: 'FILTER'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ANY: 'ANY'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.DATE: 'DATE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.ALL: 'ALL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UUID: 'UUID'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ENUM8: 
'ENUM8'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.END: 'END'>, <TokenType.JSON: 'JSON'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.SHOW: 'SHOW'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 
'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 
'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function Parser.<lambda>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 
'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': 
<function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'UNIQUE KEYS', 'SEQUENCES', 'OBJECTS', 'VIEWS', 'TABLES', 'IMPORTED KEYS'}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
INTERVAL_VARS
ALIAS_TOKENS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
CONSTRAINT_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
693    class Tokenizer(tokens.Tokenizer):
694        STRING_ESCAPES = ["\\", "'"]
695        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
696        RAW_STRINGS = ["$$"]
697        COMMENTS = ["--", "//", ("/*", "*/")]
698
699        KEYWORDS = {
700            **tokens.Tokenizer.KEYWORDS,
701            "BYTEINT": TokenType.INT,
702            "CHAR VARYING": TokenType.VARCHAR,
703            "CHARACTER VARYING": TokenType.VARCHAR,
704            "EXCLUDE": TokenType.EXCEPT,
705            "ILIKE ANY": TokenType.ILIKE_ANY,
706            "LIKE ANY": TokenType.LIKE_ANY,
707            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
708            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
709            "MINUS": TokenType.EXCEPT,
710            "NCHAR VARYING": TokenType.VARCHAR,
711            "PUT": TokenType.COMMAND,
712            "REMOVE": TokenType.COMMAND,
713            "RENAME": TokenType.REPLACE,
714            "RM": TokenType.COMMAND,
715            "SAMPLE": TokenType.TABLE_SAMPLE,
716            "SQL_DOUBLE": TokenType.DOUBLE,
717            "SQL_VARCHAR": TokenType.VARCHAR,
718            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
719            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
720            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
721            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
722            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
723            "TOP": TokenType.TOP,
724        }
725
726        SINGLE_TOKENS = {
727            **tokens.Tokenizer.SINGLE_TOKENS,
728            "$": TokenType.PARAMETER,
729        }
730
731        VAR_SINGLE_TOKENS = {"$"}
732
733        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 
'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 
'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 
'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': 
<TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>, <TokenType.EXECUTE: 'EXECUTE'>}
class Snowflake.Generator(sqlglot.generator.Generator):
class Generator(generator.Generator):
    """Generator settings tuned for producing Snowflake SQL.

    The flags below are behavior toggles consumed by the base
    ``generator.Generator``; their precise semantics are defined there.
    """

    # Snowflake bind parameters / session variables use the `$` sigil.
    PARAMETER_TOKEN = "$"
    MATCHED_BY_SOURCE = False
    SINGLE_STRING_INTERVAL = True
    JOIN_HINTS = False
    TABLE_HINTS = False
    QUERY_HINTS = False
    AGGREGATE_FILTER_SUPPORTED = False
    SUPPORTS_TABLE_COPY = False
    COLLATE_IS_FUNC = True
    LIMIT_ONLY_LITERALS = True
    JSON_KEY_VALUE_PAIR_SEP = ","
    # Note the leading space: appended directly after INSERT.
    INSERT_OVERWRITE = " OVERWRITE INTO"
    STRUCT_DELIMITER = ("(", ")")
 749
    # Per-expression SQL generators, layered on top of the base Generator's
    # defaults. Most entries either rename a function to its Snowflake
    # equivalent or rewrite the AST via transforms.preprocess.
    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArgMax: rename_func("MAX_BY"),
        exp.ArgMin: rename_func("MIN_BY"),
        exp.Array: inline_array_sql,
        exp.ArrayConcat: rename_func("ARRAY_CAT"),
        # Argument order is swapped relative to the AST: (array, value) -> (value last).
        exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
        exp.AtTimeZone: lambda self, e: self.func(
            "CONVERT_TIMEZONE", e.args.get("zone"), e.this
        ),
        exp.BitwiseXor: rename_func("BITXOR"),
        exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
        exp.DateAdd: date_delta_sql("DATEADD"),
        exp.DateDiff: date_delta_sql("DATEDIFF"),
        exp.DateStrToDate: datestrtodate_sql,
        exp.DayOfMonth: rename_func("DAYOFMONTH"),
        exp.DayOfWeek: rename_func("DAYOFWEEK"),
        exp.DayOfYear: rename_func("DAYOFYEAR"),
        exp.Explode: rename_func("FLATTEN"),
        exp.Extract: rename_func("DATE_PART"),
        # Two-step conversion: source zone -> UTC.
        exp.FromTimeZone: lambda self, e: self.func(
            "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
        ),
        # ARRAY_GENERATE_RANGE's end bound is exclusive, hence the + 1.
        exp.GenerateSeries: lambda self, e: self.func(
            "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
        ),
        exp.GroupConcat: rename_func("LISTAGG"),
        exp.If: if_sql(name="IFF", false_value="NULL"),
        exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
        exp.JSONExtractScalar: lambda self, e: self.func(
            "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
        ),
        exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
        # The JSON path root ("$") has no textual representation here.
        exp.JSONPathRoot: lambda *_: "",
        exp.LogicalAnd: rename_func("BOOLAND_AGG"),
        exp.LogicalOr: rename_func("BOOLOR_AGG"),
        exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        exp.Max: max_or_greatest,
        exp.Min: min_or_least,
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.PercentileCont: transforms.preprocess(
            [transforms.add_within_group_for_percentiles]
        ),
        exp.PercentileDisc: transforms.preprocess(
            [transforms.add_within_group_for_percentiles]
        ),
        exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
        exp.RegexpILike: _regexpilike_sql,
        exp.Rand: rename_func("RANDOM"),
        exp.Select: transforms.preprocess(
            [
                transforms.eliminate_distinct_on,
                transforms.explode_to_unnest(),
                transforms.eliminate_semi_and_anti_joins,
            ]
        ),
        exp.SHA: rename_func("SHA1"),
        exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
        exp.StartsWith: rename_func("STARTSWITH"),
        # POSITION(substr, str[, position]).
        exp.StrPosition: lambda self, e: self.func(
            "POSITION", e.args.get("substr"), e.this, e.args.get("position")
        ),
        exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
        exp.Stuff: rename_func("INSERT"),
        exp.TimeAdd: date_delta_sql("TIMEADD"),
        # Operand order is reversed: TIMESTAMPDIFF(unit, start, end).
        exp.TimestampDiff: lambda self, e: self.func(
            "TIMESTAMPDIFF", e.unit, e.expression, e.this
        ),
        exp.TimestampTrunc: timestamptrunc_sql,
        exp.TimeStrToTime: timestrtotime_sql,
        exp.TimeToStr: lambda self, e: self.func(
            "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
        ),
        exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
        exp.ToArray: rename_func("TO_ARRAY"),
        exp.ToChar: lambda self, e: self.function_fallback_sql(e),
        exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
        exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
        exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
        exp.TsOrDsToDate: lambda self, e: self.func(
            "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
        ),
        exp.UnixToTime: rename_func("TO_TIMESTAMP"),
        exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
        exp.WeekOfYear: rename_func("WEEKOFYEAR"),
        exp.Xor: rename_func("BOOLXOR"),
    }
 838
    # JSON path segments this dialect can render; anything else is unsupported.
    SUPPORTED_JSON_PATH_PARTS = {
        exp.JSONPathKey,
        exp.JSONPathRoot,
        exp.JSONPathSubscript,
    }

    # Type-name overrides on top of the base mapping; Snowflake models nested
    # and struct values as OBJECT, and its plain TIMESTAMP is TIMESTAMPNTZ.
    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.NESTED: "OBJECT",
        exp.DataType.Type.STRUCT: "OBJECT",
        exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
    }

    # SELECT * modifiers: Snowflake spells EXCEPT/REPLACE as EXCLUDE/RENAME.
    STAR_MAPPING = {
        "except": "EXCLUDE",
        "replace": "RENAME",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # Expression types that cannot appear in a VALUES-as-table clause
    # (checked by values_sql below).
    UNSUPPORTED_VALUES_EXPRESSIONS = {
        exp.Struct,
    }
 866
 867        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 868            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 869                values_as_table = False
 870
 871            return super().values_sql(expression, values_as_table=values_as_table)
 872
 873        def datatype_sql(self, expression: exp.DataType) -> str:
 874            expressions = expression.expressions
 875            if (
 876                expressions
 877                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 878                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 879            ):
 880                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 881                return "OBJECT"
 882
 883            return super().datatype_sql(expression)
 884
 885        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 886            return self.func(
 887                "TO_NUMBER",
 888                expression.this,
 889                expression.args.get("format"),
 890                expression.args.get("precision"),
 891                expression.args.get("scale"),
 892            )
 893
 894        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 895            milli = expression.args.get("milli")
 896            if milli is not None:
 897                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 898                expression.set("nano", milli_to_nano)
 899
 900            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 901
 902        def trycast_sql(self, expression: exp.TryCast) -> str:
 903            value = expression.this
 904
 905            if value.type is None:
 906                from sqlglot.optimizer.annotate_types import annotate_types
 907
 908                value = annotate_types(value)
 909
 910            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 911                return super().trycast_sql(expression)
 912
 913            # TRY_CAST only works for string values in Snowflake
 914            return self.cast_sql(expression)
 915
 916        def log_sql(self, expression: exp.Log) -> str:
 917            if not expression.expression:
 918                return self.func("LN", expression.this)
 919
 920            return super().log_sql(expression)
 921
    def unnest_sql(self, expression: exp.Unnest) -> str:
        """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

        The alias column list is padded to the six columns emitted here
        (seq, key, path, index, value, this); the caller's offset alias and
        first value column, when present, are mapped onto the "index" and
        "value" slots respectively.
        """
        unnest_alias = expression.args.get("alias")
        offset = expression.args.get("offset")

        columns = [
            exp.to_identifier("seq"),
            exp.to_identifier("key"),
            exp.to_identifier("path"),
            # WITH OFFSET alias (popped from the tree) or the default "index".
            offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
            # First user-provided alias column, or the default "value".
            seq_get(unnest_alias.columns if unnest_alias else [], 0)
            or exp.to_identifier("value"),
            exp.to_identifier("this"),
        ]

        if unnest_alias:
            unnest_alias.set("columns", columns)
        else:
            unnest_alias = exp.TableAlias(this="_u", columns=columns)

        explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
        alias = self.sql(unnest_alias)
        alias = f" AS {alias}" if alias else ""
        return f"{explode}{alias}"
 945
    def show_sql(self, expression: exp.Show) -> str:
        """Render a Snowflake SHOW command from its parsed pieces.

        Each optional fragment is prefixed with its own leading space so the
        final f-string concatenation stays well-formed when parts are absent.
        Note the ordering: the IN <scope_kind> clause precedes the scope name.
        """
        terse = "TERSE " if expression.args.get("terse") else ""
        history = " HISTORY" if expression.args.get("history") else ""
        like = self.sql(expression, "like")
        like = f" LIKE {like}" if like else ""

        scope = self.sql(expression, "scope")
        scope = f" {scope}" if scope else ""

        scope_kind = self.sql(expression, "scope_kind")
        if scope_kind:
            scope_kind = f" IN {scope_kind}"

        starts_with = self.sql(expression, "starts_with")
        if starts_with:
            starts_with = f" STARTS WITH {starts_with}"

        limit = self.sql(expression, "limit")

        from_ = self.sql(expression, "from")
        if from_:
            from_ = f" FROM {from_}"

        return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 970
    def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
        """Render REGEXP_SUBSTR with explicitly filled-in default arguments.

        Other dialects don't support all of the following parameters, so we
        need to generate default values as necessary to ensure the
        transpilation is correct. The cascade below works right-to-left:
        supplying `group` forces a default for `parameters`, which forces one
        for `occurrence`, which forces one for `position` — since self.func
        drops trailing Nones, every argument to the *left* of the last
        explicit one must be materialized.
        """
        group = expression.args.get("group")
        parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
        occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
        position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

        return self.func(
            "REGEXP_SUBSTR",
            expression.this,
            expression.expression,
            position,
            occurrence,
            parameters,
            group,
        )
 988
 989        def except_op(self, expression: exp.Except) -> str:
 990            if not expression.args.get("distinct"):
 991                self.unsupported("EXCEPT with All is not supported in Snowflake")
 992            return super().except_op(expression)
 993
 994        def intersect_op(self, expression: exp.Intersect) -> str:
 995            if not expression.args.get("distinct"):
 996                self.unsupported("INTERSECT with All is not supported in Snowflake")
 997            return super().intersect_op(expression)
 998
 999        def describe_sql(self, expression: exp.Describe) -> str:
1000            # Default to table if kind is unknown
1001            kind_value = expression.args.get("kind") or "TABLE"
1002            kind = f" {kind_value}" if kind_value else ""
1003            this = f" {self.sql(expression, 'this')}"
1004            expressions = self.expressions(expression, flat=True)
1005            expressions = f" {expressions}" if expressions else ""
1006            return f"DESCRIBE{kind}{this}{expressions}"
1007
1008        def generatedasidentitycolumnconstraint_sql(
1009            self, expression: exp.GeneratedAsIdentityColumnConstraint
1010        ) -> str:
1011            start = expression.args.get("start")
1012            start = f" START {start}" if start else ""
1013            increment = expression.args.get("increment")
1014            increment = f" INCREMENT {increment}" if increment else ""
1015            return f"AUTOINCREMENT{start}{increment}"
1016
1017        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1018            this = self.sql(expression, "this")
1019            return f"SWAP WITH {this}"
1020
1021        def with_properties(self, properties: exp.Properties) -> str:
1022            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
1023
1024        def cluster_sql(self, expression: exp.Cluster) -> str:
1025            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1026
1027        def struct_sql(self, expression: exp.Struct) -> str:
1028            keys = []
1029            values = []
1030
1031            for i, e in enumerate(expression.expressions):
1032                if isinstance(e, exp.PropertyEQ):
1033                    keys.append(
1034                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1035                    )
1036                    values.append(e.expression)
1037                else:
1038                    keys.append(exp.Literal.string(f"_{i}"))
1039                    values.append(e)
1040
1041            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function 
preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: 
<function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Struct'>}
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render a VALUES clause, falling back to the non-table form when the
    payload contains constructs Snowflake cannot express inside VALUES
    (see UNSUPPORTED_VALUES_EXPRESSIONS)."""
    unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS)
    return super().values_sql(
        expression,
        values_as_table=False if unsupported else values_as_table,
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type; typed STRUCTs collapse to a bare OBJECT.

    Snowflake's full syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ],
    but a field list with explicit types is not transpilable here, so any
    struct type carrying DataType fields is emitted as plain OBJECT.
    """
    fields = expression.expressions
    if fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        if any(isinstance(field, exp.DataType) for field in fields):
            return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format[, precision[, scale]]])."""
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding any millisecond argument into
    the nanosecond slot (Snowflake's function has no milli parameter)."""
    milli = expression.args.get("milli")
    if milli is not None:
        # 1 ms == 1_000_000 ns; move the value over and drop "milli".
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, degrading to plain CAST for non-string operands.

    Snowflake's TRY_CAST only accepts string input, so anything that is not
    a text type (or of unknown type) is emitted as a regular CAST instead.
    """
    operand = expression.this

    if operand.type is None:
        # Annotate lazily so we can inspect the operand's resolved type.
        from sqlglot.optimizer.annotate_types import annotate_types

        operand = annotate_types(operand)

    if operand.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return super().trycast_sql(expression)

    return self.cast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; the single-argument form is the natural log, spelled LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN always yields six output columns; the caller's offset and first
    alias column are slotted into the "index" and "value" positions, with
    defaults for the rest.
    """
    table_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    first_alias_col = seq_get(table_alias.columns if table_alias else [], 0)
    flatten_columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        first_alias_col or exp.to_identifier("value"),
        exp.to_identifier("this"),
    ]

    if table_alias:
        table_alias.set("columns", flatten_columns)
    else:
        table_alias = exp.TableAlias(this="_u", columns=flatten_columns)

    explode_sql = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(table_alias)
    return f"{explode_sql} AS {alias_sql}" if alias_sql else explode_sql
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifiers
    (TERSE, HISTORY, LIKE, IN <scope>, STARTS WITH, LIMIT, FROM)."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like_sql = self.sql(expression, "like")
    scope_sql = self.sql(expression, "scope")
    scope_kind_sql = self.sql(expression, "scope_kind")
    starts_with_sql = self.sql(expression, "starts_with")
    limit_sql = self.sql(expression, "limit")
    from_sql = self.sql(expression, "from")

    # Assemble in Snowflake's fixed modifier order; empty pieces vanish.
    pieces = [
        f"SHOW {terse}{expression.name}{history}",
        f" LIKE {like_sql}" if like_sql else "",
        f" IN {scope_kind_sql}" if scope_kind_sql else "",
        f" {scope_sql}" if scope_sql else "",
        f" STARTS WITH {starts_with_sql}" if starts_with_sql else "",
        limit_sql,
        f" FROM {from_sql}" if from_sql else "",
    ]
    return "".join(pieces)
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render REGEXP_SUBSTR for RegexpExtract.

    Other dialects don't support all of REGEXP_SUBSTR's parameters, so
    whenever a later argument is present each earlier missing one is
    materialized with its default, keeping the positional list well-formed.
    """
    args = expression.args
    group = args.get("group")
    parameters = args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Render EXCEPT; Snowflake only supports the DISTINCT form."""
    is_distinct = expression.args.get("distinct")
    if not is_distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Render INTERSECT; Snowflake only supports the DISTINCT form."""
    is_distinct = expression.args.get("distinct")
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when absent.

    Fix: the original guarded the kind segment with ``if kind_value else ""``
    even though ``kind_value`` can never be falsy after the ``or "TABLE"``
    default, so that dead branch is removed.
    """
    # Default to table if kind is unknown
    kind_value = expression.args.get("kind") or "TABLE"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE {kind_value}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns with Snowflake's AUTOINCREMENT syntax,
    appending START/INCREMENT only when present."""
    args = expression.args
    start = args.get("start")
    increment = args.get("increment")

    parts = ["AUTOINCREMENT"]
    if start:
        parts.append(f" START {start}")
    if increment:
        parts.append(f" INCREMENT {increment}")
    return "".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the SWAP WITH clause of ALTER TABLE ... SWAP WITH <target>."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties unwrapped and space-separated, each starting on
    its own segment."""
    segment_prefix = self.seg("")
    return self.properties(properties, wrapped=False, prefix=segment_prefix, sep=" ")
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render CLUSTER BY with a parenthesized expression list."""
    clustered = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({clustered})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...).

    Named fields keep their names as string keys (identifier names become
    string literals); positional fields get synthetic "_<index>" keys.
    """
    pairs = []

    for index, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            pairs.append((key, field.expression))
        else:
            pairs.append((exp.Literal.string(f"_{index}"), field))

    return self.func("OBJECT_CONSTRUCT", *flatten(pairs))
SELECT_KINDS: Tuple[str, ...] = ()
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
OUTER_UNION_MODIFIERS
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql