sqlglot.dialects.snowflake — Snowflake dialect implementation (source listing)

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25)
  26from sqlglot.helper import flatten, is_float, is_int, seq_get
  27from sqlglot.tokens import TokenType
  28
  29if t.TYPE_CHECKING:
  30    from sqlglot._typing import E
  31
  32
  33# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
  34def _build_datetime(
  35    name: str, kind: exp.DataType.Type, safe: bool = False
  36) -> t.Callable[[t.List], exp.Func]:
  37    def _builder(args: t.List) -> exp.Func:
  38        value = seq_get(args, 0)
  39        int_value = value is not None and is_int(value.name)
  40
  41        if isinstance(value, exp.Literal):
  42            # Converts calls like `TO_TIME('01:02:03')` into casts
  43            if len(args) == 1 and value.is_string and not int_value:
  44                return exp.cast(value, kind)
  45
  46            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
  47            # cases so we can transpile them, since they're relatively common
  48            if kind == exp.DataType.Type.TIMESTAMP:
  49                if int_value:
  50                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
  51                if not is_float(value.this):
  52                    return build_formatted_time(exp.StrToTime, "snowflake")(args)
  53
  54        if kind == exp.DataType.Type.DATE and not int_value:
  55            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
  56            formatted_exp.set("safe", safe)
  57            return formatted_exp
  58
  59        return exp.Anonymous(this=name, expressions=args)
  60
  61    return _builder
  62
  63
  64def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  65    expression = parser.build_var_map(args)
  66
  67    if isinstance(expression, exp.StarMap):
  68        return expression
  69
  70    return exp.Struct(
  71        expressions=[
  72            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  73        ]
  74    )
  75
  76
  77def _build_datediff(args: t.List) -> exp.DateDiff:
  78    return exp.DateDiff(
  79        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  80    )
  81
  82
  83def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  84    def _builder(args: t.List) -> E:
  85        return expr_type(
  86            this=seq_get(args, 2),
  87            expression=seq_get(args, 1),
  88            unit=map_date_part(seq_get(args, 0)),
  89        )
  90
  91    return _builder
  92
  93
  94# https://docs.snowflake.com/en/sql-reference/functions/div0
  95def _build_if_from_div0(args: t.List) -> exp.If:
  96    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
  97        exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
  98    )
  99    true = exp.Literal.number(0)
 100    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
 101    return exp.If(this=cond, true=true, false=false)
 102
 103
 104# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 105def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 106    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 107    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 108
 109
 110# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 111def _build_if_from_nullifzero(args: t.List) -> exp.If:
 112    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
 113    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 114
 115
 116def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 117    flag = expression.text("flag")
 118
 119    if "i" not in flag:
 120        flag += "i"
 121
 122    return self.func(
 123        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 124    )
 125
 126
 127def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 128    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 129
 130    if not regexp_replace.args.get("replacement"):
 131        regexp_replace.set("replacement", exp.Literal.string(""))
 132
 133    return regexp_replace
 134
 135
 136def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 137    def _parse(self: Snowflake.Parser) -> exp.Show:
 138        return self._parse_show_snowflake(*args, **kwargs)
 139
 140    return _parse
 141
 142
 143def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 144    trunc = date_trunc_to_time(args)
 145    trunc.set("unit", map_date_part(trunc.args["unit"]))
 146    return trunc
 147
 148
 149def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 150    """
 151    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 152    so we need to unqualify them.
 153
 154    Example:
 155        >>> from sqlglot import parse_one
 156        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 157        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 158        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 159    """
 160    if isinstance(expression, exp.Pivot) and expression.unpivot:
 161        expression = transforms.unqualify_columns(expression)
 162
 163    return expression
 164
 165
 166def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 167    assert isinstance(expression, exp.Create)
 168
 169    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 170        if expression.this in exp.DataType.NESTED_TYPES:
 171            expression.set("expressions", None)
 172        return expression
 173
 174    props = expression.args.get("properties")
 175    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 176        for schema_expression in expression.this.expressions:
 177            if isinstance(schema_expression, exp.ColumnDef):
 178                column_type = schema_expression.kind
 179                if isinstance(column_type, exp.DataType):
 180                    column_type.transform(_flatten_structured_type, copy=False)
 181
 182    return expression
 183
 184
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(...)) in FROM/JOIN into a Snowflake-compatible form.

    Snowflake lacks GENERATE_DATE_ARRAY, so the date sequence is emulated by generating
    an integer sequence (ARRAY_GENERATE_RANGE) and adding each value to the start date.
    Only the case of a literal 1-unit interval step is handled; other shapes are left as-is.
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Bail out unless start/end are present and the step is exactly 1 <unit>
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    # Copy so the alias survives being re-attached to the replacement subquery
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 224
 225
 226class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens mapped to strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 266
 267    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 268        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 269        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 270        if (
 271            isinstance(expression, exp.Identifier)
 272            and isinstance(expression.parent, exp.Table)
 273            and expression.name.lower() == "dual"
 274        ):
 275            return expression  # type: ignore
 276
 277        return super().quote_identifier(expression, identify=identify)
 278
 279    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped to sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> command tails parsed into exp.Show nodes
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip any Cast wrappers added by typed lambda params (x INT -> ...)
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 458
 459        def _negate_range(
 460            self, this: t.Optional[exp.Expression] = None
 461        ) -> t.Optional[exp.Expression]:
 462            if not this:
 463                return this
 464
 465            query = this.args.get("query")
 466            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 467                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 468                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 469                # which can produce different results (most likely a SnowFlake bug).
 470                #
 471                # https://docs.snowflake.com/en/sql-reference/functions/in
 472                # Context: https://github.com/tobymao/sqlglot/issues/3890
 473                return self.expression(
 474                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 475                )
 476
 477            return self.expression(exp.Not, this=this)
 478
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH MASKING POLICY / PROJECTION POLICY / TAG column constraints.

            Registered under several trigger keywords (WITH, MASKING, PROJECTION, TAG);
            when triggered by a keyword other than WITH, step back one token so the
            keyword itself can be re-matched below.
            """
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # A qualified policy name parses as a Column; normalize to a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            # Not a recognized WITH-style constraint
            return None
 504
 505        def _parse_create(self) -> exp.Create | exp.Command:
 506            expression = super()._parse_create()
 507            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 508                # Replace the Table node with the enclosed Identifier
 509                expression.this.replace(expression.this.this)
 510
 511            return expression
 512
 513        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 514        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 515        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 516            this = self._parse_var() or self._parse_type()
 517
 518            if not this:
 519                return None
 520
 521            self._match(TokenType.COMMA)
 522            expression = self._parse_bitwise()
 523            this = map_date_part(this)
 524            name = this.name.upper()
 525
 526            if name.startswith("EPOCH"):
 527                if name == "EPOCH_MILLISECOND":
 528                    scale = 10**3
 529                elif name == "EPOCH_MICROSECOND":
 530                    scale = 10**6
 531                elif name == "EPOCH_NANOSECOND":
 532                    scale = 10**9
 533                else:
 534                    scale = None
 535
 536                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 537                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 538
 539                if scale:
 540                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 541
 542                return to_unix
 543
 544            return self.expression(exp.Extract, this=this, expression=expression)
 545
 546        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 547            if is_map:
 548                # Keys are strings in Snowflake's objects, see also:
 549                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 550                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 551                return self._parse_slice(self._parse_string())
 552
 553            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 554
 555        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 556            lateral = super()._parse_lateral()
 557            if not lateral:
 558                return lateral
 559
 560            if isinstance(lateral.this, exp.Explode):
 561                table_alias = lateral.args.get("alias")
 562                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 563                if table_alias and not table_alias.args.get("columns"):
 564                    table_alias.set("columns", columns)
 565                elif not table_alias:
 566                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 567
 568            return lateral
 569
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, additionally supporting staged file references.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @stage path may appear where a table is expected
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optionally consume (FILE_FORMAT => ..., PATTERN => ...) options
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT value is either a string or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: fall back to regular table-name parsing
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 603
 604        def _parse_id_var(
 605            self,
 606            any_token: bool = True,
 607            tokens: t.Optional[t.Collection[TokenType]] = None,
 608        ) -> t.Optional[exp.Expression]:
 609            if self._match_text_seq("IDENTIFIER", "("):
 610                identifier = (
 611                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 612                    or self._parse_string()
 613                )
 614                self._match_r_paren()
 615                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 616
 617            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 618
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW command into an exp.Show node.

            `this` is the object kind being shown (e.g. "TABLES", "PRIMARY KEYS").
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            # Optional IN <scope> clause narrowing the command
            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind keyword: infer SCHEMA vs TABLE from the object kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 656
 657        def _parse_alter_table_swap(self) -> exp.SwapTable:
 658            self._match_text_seq("WITH")
 659            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 660
 661        def _parse_location_property(self) -> exp.LocationProperty:
 662            self._match(TokenType.EQ)
 663            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 664
 665        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 666            # Parse either a subquery or a staged file
 667            return (
 668                self._parse_select(table=True, parse_subquery_alias=False)
 669                if self._match(TokenType.L_PAREN, advance=False)
 670                else self._parse_table_parts()
 671            )
 672
        def _parse_location_path(self) -> exp.Var:
            """Parse a stage/location path (e.g. @db.schema.%stage/path) into a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 685
 686        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 687            this = super()._parse_lambda_arg()
 688
 689            if not this:
 690                return this
 691
 692            typ = self._parse_types()
 693
 694            if typ:
 695                return self.expression(exp.Cast, this=this, to=typ)
 696
 697            return this
 698
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer configuration."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$-delimited strings are raw (no escape processing)
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no hint comments; /*+ is just a regular comment opener
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed (see Parser.STATEMENT_PARSERS), not treated as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 741
    class Generator(generator.Generator):
        """Generates Snowflake SQL from sqlglot expression trees."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False

        # Expression-node -> SQL rendering overrides for Snowflake
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) — arguments swapped
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end, so add 1 back here
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Node types that cannot appear inside a VALUES (...) table literal
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, falling back to non-table form when it contains
            expressions Snowflake does not allow in a VALUES table literal."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, degrading typed structs to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format/precision/scale arguments."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a milli argument to nanos
            since Snowflake's variant only accepts nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render casts, using TO_GEOGRAPHY / TO_GEOMETRY for spatial types."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                # Annotate lazily so we can inspect the operand's type below
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; a single-argument LOG is the natural log (LN)."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with FLATTEN's
            fixed six-column output mapped through the alias."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN always produces (seq, key, path, index, value, this);
            # slot the caller's offset/value aliases into the right positions
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR with positional defaults filled in.

            Other dialects don't support all of the following parameters, so we
            need to generate default values as necessary to ensure the
            transpilation is correct.
            """
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each earlier positional argument only needs a default when a later
            # one is present, hence the cascading defaults below
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render the ALTER TABLE ... SWAP WITH <table> clause."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render CLUSTER BY with its expressions parenthesized."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key, value, ...).

            Unnamed fields get positional keys '_0', '_1', ...
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @generator.unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE; weight/accuracy args are unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with stage file-format/copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers are normalized to uppercase
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values by default
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE (n) means n percent, not n rows
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    # None: NULL handling in ARRAY_AGG is unspecified here
    ARRAY_AGG_INCLUDES_NULLS = None
 238
    # Snowflake datetime format tokens -> strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 267
 268    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 269        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 270        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 271        if (
 272            isinstance(expression, exp.Identifier)
 273            and isinstance(expression.parent, exp.Table)
 274            and expression.name.lower() == "dual"
 275        ):
 276            return expression  # type: ignore
 277
 278        return super().quote_identifier(expression, identify=identify)
 279
    class Parser(parser.Parser):
        """Snowflake-specific parser configuration."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # In Snowflake, `x:path` performs VARIANT extraction
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Function-name -> builder overrides for Snowflake's function surface
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array); swap into (array, value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes for binary input in Snowflake
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN is the 0.5 continuous percentile
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM uses regular function-call syntax in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <object kind> dispatch; TERSE variants share the plain parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # All of these funnel into _parse_with_constraint (WITH is optional)
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged-file path (@stage/%table/...)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # FLATTEN's fixed output columns, in order
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose name is a bare identifier, not a table
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Typed lambda args were parsed as casts; unwrap to the bare name
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 459
 460        def _negate_range(
 461            self, this: t.Optional[exp.Expression] = None
 462        ) -> t.Optional[exp.Expression]:
 463            if not this:
 464                return this
 465
 466            query = this.args.get("query")
 467            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 468                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 469                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 470                # which can produce different results (most likely a SnowFlake bug).
 471                #
 472                # https://docs.snowflake.com/en/sql-reference/functions/in
 473                # Context: https://github.com/tobymao/sqlglot/issues/3890
 474                return self.expression(
 475                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 476                )
 477
 478            return self.expression(exp.Not, this=this)
 479
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a WITH/MASKING/PROJECTION/TAG column constraint.

            Returns the matching constraint expression, or None when none of
            the known constraint forms follows.
            """
            # WITH is optional: if we were dispatched on MASKING/PROJECTION/TAG
            # directly, step back one token so the matchers below can consume it.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Normalize a qualified policy name (Column) into a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 505
 506        def _parse_create(self) -> exp.Create | exp.Command:
 507            expression = super()._parse_create()
 508            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 509                # Replace the Table node with the enclosed Identifier
 510                expression.this.replace(expression.this.this)
 511
 512            return expression
 513
 514        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 515        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 516        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 517            this = self._parse_var() or self._parse_type()
 518
 519            if not this:
 520                return None
 521
 522            self._match(TokenType.COMMA)
 523            expression = self._parse_bitwise()
 524            this = map_date_part(this)
 525            name = this.name.upper()
 526
 527            if name.startswith("EPOCH"):
 528                if name == "EPOCH_MILLISECOND":
 529                    scale = 10**3
 530                elif name == "EPOCH_MICROSECOND":
 531                    scale = 10**6
 532                elif name == "EPOCH_NANOSECOND":
 533                    scale = 10**9
 534                else:
 535                    scale = None
 536
 537                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 538                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 539
 540                if scale:
 541                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 542
 543                return to_unix
 544
 545            return self.expression(exp.Extract, this=this, expression=expression)
 546
 547        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 548            if is_map:
 549                # Keys are strings in Snowflake's objects, see also:
 550                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 551                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 552                return self._parse_slice(self._parse_string())
 553
 554            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 555
 556        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 557            lateral = super()._parse_lateral()
 558            if not lateral:
 559                return lateral
 560
 561            if isinstance(lateral.this, exp.Explode):
 562                table_alias = lateral.args.get("alias")
 563                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 564                if table_alias and not table_alias.args.get("columns"):
 565                    table_alias.set("columns", columns)
 566                elif not table_alias:
 567                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 568
 569            return lateral
 570
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file "tables".

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @-prefixed location means a staged file rather
            # than a regular table name.
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Staged files may carry parenthesized options, e.g.
                # @stage (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT can be a string literal or a named format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: defer to the standard table-name parser
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 604
 605        def _parse_id_var(
 606            self,
 607            any_token: bool = True,
 608            tokens: t.Optional[t.Collection[TokenType]] = None,
 609        ) -> t.Optional[exp.Expression]:
 610            if self._match_text_seq("IDENTIFIER", "("):
 611                identifier = (
 612                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 613                    or self._parse_string()
 614                )
 615                self._match_r_paren()
 616                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 617
 618            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 619
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW statement into an exp.Show node."""
            scope = None
            scope_kind = None

            # This will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS,
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # Explicit kind, e.g. IN DATABASE foo / IN SCHEMA bar
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind given; infer it from the SHOW target
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            # NOTE: the remaining clauses (STARTS WITH, LIMIT, FROM) are parsed
            # inline below, so their ordering in this dict is significant.
            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 657
 658        def _parse_alter_table_swap(self) -> exp.SwapTable:
 659            self._match_text_seq("WITH")
 660            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 661
 662        def _parse_location_property(self) -> exp.LocationProperty:
 663            self._match(TokenType.EQ)
 664            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 665
 666        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 667            # Parse either a subquery or a staged file
 668            return (
 669                self._parse_select(table=True, parse_subquery_alias=False)
 670                if self._match(TokenType.L_PAREN, advance=False)
 671                else self._parse_table_parts()
 672            )
 673
 674        def _parse_location_path(self) -> exp.Var:
 675            parts = [self._advance_any(ignore_reserved=True)]
 676
 677            # We avoid consuming a comma token because external tables like @foo and @bar
 678            # can be joined in a query with a comma separator, as well as closing paren
 679            # in case of subqueries
 680            while self._is_connected() and not self._match_set(
 681                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 682            ):
 683                parts.append(self._advance_any(ignore_reserved=True))
 684
 685            return exp.var("".join(part.text for part in parts if part))
 686
 687        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 688            this = super()._parse_lambda_arg()
 689
 690            if not this:
 691                return this
 692
 693            typ = self._parse_types()
 694
 695            if typ:
 696                return self.expression(exp.Cast, this=this, to=typ)
 697
 698            return this
 699
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled single quote escape inside strings
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ dollar-quoted strings are treated as raw strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage-file commands are tokenized as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ hint-comment syntax
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces parameters/placeholders (see Generator.PARAMETER_TOKEN)
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully tokenized (not an opaque command) so the parser can
        # handle its clauses; see Parser._parse_show_snowflake
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 742
    class Generator(generator.Generator):
        # Snowflake parameter placeholders are written with $
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is rendered as a function call rather than an operator
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
 764
        # Overrides mapping sqlglot expression nodes to their Snowflake renderings
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end; GenerateSeries is
            # inclusive, so add 1 back to the end when generating
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            # TIMESTAMPDIFF takes (unit, start, end), i.e. reversed operand order
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
 857
        # JSON path components that can be rendered via GET_PATH
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Nested/structured types are rendered as OBJECT (see also datatype_sql)
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot appear inside a VALUES clause; values_sql falls
        # back to a non-table rendering when any of these are present
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 882
 883        def with_properties(self, properties: exp.Properties) -> str:
 884            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 885
 886        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 887            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 888                values_as_table = False
 889
 890            return super().values_sql(expression, values_as_table=values_as_table)
 891
 892        def datatype_sql(self, expression: exp.DataType) -> str:
 893            expressions = expression.expressions
 894            if (
 895                expressions
 896                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 897                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 898            ):
 899                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 900                return "OBJECT"
 901
 902            return super().datatype_sql(expression)
 903
 904        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 905            return self.func(
 906                "TO_NUMBER",
 907                expression.this,
 908                expression.args.get("format"),
 909                expression.args.get("precision"),
 910                expression.args.get("scale"),
 911            )
 912
 913        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 914            milli = expression.args.get("milli")
 915            if milli is not None:
 916                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 917                expression.set("nano", milli_to_nano)
 918
 919            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 920
 921        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 922            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 923                return self.func("TO_GEOGRAPHY", expression.this)
 924            if expression.is_type(exp.DataType.Type.GEOMETRY):
 925                return self.func("TO_GEOMETRY", expression.this)
 926
 927            return super().cast_sql(expression, safe_prefix=safe_prefix)
 928
 929        def trycast_sql(self, expression: exp.TryCast) -> str:
 930            value = expression.this
 931
 932            if value.type is None:
 933                from sqlglot.optimizer.annotate_types import annotate_types
 934
 935                value = annotate_types(value)
 936
 937            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 938                return super().trycast_sql(expression)
 939
 940            # TRY_CAST only works for string values in Snowflake
 941            return self.cast_sql(expression)
 942
 943        def log_sql(self, expression: exp.Log) -> str:
 944            if not expression.expression:
 945                return self.func("LN", expression.this)
 946
 947            return super().log_sql(expression)
 948
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a column alias."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # The alias columns below map positionally onto the columns FLATTEN
            # emits (named here seq, key, path, index, value, this).
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An UNNEST ... WITH OFFSET column takes the index position
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The user's first alias column (if any) names the value position
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 972
 973        def show_sql(self, expression: exp.Show) -> str:
 974            terse = "TERSE " if expression.args.get("terse") else ""
 975            history = " HISTORY" if expression.args.get("history") else ""
 976            like = self.sql(expression, "like")
 977            like = f" LIKE {like}" if like else ""
 978
 979            scope = self.sql(expression, "scope")
 980            scope = f" {scope}" if scope else ""
 981
 982            scope_kind = self.sql(expression, "scope_kind")
 983            if scope_kind:
 984                scope_kind = f" IN {scope_kind}"
 985
 986            starts_with = self.sql(expression, "starts_with")
 987            if starts_with:
 988                starts_with = f" STARTS WITH {starts_with}"
 989
 990            limit = self.sql(expression, "limit")
 991
 992            from_ = self.sql(expression, "from")
 993            if from_:
 994                from_ = f" FROM {from_}"
 995
 996            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 997
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in defaults for omitted arguments.

            Other dialects don't support all of the following parameters, so we
            need to generate default values as necessary to ensure the
            transpilation is correct.
            """
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each later positional argument forces defaults for the earlier ones,
            # since REGEXP_SUBSTR's arguments are positional.
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1021
1022        def describe_sql(self, expression: exp.Describe) -> str:
1023            # Default to table if kind is unknown
1024            kind_value = expression.args.get("kind") or "TABLE"
1025            kind = f" {kind_value}" if kind_value else ""
1026            this = f" {self.sql(expression, 'this')}"
1027            expressions = self.expressions(expression, flat=True)
1028            expressions = f" {expressions}" if expressions else ""
1029            return f"DESCRIBE{kind}{this}{expressions}"
1030
1031        def generatedasidentitycolumnconstraint_sql(
1032            self, expression: exp.GeneratedAsIdentityColumnConstraint
1033        ) -> str:
1034            start = expression.args.get("start")
1035            start = f" START {start}" if start else ""
1036            increment = expression.args.get("increment")
1037            increment = f" INCREMENT {increment}" if increment else ""
1038            return f"AUTOINCREMENT{start}{increment}"
1039
1040        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1041            this = self.sql(expression, "this")
1042            return f"SWAP WITH {this}"
1043
1044        def cluster_sql(self, expression: exp.Cluster) -> str:
1045            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1046
1047        def struct_sql(self, expression: exp.Struct) -> str:
1048            keys = []
1049            values = []
1050
1051            for i, e in enumerate(expression.expressions):
1052                if isinstance(e, exp.PropertyEQ):
1053                    keys.append(
1054                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1055                    )
1056                    values.append(e.expression)
1057                else:
1058                    keys.append(exp.Literal.string(f"_{i}"))
1059                    values.append(e)
1060
1061            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1062
1063        @generator.unsupported_args("weight", "accuracy")
1064        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1065            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1066
1067        def alterset_sql(self, expression: exp.AlterSet) -> str:
1068            exprs = self.expressions(expression, flat=True)
1069            exprs = f" {exprs}" if exprs else ""
1070            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1071            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1072            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1073            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1074            tag = self.expressions(expression, key="tag", flat=True)
1075            tag = f" TAG {tag}" if tag else ""
1076
1077            return f"SET{exprs}{file_format}{copy_options}{tag}"
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
268    def quote_identifier(self, expression: E, identify: bool = True) -> E:
269        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
270        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
271        if (
272            isinstance(expression, exp.Identifier)
273            and isinstance(expression.parent, exp.Table)
274            and expression.name.lower() == "dual"
275        ):
276            return expression  # type: ignore
277
278        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two characters "\\n") to its unescaped version (e.g. a literal newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
280    class Parser(parser.Parser):
281        IDENTIFY_PIVOT_STRINGS = True
282        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
283        COLON_IS_VARIANT_EXTRACT = True
284
285        ID_VAR_TOKENS = {
286            *parser.Parser.ID_VAR_TOKENS,
287            TokenType.MATCH_CONDITION,
288        }
289
290        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
291        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
292
293        FUNCTIONS = {
294            **parser.Parser.FUNCTIONS,
295            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
296            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
297            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
298                this=seq_get(args, 1), expression=seq_get(args, 0)
299            ),
300            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
301                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
302                start=seq_get(args, 0),
303                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
304                step=seq_get(args, 2),
305            ),
306            "BITXOR": binary_from_function(exp.BitwiseXor),
307            "BIT_XOR": binary_from_function(exp.BitwiseXor),
308            "BOOLXOR": binary_from_function(exp.Xor),
309            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
310            "DATE_TRUNC": _date_trunc_to_time,
311            "DATEADD": _build_date_time_add(exp.DateAdd),
312            "DATEDIFF": _build_datediff,
313            "DIV0": _build_if_from_div0,
314            "FLATTEN": exp.Explode.from_arg_list,
315            "GET_PATH": lambda args, dialect: exp.JSONExtract(
316                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
317            ),
318            "IFF": exp.If.from_arg_list,
319            "LAST_DAY": lambda args: exp.LastDay(
320                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
321            ),
322            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
323            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
324            "LISTAGG": exp.GroupConcat.from_arg_list,
325            "MEDIAN": lambda args: exp.PercentileCont(
326                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
327            ),
328            "NULLIFZERO": _build_if_from_nullifzero,
329            "OBJECT_CONSTRUCT": _build_object_construct,
330            "REGEXP_REPLACE": _build_regexp_replace,
331            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
332                this=seq_get(args, 0),
333                expression=seq_get(args, 1),
334                position=seq_get(args, 2),
335                occurrence=seq_get(args, 3),
336                parameters=seq_get(args, 4),
337                group=seq_get(args, 5) or exp.Literal.number(0),
338            ),
339            "RLIKE": exp.RegexpLike.from_arg_list,
340            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
341            "TIMEADD": _build_date_time_add(exp.TimeAdd),
342            "TIMEDIFF": _build_datediff,
343            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
344            "TIMESTAMPDIFF": _build_datediff,
345            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
346            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
347            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
348            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
349            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
350            "TO_NUMBER": lambda args: exp.ToNumber(
351                this=seq_get(args, 0),
352                format=seq_get(args, 1),
353                precision=seq_get(args, 2),
354                scale=seq_get(args, 3),
355            ),
356            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
357            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
358            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
359            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
360            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
361            "TO_VARCHAR": exp.ToChar.from_arg_list,
362            "ZEROIFNULL": _build_if_from_zeroifnull,
363        }
364
365        FUNCTION_PARSERS = {
366            **parser.Parser.FUNCTION_PARSERS,
367            "DATE_PART": lambda self: self._parse_date_part(),
368            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
369        }
370        FUNCTION_PARSERS.pop("TRIM")
371
372        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
373
374        RANGE_PARSERS = {
375            **parser.Parser.RANGE_PARSERS,
376            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
377            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
378        }
379
380        ALTER_PARSERS = {
381            **parser.Parser.ALTER_PARSERS,
382            "UNSET": lambda self: self.expression(
383                exp.Set,
384                tag=self._match_text_seq("TAG"),
385                expressions=self._parse_csv(self._parse_id_var),
386                unset=True,
387            ),
388            "SWAP": lambda self: self._parse_alter_table_swap(),
389        }
390
391        STATEMENT_PARSERS = {
392            **parser.Parser.STATEMENT_PARSERS,
393            TokenType.SHOW: lambda self: self._parse_show(),
394        }
395
396        PROPERTY_PARSERS = {
397            **parser.Parser.PROPERTY_PARSERS,
398            "LOCATION": lambda self: self._parse_location_property(),
399        }
400
401        TYPE_CONVERTERS = {
402            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
403            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
404        }
405
406        SHOW_PARSERS = {
407            "SCHEMAS": _show_parser("SCHEMAS"),
408            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
409            "OBJECTS": _show_parser("OBJECTS"),
410            "TERSE OBJECTS": _show_parser("OBJECTS"),
411            "TABLES": _show_parser("TABLES"),
412            "TERSE TABLES": _show_parser("TABLES"),
413            "VIEWS": _show_parser("VIEWS"),
414            "TERSE VIEWS": _show_parser("VIEWS"),
415            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
416            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
417            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
418            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
419            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
420            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
421            "SEQUENCES": _show_parser("SEQUENCES"),
422            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
423            "COLUMNS": _show_parser("COLUMNS"),
424            "USERS": _show_parser("USERS"),
425            "TERSE USERS": _show_parser("USERS"),
426        }
427
428        CONSTRAINT_PARSERS = {
429            **parser.Parser.CONSTRAINT_PARSERS,
430            "WITH": lambda self: self._parse_with_constraint(),
431            "MASKING": lambda self: self._parse_with_constraint(),
432            "PROJECTION": lambda self: self._parse_with_constraint(),
433            "TAG": lambda self: self._parse_with_constraint(),
434        }
435
436        STAGED_FILE_SINGLE_TOKENS = {
437            TokenType.DOT,
438            TokenType.MOD,
439            TokenType.SLASH,
440        }
441
442        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
443
444        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
445
446        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
447
448        LAMBDAS = {
449            **parser.Parser.LAMBDAS,
450            TokenType.ARROW: lambda self, expressions: self.expression(
451                exp.Lambda,
452                this=self._replace_lambda(
453                    self._parse_assignment(),
454                    expressions,
455                ),
456                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
457            ),
458        }
459
460        def _negate_range(
461            self, this: t.Optional[exp.Expression] = None
462        ) -> t.Optional[exp.Expression]:
463            if not this:
464                return this
465
466            query = this.args.get("query")
467            if isinstance(this, exp.In) and isinstance(query, exp.Query):
468                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
469                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
470                # which can produce different results (most likely a SnowFlake bug).
471                #
472                # https://docs.snowflake.com/en/sql-reference/functions/in
473                # Context: https://github.com/tobymao/sqlglot/issues/3890
474                return self.expression(
475                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
476                )
477
478            return self.expression(exp.Not, this=this)
479
480        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
481            if self._prev.token_type != TokenType.WITH:
482                self._retreat(self._index - 1)
483
484            if self._match_text_seq("MASKING", "POLICY"):
485                policy = self._parse_column()
486                return self.expression(
487                    exp.MaskingPolicyColumnConstraint,
488                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
489                    expressions=self._match(TokenType.USING)
490                    and self._parse_wrapped_csv(self._parse_id_var),
491                )
492            if self._match_text_seq("PROJECTION", "POLICY"):
493                policy = self._parse_column()
494                return self.expression(
495                    exp.ProjectionPolicyColumnConstraint,
496                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
497                )
498            if self._match(TokenType.TAG):
499                return self.expression(
500                    exp.TagColumnConstraint,
501                    expressions=self._parse_wrapped_csv(self._parse_property),
502                )
503
504            return None
505
506        def _parse_create(self) -> exp.Create | exp.Command:
507            expression = super()._parse_create()
508            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
509                # Replace the Table node with the enclosed Identifier
510                expression.this.replace(expression.this.this)
511
512            return expression
513
514        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
515        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
516        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
517            this = self._parse_var() or self._parse_type()
518
519            if not this:
520                return None
521
522            self._match(TokenType.COMMA)
523            expression = self._parse_bitwise()
524            this = map_date_part(this)
525            name = this.name.upper()
526
527            if name.startswith("EPOCH"):
528                if name == "EPOCH_MILLISECOND":
529                    scale = 10**3
530                elif name == "EPOCH_MICROSECOND":
531                    scale = 10**6
532                elif name == "EPOCH_NANOSECOND":
533                    scale = 10**9
534                else:
535                    scale = None
536
537                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
538                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
539
540                if scale:
541                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
542
543                return to_unix
544
545            return self.expression(exp.Extract, this=this, expression=expression)
546
547        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
548            if is_map:
549                # Keys are strings in Snowflake's objects, see also:
550                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
551                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
552                return self._parse_slice(self._parse_string())
553
554            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
555
556        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
557            lateral = super()._parse_lateral()
558            if not lateral:
559                return lateral
560
561            if isinstance(lateral.this, exp.Explode):
562                table_alias = lateral.args.get("alias")
563                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
564                if table_alias and not table_alias.args.get("columns"):
565                    table_alias.set("columns", columns)
566                elif not table_alias:
567                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
568
569            return lateral
570
571        def _parse_table_parts(
572            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
573        ) -> exp.Table:
574            # https://docs.snowflake.com/en/user-guide/querying-stage
575            if self._match(TokenType.STRING, advance=False):
576                table = self._parse_string()
577            elif self._match_text_seq("@", advance=False):
578                table = self._parse_location_path()
579            else:
580                table = None
581
582            if table:
583                file_format = None
584                pattern = None
585
586                wrapped = self._match(TokenType.L_PAREN)
587                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
588                    if self._match_text_seq("FILE_FORMAT", "=>"):
589                        file_format = self._parse_string() or super()._parse_table_parts(
590                            is_db_reference=is_db_reference
591                        )
592                    elif self._match_text_seq("PATTERN", "=>"):
593                        pattern = self._parse_string()
594                    else:
595                        break
596
597                    self._match(TokenType.COMMA)
598
599                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
600            else:
601                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
602
603            return table
604
605        def _parse_id_var(
606            self,
607            any_token: bool = True,
608            tokens: t.Optional[t.Collection[TokenType]] = None,
609        ) -> t.Optional[exp.Expression]:
610            if self._match_text_seq("IDENTIFIER", "("):
611                identifier = (
612                    super()._parse_id_var(any_token=any_token, tokens=tokens)
613                    or self._parse_string()
614                )
615                self._match_r_paren()
616                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
617
618            return super()._parse_id_var(any_token=any_token, tokens=tokens)
619
620        def _parse_show_snowflake(self, this: str) -> exp.Show:
621            scope = None
622            scope_kind = None
623
624            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
625            # which is syntactically valid but has no effect on the output
626            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
627
628            history = self._match_text_seq("HISTORY")
629
630            like = self._parse_string() if self._match(TokenType.LIKE) else None
631
632            if self._match(TokenType.IN):
633                if self._match_text_seq("ACCOUNT"):
634                    scope_kind = "ACCOUNT"
635                elif self._match_set(self.DB_CREATABLES):
636                    scope_kind = self._prev.text.upper()
637                    if self._curr:
638                        scope = self._parse_table_parts()
639                elif self._curr:
640                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
641                    scope = self._parse_table_parts()
642
643            return self.expression(
644                exp.Show,
645                **{
646                    "terse": terse,
647                    "this": this,
648                    "history": history,
649                    "like": like,
650                    "scope": scope,
651                    "scope_kind": scope_kind,
652                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
653                    "limit": self._parse_limit(),
654                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
655                },
656            )
657
658        def _parse_alter_table_swap(self) -> exp.SwapTable:
659            self._match_text_seq("WITH")
660            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
661
662        def _parse_location_property(self) -> exp.LocationProperty:
663            self._match(TokenType.EQ)
664            return self.expression(exp.LocationProperty, this=self._parse_location_path())
665
666        def _parse_file_location(self) -> t.Optional[exp.Expression]:
667            # Parse either a subquery or a staged file
668            return (
669                self._parse_select(table=True, parse_subquery_alias=False)
670                if self._match(TokenType.L_PAREN, advance=False)
671                else self._parse_table_parts()
672            )
673
674        def _parse_location_path(self) -> exp.Var:
675            parts = [self._advance_any(ignore_reserved=True)]
676
677            # We avoid consuming a comma token because external tables like @foo and @bar
678            # can be joined in a query with a comma separator, as well as closing paren
679            # in case of subqueries
680            while self._is_connected() and not self._match_set(
681                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
682            ):
683                parts.append(self._advance_any(ignore_reserved=True))
684
685            return exp.var("".join(part.text for part in parts if part))
686
687        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
688            this = super()._parse_lambda_arg()
689
690            if not this:
691                return this
692
693            typ = self._parse_types()
694
695            if typ:
696                return self.expression(exp.Cast, this=this, to=typ)
697
698            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE32: 'DATE32'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.ASC: 'ASC'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VAR: 'VAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.END: 'END'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.CUBE: 'CUBE'>, <TokenType.IS: 'IS'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.CACHE: 'CACHE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INDEX: 'INDEX'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.INT128: 'INT128'>, <TokenType.FULL: 'FULL'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, 
<TokenType.NESTED: 'NESTED'>, <TokenType.IPV4: 'IPV4'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INET: 'INET'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.KILL: 'KILL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.NAME: 'NAME'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TOP: 'TOP'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.DELETE: 'DELETE'>, <TokenType.USE: 'USE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ALL: 'ALL'>, <TokenType.CASE: 'CASE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SUPER: 'SUPER'>, <TokenType.SHOW: 'SHOW'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.SEMI: 'SEMI'>, <TokenType.APPLY: 'APPLY'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.TABLE: 'TABLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.ROW: 'ROW'>, <TokenType.UUID: 'UUID'>, <TokenType.TIMESTAMP_MS: 
'TIMESTAMP_MS'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.INT256: 'INT256'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.LEFT: 'LEFT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TAG: 'TAG'>, <TokenType.CHAR: 'CHAR'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.SET: 'SET'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SOME: 'SOME'>, <TokenType.JSON: 'JSON'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.MERGE: 'MERGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NULL: 'NULL'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ANY: 'ANY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.LOAD: 'LOAD'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.XML: 'XML'>, <TokenType.MAP: 'MAP'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.RENAME: 'RENAME'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.ANTI: 'ANTI'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, 
<TokenType.NCHAR: 'NCHAR'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DATE: 'DATE'>, <TokenType.ASOF: 'ASOF'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.INT: 'INT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.COPY: 'COPY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.FIRST: 'FIRST'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.TIME: 'TIME'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.FINAL: 'FINAL'>}
TABLE_ALIAS_TOKENS = {<TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATE32: 'DATE32'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.ASC: 'ASC'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VAR: 'VAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.END: 'END'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.CUBE: 'CUBE'>, <TokenType.IS: 'IS'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.CACHE: 'CACHE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.BIT: 'BIT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INDEX: 'INDEX'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.INT128: 'INT128'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.NESTED: 'NESTED'>, <TokenType.IPV4: 'IPV4'>, <TokenType.STRUCT: 'STRUCT'>, 
<TokenType.FORMAT: 'FORMAT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INET: 'INET'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.KILL: 'KILL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.NAME: 'NAME'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TOP: 'TOP'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.DELETE: 'DELETE'>, <TokenType.USE: 'USE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ALL: 'ALL'>, <TokenType.CASE: 'CASE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SUPER: 'SUPER'>, <TokenType.SHOW: 'SHOW'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TABLE: 'TABLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.ROW: 'ROW'>, <TokenType.UUID: 'UUID'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.YEAR: 'YEAR'>, 
<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.INT256: 'INT256'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TAG: 'TAG'>, <TokenType.CHAR: 'CHAR'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.SET: 'SET'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.SOME: 'SOME'>, <TokenType.JSON: 'JSON'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.MERGE: 'MERGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NULL: 'NULL'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.ANY: 'ANY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.LOAD: 'LOAD'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.XML: 'XML'>, <TokenType.MAP: 'MAP'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.RENAME: 'RENAME'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.ANTI: 'ANTI'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DATE: 'DATE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.INT: 'INT'>, 
<TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.COPY: 'COPY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.FIRST: 'FIRST'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.TIME: 'TIME'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.FINAL: 'FINAL'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function 
build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': 
<function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UNPACK_COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnpackColumns'>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function 
build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function Snowflake.Parser.<lambda>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function 
_build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'VIEWS', 'UNIQUE KEYS', 'OBJECTS', 'TABLES', 'SEQUENCES', 'IMPORTED KEYS'}
NON_TABLE_CREATABLES = {'WAREHOUSE', 'STREAMLIT', 'TAG', 'STORAGE INTEGRATION'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
class Tokenizer(tokens.Tokenizer):
    # Inside single-quoted strings, Snowflake accepts both a backslash and a
    # doubled quote as escape characters.
    STRING_ESCAPES = ["\\", "'"]
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    RAW_STRINGS = ["$$"]
    COMMENTS = ["--", "//", ("/*", "*/")]
    NESTED_COMMENTS = False

    # Snowflake-specific keywords layered on top of the base tokenizer's map.
    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "EXCLUDE": TokenType.EXCEPT,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_CONDITION": TokenType.MATCH_CONDITION,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        "PUT": TokenType.COMMAND,
        "REMOVE": TokenType.COMMAND,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TAG": TokenType.TAG,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TOP": TokenType.TOP,
        "WAREHOUSE": TokenType.WAREHOUSE,
        "STREAMLIT": TokenType.STREAMLIT,
    }
    # Snowflake has no "/*+ ... */" hint comment syntax.
    del KEYWORDS["/*+"]

    SINGLE_TOKENS = {**tokens.Tokenizer.SINGLE_TOKENS, "$": TokenType.PARAMETER}

    # "$" may also appear inside identifiers/variables.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is parsed properly in this dialect, not treated as an opaque command.
    COMMANDS = {command for command in tokens.Tokenizer.COMMANDS if command != TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 
'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': 
<TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': 
<TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 
'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 743    class Generator(generator.Generator):
 744        PARAMETER_TOKEN = "$"
 745        MATCHED_BY_SOURCE = False
 746        SINGLE_STRING_INTERVAL = True
 747        JOIN_HINTS = False
 748        TABLE_HINTS = False
 749        QUERY_HINTS = False
 750        AGGREGATE_FILTER_SUPPORTED = False
 751        SUPPORTS_TABLE_COPY = False
 752        COLLATE_IS_FUNC = True
 753        LIMIT_ONLY_LITERALS = True
 754        JSON_KEY_VALUE_PAIR_SEP = ","
 755        INSERT_OVERWRITE = " OVERWRITE INTO"
 756        STRUCT_DELIMITER = ("(", ")")
 757        COPY_PARAMS_ARE_WRAPPED = False
 758        COPY_PARAMS_EQ_REQUIRED = True
 759        STAR_EXCEPT = "EXCLUDE"
 760        SUPPORTS_EXPLODING_PROJECTIONS = False
 761        ARRAY_CONCAT_IS_VAR_LEN = False
 762        SUPPORTS_CONVERT_TIMEZONE = True
 763        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
 764
 765        TRANSFORMS = {
 766            **generator.Generator.TRANSFORMS,
 767            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 768            exp.ArgMax: rename_func("MAX_BY"),
 769            exp.ArgMin: rename_func("MIN_BY"),
 770            exp.Array: inline_array_sql,
 771            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
 772            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 773            exp.AtTimeZone: lambda self, e: self.func(
 774                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 775            ),
 776            exp.BitwiseXor: rename_func("BITXOR"),
 777            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 778            exp.DateAdd: date_delta_sql("DATEADD"),
 779            exp.DateDiff: date_delta_sql("DATEDIFF"),
 780            exp.DateStrToDate: datestrtodate_sql,
 781            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 782            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 783            exp.DayOfYear: rename_func("DAYOFYEAR"),
 784            exp.Explode: rename_func("FLATTEN"),
 785            exp.Extract: rename_func("DATE_PART"),
 786            exp.FromTimeZone: lambda self, e: self.func(
 787                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 788            ),
 789            exp.GenerateSeries: lambda self, e: self.func(
 790                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 791            ),
 792            exp.GroupConcat: rename_func("LISTAGG"),
 793            exp.If: if_sql(name="IFF", false_value="NULL"),
 794            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 795            exp.JSONExtractScalar: lambda self, e: self.func(
 796                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 797            ),
 798            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 799            exp.JSONPathRoot: lambda *_: "",
 800            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 801            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 802            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 803            exp.Max: max_or_greatest,
 804            exp.Min: min_or_least,
 805            exp.ParseJSON: lambda self, e: self.func(
 806                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
 807            ),
 808            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 809            exp.PercentileCont: transforms.preprocess(
 810                [transforms.add_within_group_for_percentiles]
 811            ),
 812            exp.PercentileDisc: transforms.preprocess(
 813                [transforms.add_within_group_for_percentiles]
 814            ),
 815            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 816            exp.RegexpILike: _regexpilike_sql,
 817            exp.Rand: rename_func("RANDOM"),
 818            exp.Select: transforms.preprocess(
 819                [
 820                    transforms.eliminate_distinct_on,
 821                    transforms.explode_to_unnest(),
 822                    transforms.eliminate_semi_and_anti_joins,
 823                    _unnest_generate_date_array,
 824                ]
 825            ),
 826            exp.SHA: rename_func("SHA1"),
 827            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 828            exp.StartsWith: rename_func("STARTSWITH"),
 829            exp.StrPosition: lambda self, e: self.func(
 830                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 831            ),
 832            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 833            exp.Stuff: rename_func("INSERT"),
 834            exp.TimeAdd: date_delta_sql("TIMEADD"),
 835            exp.TimestampDiff: lambda self, e: self.func(
 836                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 837            ),
 838            exp.TimestampTrunc: timestamptrunc_sql(),
 839            exp.TimeStrToTime: timestrtotime_sql,
 840            exp.TimeToStr: lambda self, e: self.func(
 841                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 842            ),
 843            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 844            exp.ToArray: rename_func("TO_ARRAY"),
 845            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 846            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 847            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 848            exp.TsOrDsToDate: lambda self, e: self.func(
 849                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 850            ),
 851            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 852            exp.Uuid: rename_func("UUID_STRING"),
 853            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 854            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 855            exp.Xor: rename_func("BOOLXOR"),
 856        }
 857
        # JSONPath node types the Snowflake generator can translate directly;
        # other path parts fall back to the generator's unsupported handling.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Snowflake has no dedicated NESTED/STRUCT types; both render as OBJECT.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        # SET and VOLATILE properties cannot be expressed in Snowflake DDL.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot appear inside a VALUES (...) table literal;
        # when any is present, values_sql falls back to the non-table rendering.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 882
 883        def with_properties(self, properties: exp.Properties) -> str:
 884            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 885
 886        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 887            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 888                values_as_table = False
 889
 890            return super().values_sql(expression, values_as_table=values_as_table)
 891
 892        def datatype_sql(self, expression: exp.DataType) -> str:
 893            expressions = expression.expressions
 894            if (
 895                expressions
 896                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 897                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 898            ):
 899                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 900                return "OBJECT"
 901
 902            return super().datatype_sql(expression)
 903
 904        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 905            return self.func(
 906                "TO_NUMBER",
 907                expression.this,
 908                expression.args.get("format"),
 909                expression.args.get("precision"),
 910                expression.args.get("scale"),
 911            )
 912
 913        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 914            milli = expression.args.get("milli")
 915            if milli is not None:
 916                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 917                expression.set("nano", milli_to_nano)
 918
 919            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 920
 921        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 922            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 923                return self.func("TO_GEOGRAPHY", expression.this)
 924            if expression.is_type(exp.DataType.Type.GEOMETRY):
 925                return self.func("TO_GEOMETRY", expression.this)
 926
 927            return super().cast_sql(expression, safe_prefix=safe_prefix)
 928
 929        def trycast_sql(self, expression: exp.TryCast) -> str:
 930            value = expression.this
 931
 932            if value.type is None:
 933                from sqlglot.optimizer.annotate_types import annotate_types
 934
 935                value = annotate_types(value)
 936
 937            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 938                return super().trycast_sql(expression)
 939
 940            # TRY_CAST only works for string values in Snowflake
 941            return self.cast_sql(expression)
 942
 943        def log_sql(self, expression: exp.Log) -> str:
 944            if not expression.expression:
 945                return self.func("LN", expression.this)
 946
 947            return super().log_sql(expression)
 948
 949        def unnest_sql(self, expression: exp.Unnest) -> str:
 950            unnest_alias = expression.args.get("alias")
 951            offset = expression.args.get("offset")
 952
 953            columns = [
 954                exp.to_identifier("seq"),
 955                exp.to_identifier("key"),
 956                exp.to_identifier("path"),
 957                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 958                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 959                or exp.to_identifier("value"),
 960                exp.to_identifier("this"),
 961            ]
 962
 963            if unnest_alias:
 964                unnest_alias.set("columns", columns)
 965            else:
 966                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 967
 968            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 969            alias = self.sql(unnest_alias)
 970            alias = f" AS {alias}" if alias else ""
 971            return f"{explode}{alias}"
 972
 973        def show_sql(self, expression: exp.Show) -> str:
 974            terse = "TERSE " if expression.args.get("terse") else ""
 975            history = " HISTORY" if expression.args.get("history") else ""
 976            like = self.sql(expression, "like")
 977            like = f" LIKE {like}" if like else ""
 978
 979            scope = self.sql(expression, "scope")
 980            scope = f" {scope}" if scope else ""
 981
 982            scope_kind = self.sql(expression, "scope_kind")
 983            if scope_kind:
 984                scope_kind = f" IN {scope_kind}"
 985
 986            starts_with = self.sql(expression, "starts_with")
 987            if starts_with:
 988                starts_with = f" STARTS WITH {starts_with}"
 989
 990            limit = self.sql(expression, "limit")
 991
 992            from_ = self.sql(expression, "from")
 993            if from_:
 994                from_ = f" FROM {from_}"
 995
 996            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 997
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Generate Snowflake's REGEXP_SUBSTR from a RegexpExtract node.

            REGEXP_SUBSTR's optional arguments (position, occurrence, parameters,
            group) are positional, so whenever a later argument is present every
            earlier one must be emitted with its default value.
            """
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each default is only materialized when a later argument exists, so the
            # emitted argument list is exactly as long as needed — trailing absent
            # arguments are presumably dropped by self.func (TODO confirm).
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1021
1022        def describe_sql(self, expression: exp.Describe) -> str:
1023            # Default to table if kind is unknown
1024            kind_value = expression.args.get("kind") or "TABLE"
1025            kind = f" {kind_value}" if kind_value else ""
1026            this = f" {self.sql(expression, 'this')}"
1027            expressions = self.expressions(expression, flat=True)
1028            expressions = f" {expressions}" if expressions else ""
1029            return f"DESCRIBE{kind}{this}{expressions}"
1030
1031        def generatedasidentitycolumnconstraint_sql(
1032            self, expression: exp.GeneratedAsIdentityColumnConstraint
1033        ) -> str:
1034            start = expression.args.get("start")
1035            start = f" START {start}" if start else ""
1036            increment = expression.args.get("increment")
1037            increment = f" INCREMENT {increment}" if increment else ""
1038            return f"AUTOINCREMENT{start}{increment}"
1039
1040        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1041            this = self.sql(expression, "this")
1042            return f"SWAP WITH {this}"
1043
1044        def cluster_sql(self, expression: exp.Cluster) -> str:
1045            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1046
1047        def struct_sql(self, expression: exp.Struct) -> str:
1048            keys = []
1049            values = []
1050
1051            for i, e in enumerate(expression.expressions):
1052                if isinstance(e, exp.PropertyEQ):
1053                    keys.append(
1054                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1055                    )
1056                    values.append(e.expression)
1057                else:
1058                    keys.append(exp.Literal.string(f"_{i}"))
1059                    values.append(e)
1060
1061            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1062
1063        @generator.unsupported_args("weight", "accuracy")
1064        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1065            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1066
1067        def alterset_sql(self, expression: exp.AlterSet) -> str:
1068            exprs = self.expressions(expression, flat=True)
1069            exprs = f" {exprs}" if exprs else ""
1070            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1071            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1072            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1073            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1074            tag = self.expressions(expression, key="tag", flat=True)
1075            tag = f" TAG {tag}" if tag else ""
1076
1077            return f"SET{exprs}{file_format}{copy_options}{tag}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnpackColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, 
<class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 
'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, 
<class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.Map'>}
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
883        def with_properties(self, properties: exp.Properties) -> str:
884            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render VALUES, falling back to the non-table form when the rows contain
    constructs Snowflake's VALUES clause can't express (maps/structs)."""
    has_unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS)
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type; struct types carrying typed fields collapse to OBJECT."""
    fields = expression.expressions
    is_typed_struct = (
        bool(fields)
        and expression.is_type(*exp.DataType.STRUCT_TYPES)
        and any(isinstance(field, exp.DataType) for field in fields)
    )
    if is_typed_struct:
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ],
        # which we don't model, so emit the bare OBJECT keyword instead.
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(<expr>[, <format>[, <precision>[, <scale>]]]);
    trailing absent arguments are dropped by self.func."""
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS; Snowflake has no milliseconds slot, so a
    milli argument is converted into the nanoseconds argument instead."""
    milli = expression.args.get("milli")
    if milli is not None:
        # 1 millisecond == 1_000_000 nanoseconds; pop() detaches milli first.
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a cast; geospatial targets use Snowflake's conversion functions
    instead of CAST syntax."""
    geo_funcs = (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    )
    for geo_type, func_name in geo_funcs:
        if expression.is_type(geo_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, which Snowflake only supports for string inputs; any
    other operand type falls back to a plain cast."""
    value = expression.this

    if value.type is None:
        # Annotate lazily so the operand's type can be inspected below.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    is_text_like = value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN)
    if not is_text_like:
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; the single-argument form means natural log, i.e. LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN produces a fixed six-column row (SEQ, KEY, PATH, INDEX, VALUE,
    THIS), so the table alias is rewritten to name all six columns, mapping
    any requested offset/value aliases onto the INDEX/VALUE positions.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        # a WITH OFFSET alias (if present) lands on FLATTEN's INDEX column
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        # the first user-supplied column alias names the VALUE column
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value"),
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        # no alias was given: synthesize one so the six columns are addressable
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    return f"{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifiers.

    Each fragment is rendered independently (empty string when absent) and
    concatenated in Snowflake's clause order: TERSE, <object kind>, HISTORY,
    LIKE, IN <scope kind> <scope>, STARTS WITH, LIMIT, FROM.
    """
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""
    like = self.sql(expression, "like")
    like = f" LIKE {like}" if like else ""

    scope = self.sql(expression, "scope")
    scope = f" {scope}" if scope else ""

    # e.g. SHOW TABLES IN DATABASE my_db
    scope_kind = self.sql(expression, "scope_kind")
    if scope_kind:
        scope_kind = f" IN {scope_kind}"

    starts_with = self.sql(expression, "starts_with")
    if starts_with:
        starts_with = f" STARTS WITH {starts_with}"

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    if from_:
        from_ = f" FROM {from_}"

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render exp.RegexpExtract as Snowflake's REGEXP_SUBSTR.

    REGEXP_SUBSTR takes positional arguments (subject, pattern, position,
    occurrence, parameters, group), so whenever a later argument is present,
    defaults must be materialized for every earlier optional one.
    """
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct
    group = expression.args.get("group")

    # To avoid generating all these default values, we set group to None if
    # it's 0 (also default value) which doesn't trigger the following chain
    if group and group.name == "0":
        group = None

    # each default is only generated when a later argument forces it
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unknown."""
    # Default to table if kind is unknown
    kind_value = expression.args.get("kind") or "TABLE"
    pieces = ["DESCRIBE", f"{kind_value}", self.sql(expression, "this")]
    exprs = self.expressions(expression, flat=True)
    if exprs:
        pieces.append(exprs)
    return " ".join(pieces)
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
    pieces = ["AUTOINCREMENT"]
    start = expression.args.get("start")
    if start:
        pieces.append(f"START {start}")
    increment = expression.args.get("increment")
    if increment:
        pieces.append(f"INCREMENT {increment}")
    return " ".join(pieces)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the target of an ALTER TABLE ... SWAP WITH clause."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render Snowflake's parenthesized CLUSTER BY (...) clause."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile a struct literal into OBJECT_CONSTRUCT('k1', v1, 'k2', v2, ...).

    Unnamed fields receive positional keys of the form "_<index>".
    """
    pairs: t.List[exp.Expression] = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            key = (
                exp.Literal.string(field.name)
                if isinstance(field.this, exp.Identifier)
                else field.this
            )
            pairs.extend((key, field.expression))
        else:
            pairs.extend((exp.Literal.string(f"_{i}"), field))

    return self.func("OBJECT_CONSTRUCT", *pairs)
@generator.unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render APPROX_PERCENTILE; the weight/accuracy args are unsupported."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file-format, stage copy
    options, and tag clauses; absent pieces render as empty strings."""
    exprs = self.expressions(expression, flat=True)
    if exprs:
        exprs = f" {exprs}"

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        file_format = f" STAGE_FILE_FORMAT = ({file_format})"

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})"

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        tag = f" TAG {tag}"

    return f"SET{exprs}{file_format}{copy_options}{tag}"
# NOTE(review): flags consumed by the base Generator — semantics assumed from
# their names; confirm against sqlglot.generator.Generator.
# No special SELECT kinds (e.g. SELECT AS STRUCT) are emitted for Snowflake.
SELECT_KINDS: Tuple[str, ...] = ()
# presumably disables the TRY-prefixed call style — TODO confirm
TRY_SUPPORTED = False
# UESCAPE clauses on Unicode string literals aren't generated
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
PARSE_JSON_NAME
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
renametable_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql