Edit on GitHub

sqlglot.dialects.snowflake

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot._typing import E
  7from sqlglot.dialects.dialect import (
  8    Dialect,
  9    NormalizationStrategy,
 10    binary_from_function,
 11    date_delta_sql,
 12    date_trunc_to_time,
 13    datestrtodate_sql,
 14    format_time_lambda,
 15    if_sql,
 16    inline_array_sql,
 17    json_keyvalue_comma_sql,
 18    max_or_greatest,
 19    min_or_least,
 20    rename_func,
 21    timestamptrunc_sql,
 22    timestrtotime_sql,
 23    var_map_sql,
 24)
 25from sqlglot.expressions import Literal
 26from sqlglot.helper import seq_get
 27from sqlglot.tokens import TokenType
 28
 29
 30def _check_int(s: str) -> bool:
 31    if s[0] in ("-", "+"):
 32        return s[1:].isdigit()
 33    return s.isdigit()
 34
 35
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
    """Map Snowflake's overloaded TO_TIMESTAMP onto the matching sqlglot expression.

    TO_TIMESTAMP accepts a string (optionally with a format), a numeric epoch
    value (optionally with a scale of 0, 3 or 9), or a variant/column expression;
    each overload maps to a different sqlglot node.

    Raises:
        ValueError: if a two-argument numeric call uses a scale other than 0, 3 or 9.
    """
    if len(args) == 2:
        first_arg, second_arg = args
        if second_arg.is_string:
            # case: <string_expr> [ , <format> ]
            return format_time_lambda(exp.StrToTime, "snowflake")(args)

        # case: <numeric_expr> [ , <scale> ]
        if second_arg.name not in ["0", "3", "9"]:
            raise ValueError(
                f"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9"
            )

        if second_arg.name == "0":
            timescale = exp.UnixToTime.SECONDS
        elif second_arg.name == "3":
            timescale = exp.UnixToTime.MILLIS
        elif second_arg.name == "9":
            timescale = exp.UnixToTime.NANOS

        return exp.UnixToTime(this=first_arg, scale=timescale)

    from sqlglot.optimizer.simplify import simplify_literals

    # The first argument might be an expression like 40 * 365 * 86400, so we try to
    # reduce it using `simplify_literals` first and then check if it's a Literal.
    first_arg = seq_get(args, 0)
    if not isinstance(simplify_literals(first_arg, root=True), Literal):
        # case: <variant_expr> or other expressions such as columns
        return exp.TimeStrToTime.from_arg_list(args)

    if first_arg.is_string:
        if _check_int(first_arg.this):
            # case: <integer>
            return exp.UnixToTime.from_arg_list(args)

        # case: <date_expr>
        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)

    # case: <numeric_expr>
    return exp.UnixToTime.from_arg_list(args)
 78
 79
 80def _parse_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
 81    expression = parser.parse_var_map(args)
 82
 83    if isinstance(expression, exp.StarMap):
 84        return expression
 85
 86    return exp.Struct(
 87        expressions=[
 88            t.cast(exp.Condition, k).eq(v) for k, v in zip(expression.keys, expression.values)
 89        ]
 90    )
 91
 92
 93def _parse_datediff(args: t.List) -> exp.DateDiff:
 94    return exp.DateDiff(
 95        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
 96    )
 97
 98
 99def _unix_to_time_sql(self: Snowflake.Generator, expression: exp.UnixToTime) -> str:
100    scale = expression.args.get("scale")
101    timestamp = self.sql(expression, "this")
102    if scale in (None, exp.UnixToTime.SECONDS):
103        return f"TO_TIMESTAMP({timestamp})"
104    if scale == exp.UnixToTime.MILLIS:
105        return f"TO_TIMESTAMP({timestamp}, 3)"
106    if scale == exp.UnixToTime.MICROS:
107        return f"TO_TIMESTAMP({timestamp} / 1000, 3)"
108    if scale == exp.UnixToTime.NANOS:
109        return f"TO_TIMESTAMP({timestamp}, 9)"
110
111    self.unsupported(f"Unsupported scale for timestamp: {scale}.")
112    return ""
113
114
# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
    """Parse DATE_PART(<part>, <expr>), normalizing the part name.

    EPOCH_* parts are rewritten to a TimeToUnix over a TIMESTAMP cast (scaled by
    10**3/10**6/10**9 for milli/micro/nano); all other parts become an Extract.
    Returns None when no part token can be consumed.
    """
    this = self._parse_var() or self._parse_type()

    if not this:
        return None

    self._match(TokenType.COMMA)
    expression = self._parse_bitwise()
    # Canonicalize abbreviations (e.g. "yy" -> YEAR) before inspecting the name
    this = _map_date_part(this)
    name = this.name.upper()

    if name.startswith("EPOCH"):
        if name == "EPOCH_MILLISECOND":
            scale = 10**3
        elif name == "EPOCH_MICROSECOND":
            scale = 10**6
        elif name == "EPOCH_NANOSECOND":
            scale = 10**9
        else:
            # plain EPOCH / EPOCH_SECOND: no multiplication needed
            scale = None

        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

        return to_unix

    return self.expression(exp.Extract, this=this, expression=expression)
147
148
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _div0_to_if(args: t.List) -> exp.If:
    """Rewrite DIV0(a, b) as IF(b = 0, 0, a / b) for dialects lacking DIV0."""
    dividend = seq_get(args, 0)
    divisor = seq_get(args, 1)
    is_zero = exp.EQ(this=divisor, expression=exp.Literal.number(0))
    quotient = exp.Div(this=dividend, expression=divisor)
    return exp.If(this=is_zero, true=exp.Literal.number(0), false=quotient)
155
156
# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
def _zeroifnull_to_if(args: t.List) -> exp.If:
    """Rewrite ZEROIFNULL(x) as IF(x IS NULL, 0, x)."""
    value = seq_get(args, 0)
    is_null = exp.Is(this=value, expression=exp.Null())
    return exp.If(this=is_null, true=exp.Literal.number(0), false=value)
161
162
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _nullifzero_to_if(args: t.List) -> exp.If:
    # Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x) for dialects lacking it.
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
167
168
def _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:
    """Render nested types with Snowflake's semi-structured names ARRAY/OBJECT."""
    if expression.is_type("array"):
        return "ARRAY"
    if expression.is_type("map"):
        return "OBJECT"
    return self.datatype_sql(expression)
175
176
def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
    """Render a case-insensitive regex match as REGEXP_LIKE, forcing the "i" flag."""
    flag = expression.text("flag")

    if "i" not in flag:
        flag = f"{flag}i"

    return self.func(
        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
    )
186
187
def _parse_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
    """Parse CONVERT_TIMEZONE: the 2-arg form maps to AtTimeZone, 3-arg stays opaque."""
    if len(args) != 3:
        return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
    return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
192
193
def _parse_regexp_replace(args: t.List) -> exp.RegexpReplace:
    """Parse REGEXP_REPLACE, defaulting a missing replacement to the empty string."""
    node = exp.RegexpReplace.from_arg_list(args)

    if not node.args.get("replacement"):
        node.set("replacement", exp.Literal.string(""))

    return node
201
202
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Build a SHOW sub-parser with the given arguments bound via closure."""
    return lambda self: self._parse_show_snowflake(*args, **kwargs)
208
209
# Date-part abbreviations -> canonical Snowflake date part names.
# Fix: the literal previously listed "NSECONDS" twice; the duplicate is removed.
DATE_PART_MAPPING = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
282
283
@t.overload
def _map_date_part(part: exp.Expression) -> exp.Var:
    pass


@t.overload
def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    pass


def _map_date_part(part):
    # Canonicalize a date-part abbreviation (e.g. "yy" -> YEAR) via
    # DATE_PART_MAPPING; unknown parts (and None) are returned unchanged.
    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
    return exp.var(mapped) if mapped else part
297
298
def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    """Parse DATE_TRUNC, canonicalizing its unit (e.g. "yy" -> YEAR)."""
    expression = date_trunc_to_time(args)
    expression.set("unit", _map_date_part(expression.args["unit"]))
    return expression
303
304
def _parse_colon_get_path(
    self: parser.Parser, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Expression]:
    """Parse Snowflake's `:` extraction operator into nested GetPath nodes."""
    while True:
        path = self._parse_bitwise()

        # The cast :: operator has a lower precedence than the extraction operator :, so
        # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
        if isinstance(path, exp.Cast):
            target_type = path.to
            path = path.this
        else:
            target_type = None

        if isinstance(path, exp.Expression):
            # GET_PATH's second argument is a string, so we render the parsed
            # path expression back to Snowflake SQL and wrap it in a literal
            path = exp.Literal.string(path.sql(dialect="snowflake"))

        # The extraction operator : is left-associative
        this = self.expression(exp.GetPath, this=this, expression=path)

        if target_type:
            # Re-apply the cast that was peeled off above, around the whole GET_PATH
            this = exp.cast(this, target_type)

        if not self._match(TokenType.COLON):
            break

    return this
332
333
def _parse_timestamp_from_parts(args: t.List) -> exp.Func:
    """Parse TIMESTAMP_FROM_PARTS; the two-argument (date, time) form stays opaque."""
    if len(args) != 2:
        return exp.TimestampFromParts.from_arg_list(args)

    # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
    # so we parse this into Anonymous for now instead of introducing complexity
    return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
341
342
def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    # Only UNPIVOT (not PIVOT) nodes need unqualifying
    if isinstance(expression, exp.Pivot) and expression.unpivot:
        expression = transforms.unqualify_columns(expression)

    return expression
358
359
class Snowflake(Dialect):
    """Snowflake dialect: tokenizer, parser and SQL generation rules."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens -> strftime-style tokens
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
398
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Quote `expression`, except for the special unquoted DUAL table name."""
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return t.cast(E, expression)

        return super().quote_identifier(expression, identify=identify)
410
    class Parser(parser.Parser):
        """Parser for Snowflake-specific syntax (staged files, SHOW, AT|BEFORE, FLATTEN, ...)."""

        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names -> sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM is an ordinary function call, not special syntax
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "COLUMNS": _show_parser("COLUMNS"),
        }

        # Single-character tokens allowed inside a staged file path (e.g. @stage/a.b%c)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output column names produced by FLATTEN
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's default column names to the alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Incomplete AT/BEFORE clause: rewind and leave the table untouched
                    self._retreat(index)

            return table

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                # Staged-file reference: optionally parse (FILE_FORMAT => ..., PATTERN => ...)
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            # IDENTIFIER(<name>) resolves an identifier dynamically at runtime
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement: [LIKE <pattern>] [IN <scope>]."""
            scope = None
            scope_kind = None

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text
                    if self._curr:
                        scope = self._parse_table()
                elif self._curr:
                    scope_kind = "TABLE"
                    scope = self._parse_table()

            return self.expression(
                exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP [WITH] <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            """Parse a LOCATION [=] <path> property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file/location path (e.g. @stage/dir/file) into a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
645
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer adjustments for Snowflake's lexical quirks."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        # Snowflake keywords and type synonyms mapped to token types
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement rather than forwarded as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
683
    class Generator(generator.Generator):
        """SQL generator emitting Snowflake syntax from sqlglot expressions."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True

        # Expression types -> Snowflake rendering functions
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * EXCEPT/REPLACE -> Snowflake's EXCLUDE/RENAME keywords
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
794
795        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
796            milli = expression.args.get("milli")
797            if milli is not None:
798                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
799                expression.set("nano", milli_to_nano)
800
801            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
802
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, degrading to CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                # Infer the operand's type lazily; the import is deferred to
                # avoid a circular dependency with the optimizer package
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
816
817        def log_sql(self, expression: exp.Log) -> str:
818            if not expression.expression:
819                return self.func("LN", expression.this)
820
821            return super().log_sql(expression)
822
823        def unnest_sql(self, expression: exp.Unnest) -> str:
824            unnest_alias = expression.args.get("alias")
825            offset = expression.args.get("offset")
826
827            columns = [
828                exp.to_identifier("seq"),
829                exp.to_identifier("key"),
830                exp.to_identifier("path"),
831                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
832                seq_get(unnest_alias.columns if unnest_alias else [], 0)
833                or exp.to_identifier("value"),
834                exp.to_identifier("this"),
835            ]
836
837            if unnest_alias:
838                unnest_alias.set("columns", columns)
839            else:
840                unnest_alias = exp.TableAlias(this="_u", columns=columns)
841
842            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
843            alias = self.sql(unnest_alias)
844            alias = f" AS {alias}" if alias else ""
845            return f"{explode}{alias}"
846
847        def show_sql(self, expression: exp.Show) -> str:
848            like = self.sql(expression, "like")
849            like = f" LIKE {like}" if like else ""
850
851            scope = self.sql(expression, "scope")
852            scope = f" {scope}" if scope else ""
853
854            scope_kind = self.sql(expression, "scope_kind")
855            if scope_kind:
856                scope_kind = f" IN {scope_kind}"
857
858            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
859
860        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
861            # Other dialects don't support all of the following parameters, so we need to
862            # generate default values as necessary to ensure the transpilation is correct
863            group = expression.args.get("group")
864            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
865            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
866            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
867
868            return self.func(
869                "REGEXP_SUBSTR",
870                expression.this,
871                expression.expression,
872                position,
873                occurrence,
874                parameters,
875                group,
876            )
877
878        def except_op(self, expression: exp.Except) -> str:
879            if not expression.args.get("distinct", False):
880                self.unsupported("EXCEPT with All is not supported in Snowflake")
881            return super().except_op(expression)
882
883        def intersect_op(self, expression: exp.Intersect) -> str:
884            if not expression.args.get("distinct", False):
885                self.unsupported("INTERSECT with All is not supported in Snowflake")
886            return super().intersect_op(expression)
887
888        def describe_sql(self, expression: exp.Describe) -> str:
889            # Default to table if kind is unknown
890            kind_value = expression.args.get("kind") or "TABLE"
891            kind = f" {kind_value}" if kind_value else ""
892            this = f" {self.sql(expression, 'this')}"
893            expressions = self.expressions(expression, flat=True)
894            expressions = f" {expressions}" if expressions else ""
895            return f"DESCRIBE{kind}{this}{expressions}"
896
897        def generatedasidentitycolumnconstraint_sql(
898            self, expression: exp.GeneratedAsIdentityColumnConstraint
899        ) -> str:
900            start = expression.args.get("start")
901            start = f" START {start}" if start else ""
902            increment = expression.args.get("increment")
903            increment = f" INCREMENT {increment}" if increment else ""
904            return f"AUTOINCREMENT{start}{increment}"
905
906        def swaptable_sql(self, expression: exp.SwapTable) -> str:
907            this = self.sql(expression, "this")
908            return f"SWAP WITH {this}"
909
        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake renders properties space-separated, without a WITH keyword or parens.
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
# Maps Snowflake's many date-part abbreviations and synonyms to a canonical date part name.
DATE_PART_MAPPING = {'Y': 'YEAR', 'YY': 'YEAR', 'YYY': 'YEAR', 'YYYY': 'YEAR', 'YR': 'YEAR', 'YEARS': 'YEAR', 'YRS': 'YEAR', 'MM': 'MONTH', 'MON': 'MONTH', 'MONS': 'MONTH', 'MONTHS': 'MONTH', 'D': 'DAY', 'DD': 'DAY', 'DAYS': 'DAY', 'DAYOFMONTH': 'DAY', 'WEEKDAY': 'DAYOFWEEK', 'DOW': 'DAYOFWEEK', 'DW': 'DAYOFWEEK', 'WEEKDAY_ISO': 'DAYOFWEEKISO', 'DOW_ISO': 'DAYOFWEEKISO', 'DW_ISO': 'DAYOFWEEKISO', 'YEARDAY': 'DAYOFYEAR', 'DOY': 'DAYOFYEAR', 'DY': 'DAYOFYEAR', 'W': 'WEEK', 'WK': 'WEEK', 'WEEKOFYEAR': 'WEEK', 'WOY': 'WEEK', 'WY': 'WEEK', 'WEEK_ISO': 'WEEKISO', 'WEEKOFYEARISO': 'WEEKISO', 'WEEKOFYEAR_ISO': 'WEEKISO', 'Q': 'QUARTER', 'QTR': 'QUARTER', 'QTRS': 'QUARTER', 'QUARTERS': 'QUARTER', 'H': 'HOUR', 'HH': 'HOUR', 'HR': 'HOUR', 'HOURS': 'HOUR', 'HRS': 'HOUR', 'M': 'MINUTE', 'MI': 'MINUTE', 'MIN': 'MINUTE', 'MINUTES': 'MINUTE', 'MINS': 'MINUTE', 'S': 'SECOND', 'SEC': 'SECOND', 'SECONDS': 'SECOND', 'SECS': 'SECOND', 'MS': 'MILLISECOND', 'MSEC': 'MILLISECOND', 'MILLISECONDS': 'MILLISECOND', 'US': 'MICROSECOND', 'USEC': 'MICROSECOND', 'MICROSECONDS': 'MICROSECOND', 'NS': 'NANOSECOND', 'NSEC': 'NANOSECOND', 'NANOSEC': 'NANOSECOND', 'NSECOND': 'NANOSECOND', 'NSECONDS': 'NANOSECOND', 'NANOSECS': 'NANOSECOND', 'EPOCH': 'EPOCH_SECOND', 'EPOCH_SECONDS': 'EPOCH_SECOND', 'EPOCH_MILLISECONDS': 'EPOCH_MILLISECOND', 'EPOCH_MICROSECONDS': 'EPOCH_MICROSECOND', 'EPOCH_NANOSECONDS': 'EPOCH_NANOSECOND', 'TZH': 'TIMEZONE_HOUR', 'TZM': 'TIMEZONE_MINUTE'}
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    """Snowflake dialect: uppercase identifier normalization, Snowflake time
    formats, and custom Parser/Tokenizer/Generator subclasses."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens mapped to Python strftime equivalents.
    # NOTE(review): "dd" maps to "%-d" while "DD" maps to "%d" — confirm the
    # lowercase/uppercase asymmetry is intentional.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
399
400    def quote_identifier(self, expression: E, identify: bool = True) -> E:
401        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
402        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
403        if (
404            isinstance(expression, exp.Identifier)
405            and isinstance(expression.parent, exp.Table)
406            and expression.name.lower() == "dual"
407        ):
408            return t.cast(E, expression)
409
410        return super().quote_identifier(expression, identify=identify)
411
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names mapped to AST builders; lambdas reorder or
        # normalize arguments into the canonical sqlglot expression shape.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD(part, value, expr) -> DateAdd(this=expr, expression=value, unit=part)
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Drop the base parser's special TRIM(... FROM ...) handling so TRIM
        # parses as a regular function call.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            # colon is Snowflake's semi-structured path access operator, e.g. col:a.b
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "COLUMNS": _show_parser("COLUMNS"),
        }

        # Punctuation allowed inside a staged file path, e.g. @ns.%table/dir/file
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed output columns of Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
515
516        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
517            if is_map:
518                # Keys are strings in Snowflake's objects, see also:
519                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
520                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
521                return self._parse_slice(self._parse_string())
522
523            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
524
525        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
526            lateral = super()._parse_lateral()
527            if not lateral:
528                return lateral
529
530            if isinstance(lateral.this, exp.Explode):
531                table_alias = lateral.args.get("alias")
532                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
533                if table_alias and not table_alias.args.get("columns"):
534                    table_alias.set("columns", columns)
535                elif not table_alias:
536                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
537
538            return lateral
539
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Optionally parse a time-travel AT/BEFORE clause after a table reference.

            On an incomplete clause, the parser is rewound so the tokens can be
            re-interpreted as something else (e.g. an alias named AT).
            """
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # kind is the parenthesized keyword from HISTORICAL_DATA_KIND
                # (defined on the base parser); falsy when that form is absent.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                # The value follows a => arrow, e.g. AT(TIMESTAMP => '...').
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: rewind.
                    self._retreat(index)

            return table
562
        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, including staged-file references.

            Staged files are either a quoted path string or an @stage path, and
            may carry (FILE_FORMAT => ..., PATTERN => ...) options.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (possibly qualified) name.
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            # Either form may be followed by an AT/BEFORE time-travel clause.
            return self._parse_at_before(table)
592
593        def _parse_id_var(
594            self,
595            any_token: bool = True,
596            tokens: t.Optional[t.Collection[TokenType]] = None,
597        ) -> t.Optional[exp.Expression]:
598            if self._match_text_seq("IDENTIFIER", "("):
599                identifier = (
600                    super()._parse_id_var(any_token=any_token, tokens=tokens)
601                    or self._parse_string()
602                )
603                self._match_r_paren()
604                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
605
606            return super()._parse_id_var(any_token=any_token, tokens=tokens)
607
608        def _parse_show_snowflake(self, this: str) -> exp.Show:
609            scope = None
610            scope_kind = None
611
612            like = self._parse_string() if self._match(TokenType.LIKE) else None
613
614            if self._match(TokenType.IN):
615                if self._match_text_seq("ACCOUNT"):
616                    scope_kind = "ACCOUNT"
617                elif self._match_set(self.DB_CREATABLES):
618                    scope_kind = self._prev.text
619                    if self._curr:
620                        scope = self._parse_table()
621                elif self._curr:
622                    scope_kind = "TABLE"
623                    scope = self._parse_table()
624
625            return self.expression(
626                exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind
627            )
628
        def _parse_alter_table_swap(self) -> exp.SwapTable:
            # Consume the WITH keyword of ALTER TABLE ... SWAP WITH <table>.
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
632
        def _parse_location(self) -> exp.LocationProperty:
            # LOCATION [=] @stage/path — the equals sign is optional.
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())
636
637        def _parse_location_path(self) -> exp.Var:
638            parts = [self._advance_any(ignore_reserved=True)]
639
640            # We avoid consuming a comma token because external tables like @foo and @bar
641            # can be joined in a query with a comma separator.
642            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
643                parts.append(self._advance_any(ignore_reserved=True))
644
645            return exp.var("".join(part.text for part in parts if part))
646
    class Tokenizer(tokens.Tokenizer):
        # A quote inside a string can be escaped with a backslash or by doubling it.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        # Snowflake keyword spellings and type synonyms mapped to token types.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces parameters such as $1.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW gets a real parser (see Parser.STATEMENT_PARSERS) instead of
        # being tokenized as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
684
    class Generator(generator.Generator):
        # Feature flags describing what Snowflake SQL supports when generating.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
696
        # Expression-to-SQL renderers: entries either rename a function or
        # restructure its arguments into the form Snowflake expects.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            # CONVERT_TIMEZONE takes the zone before the value.
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive,
            # hence the +1 (the inverse of the -1 applied when parsing).
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
779
        # Snowflake-specific type names; plain TIMESTAMP is rendered as the NTZ variant.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * modifiers: Snowflake spells EXCEPT/REPLACE as EXCLUDE/RENAME.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # SET and VOLATILE table properties cannot be rendered for Snowflake.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
795
796        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
797            milli = expression.args.get("milli")
798            if milli is not None:
799                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
800                expression.set("nano", milli_to_nano)
801
802            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
803
804        def trycast_sql(self, expression: exp.TryCast) -> str:
805            value = expression.this
806
807            if value.type is None:
808                from sqlglot.optimizer.annotate_types import annotate_types
809
810                value = annotate_types(value)
811
812            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
813                return super().trycast_sql(expression)
814
815            # TRY_CAST only works for string values in Snowflake
816            return self.cast_sql(expression)
817
818        def log_sql(self, expression: exp.Log) -> str:
819            if not expression.expression:
820                return self.func("LN", expression.this)
821
822            return super().log_sql(expression)
823
824        def unnest_sql(self, expression: exp.Unnest) -> str:
825            unnest_alias = expression.args.get("alias")
826            offset = expression.args.get("offset")
827
828            columns = [
829                exp.to_identifier("seq"),
830                exp.to_identifier("key"),
831                exp.to_identifier("path"),
832                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
833                seq_get(unnest_alias.columns if unnest_alias else [], 0)
834                or exp.to_identifier("value"),
835                exp.to_identifier("this"),
836            ]
837
838            if unnest_alias:
839                unnest_alias.set("columns", columns)
840            else:
841                unnest_alias = exp.TableAlias(this="_u", columns=columns)
842
843            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
844            alias = self.sql(unnest_alias)
845            alias = f" AS {alias}" if alias else ""
846            return f"{explode}{alias}"
847
848        def show_sql(self, expression: exp.Show) -> str:
849            like = self.sql(expression, "like")
850            like = f" LIKE {like}" if like else ""
851
852            scope = self.sql(expression, "scope")
853            scope = f" {scope}" if scope else ""
854
855            scope_kind = self.sql(expression, "scope_kind")
856            if scope_kind:
857                scope_kind = f" IN {scope_kind}"
858
859            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
860
861        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
862            # Other dialects don't support all of the following parameters, so we need to
863            # generate default values as necessary to ensure the transpilation is correct
864            group = expression.args.get("group")
865            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
866            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
867            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
868
869            return self.func(
870                "REGEXP_SUBSTR",
871                expression.this,
872                expression.expression,
873                position,
874                occurrence,
875                parameters,
876                group,
877            )
878
879        def except_op(self, expression: exp.Except) -> str:
880            if not expression.args.get("distinct", False):
881                self.unsupported("EXCEPT with All is not supported in Snowflake")
882            return super().except_op(expression)
883
884        def intersect_op(self, expression: exp.Intersect) -> str:
885            if not expression.args.get("distinct", False):
886                self.unsupported("INTERSECT with All is not supported in Snowflake")
887            return super().intersect_op(expression)
888
889        def describe_sql(self, expression: exp.Describe) -> str:
890            # Default to table if kind is unknown
891            kind_value = expression.args.get("kind") or "TABLE"
892            kind = f" {kind_value}" if kind_value else ""
893            this = f" {self.sql(expression, 'this')}"
894            expressions = self.expressions(expression, flat=True)
895            expressions = f" {expressions}" if expressions else ""
896            return f"DESCRIBE{kind}{this}{expressions}"
897
898        def generatedasidentitycolumnconstraint_sql(
899            self, expression: exp.GeneratedAsIdentityColumnConstraint
900        ) -> str:
901            start = expression.args.get("start")
902            start = f" START {start}" if start else ""
903            increment = expression.args.get("increment")
904            increment = f" INCREMENT {increment}" if increment else ""
905            return f"AUTOINCREMENT{start}{increment}"
906
907        def swaptable_sql(self, expression: exp.SwapTable) -> str:
908            this = self.sql(expression, "this")
909            return f"SWAP WITH {this}"
910
        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake renders properties space-separated, without a WITH keyword or parens.
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Indicates the default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last".

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Determines whether or not user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Determines whether or not SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Determines whether or not a size in the table sample clause represents a percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime format.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
400    def quote_identifier(self, expression: E, identify: bool = True) -> E:
401        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
402        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
403        if (
404            isinstance(expression, exp.Identifier)
405            and isinstance(expression.parent, exp.Table)
406            and expression.name.lower() == "dual"
407        ):
408            return t.cast(E, expression)
409
410        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
412    class Parser(parser.Parser):
413        IDENTIFY_PIVOT_STRINGS = True
414
415        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
416
417        FUNCTIONS = {
418            **parser.Parser.FUNCTIONS,
419            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
420            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
421            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
422                this=seq_get(args, 1), expression=seq_get(args, 0)
423            ),
424            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
425            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
426                start=seq_get(args, 0),
427                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
428                step=seq_get(args, 2),
429            ),
430            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
431            "BITXOR": binary_from_function(exp.BitwiseXor),
432            "BIT_XOR": binary_from_function(exp.BitwiseXor),
433            "BOOLXOR": binary_from_function(exp.Xor),
434            "CONVERT_TIMEZONE": _parse_convert_timezone,
435            "DATE_TRUNC": _date_trunc_to_time,
436            "DATEADD": lambda args: exp.DateAdd(
437                this=seq_get(args, 2),
438                expression=seq_get(args, 1),
439                unit=_map_date_part(seq_get(args, 0)),
440            ),
441            "DATEDIFF": _parse_datediff,
442            "DIV0": _div0_to_if,
443            "FLATTEN": exp.Explode.from_arg_list,
444            "IFF": exp.If.from_arg_list,
445            "LAST_DAY": lambda args: exp.LastDay(
446                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
447            ),
448            "LISTAGG": exp.GroupConcat.from_arg_list,
449            "NULLIFZERO": _nullifzero_to_if,
450            "OBJECT_CONSTRUCT": _parse_object_construct,
451            "REGEXP_REPLACE": _parse_regexp_replace,
452            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
453            "RLIKE": exp.RegexpLike.from_arg_list,
454            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
455            "TIMEDIFF": _parse_datediff,
456            "TIMESTAMPDIFF": _parse_datediff,
457            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
458            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
459            "TO_TIMESTAMP": _parse_to_timestamp,
460            "TO_VARCHAR": exp.ToChar.from_arg_list,
461            "ZEROIFNULL": _zeroifnull_to_if,
462        }
463
464        FUNCTION_PARSERS = {
465            **parser.Parser.FUNCTION_PARSERS,
466            "DATE_PART": _parse_date_part,
467            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
468        }
469        FUNCTION_PARSERS.pop("TRIM")
470
471        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
472
473        RANGE_PARSERS = {
474            **parser.Parser.RANGE_PARSERS,
475            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
476            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
477            TokenType.COLON: _parse_colon_get_path,
478        }
479
480        ALTER_PARSERS = {
481            **parser.Parser.ALTER_PARSERS,
482            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
483            "UNSET": lambda self: self.expression(
484                exp.Set,
485                tag=self._match_text_seq("TAG"),
486                expressions=self._parse_csv(self._parse_id_var),
487                unset=True,
488            ),
489            "SWAP": lambda self: self._parse_alter_table_swap(),
490        }
491
492        STATEMENT_PARSERS = {
493            **parser.Parser.STATEMENT_PARSERS,
494            TokenType.SHOW: lambda self: self._parse_show(),
495        }
496
497        PROPERTY_PARSERS = {
498            **parser.Parser.PROPERTY_PARSERS,
499            "LOCATION": lambda self: self._parse_location(),
500        }
501
502        SHOW_PARSERS = {
503            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
504            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
505            "COLUMNS": _show_parser("COLUMNS"),
506        }
507
508        STAGED_FILE_SINGLE_TOKENS = {
509            TokenType.DOT,
510            TokenType.MOD,
511            TokenType.SLASH,
512        }
513
514        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
515
516        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
517            if is_map:
518                # Keys are strings in Snowflake's objects, see also:
519                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
520                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
521                return self._parse_slice(self._parse_string())
522
523            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
524
525        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
526            lateral = super()._parse_lateral()
527            if not lateral:
528                return lateral
529
530            if isinstance(lateral.this, exp.Explode):
531                table_alias = lateral.args.get("alias")
532                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
533                if table_alias and not table_alias.args.get("columns"):
534                    table_alias.set("columns", columns)
535                elif not table_alias:
536                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
537
538            return lateral
539
540        def _parse_at_before(self, table: exp.Table) -> exp.Table:
541            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
542            index = self._index
543            if self._match_texts(("AT", "BEFORE")):
544                this = self._prev.text.upper()
545                kind = (
546                    self._match(TokenType.L_PAREN)
547                    and self._match_texts(self.HISTORICAL_DATA_KIND)
548                    and self._prev.text.upper()
549                )
550                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
551
552                if expression:
553                    self._match_r_paren()
554                    when = self.expression(
555                        exp.HistoricalData, this=this, kind=kind, expression=expression
556                    )
557                    table.set("when", when)
558                else:
559                    self._retreat(index)
560
561            return table
562
563        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
564            # https://docs.snowflake.com/en/user-guide/querying-stage
565            if self._match(TokenType.STRING, advance=False):
566                table = self._parse_string()
567            elif self._match_text_seq("@", advance=False):
568                table = self._parse_location_path()
569            else:
570                table = None
571
572            if table:
573                file_format = None
574                pattern = None
575
576                self._match(TokenType.L_PAREN)
577                while self._curr and not self._match(TokenType.R_PAREN):
578                    if self._match_text_seq("FILE_FORMAT", "=>"):
579                        file_format = self._parse_string() or super()._parse_table_parts()
580                    elif self._match_text_seq("PATTERN", "=>"):
581                        pattern = self._parse_string()
582                    else:
583                        break
584
585                    self._match(TokenType.COMMA)
586
587                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
588            else:
589                table = super()._parse_table_parts(schema=schema)
590
591            return self._parse_at_before(table)
592
593        def _parse_id_var(
594            self,
595            any_token: bool = True,
596            tokens: t.Optional[t.Collection[TokenType]] = None,
597        ) -> t.Optional[exp.Expression]:
598            if self._match_text_seq("IDENTIFIER", "("):
599                identifier = (
600                    super()._parse_id_var(any_token=any_token, tokens=tokens)
601                    or self._parse_string()
602                )
603                self._match_r_paren()
604                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
605
606            return super()._parse_id_var(any_token=any_token, tokens=tokens)
607
608        def _parse_show_snowflake(self, this: str) -> exp.Show:
609            scope = None
610            scope_kind = None
611
612            like = self._parse_string() if self._match(TokenType.LIKE) else None
613
614            if self._match(TokenType.IN):
615                if self._match_text_seq("ACCOUNT"):
616                    scope_kind = "ACCOUNT"
617                elif self._match_set(self.DB_CREATABLES):
618                    scope_kind = self._prev.text
619                    if self._curr:
620                        scope = self._parse_table()
621                elif self._curr:
622                    scope_kind = "TABLE"
623                    scope = self._parse_table()
624
625            return self.expression(
626                exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind
627            )
628
629        def _parse_alter_table_swap(self) -> exp.SwapTable:
630            self._match_text_seq("WITH")
631            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
632
633        def _parse_location(self) -> exp.LocationProperty:
634            self._match(TokenType.EQ)
635            return self.expression(exp.LocationProperty, this=self._parse_location_path())
636
637        def _parse_location_path(self) -> exp.Var:
638            parts = [self._advance_any(ignore_reserved=True)]
639
640            # We avoid consuming a comma token because external tables like @foo and @bar
641            # can be joined in a query with a comma separator.
642            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
643                parts.append(self._advance_any(ignore_reserved=True))
644
645            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.INT256: 'INT256'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.MODEL: 'MODEL'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ALL: 'ALL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.DELETE: 'DELETE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UINT128: 'UINT128'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.BIT: 'BIT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.ANY: 'ANY'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NEXT: 'NEXT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CASE: 'CASE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.FALSE: 'FALSE'>, <TokenType.KILL: 'KILL'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.UUID: 'UUID'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.SET: 'SET'>, <TokenType.DATABASE: 'DATABASE'>, 
<TokenType.INET: 'INET'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.INT: 'INT'>, <TokenType.ROW: 'ROW'>, <TokenType.DATE: 'DATE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DIV: 'DIV'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UINT: 'UINT'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.JSON: 'JSON'>, <TokenType.KEEP: 'KEEP'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.CACHE: 'CACHE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.XML: 'XML'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.VAR: 'VAR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.ANTI: 'ANTI'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.USE: 'USE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SOME: 'SOME'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.SEMI: 'SEMI'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.FILTER: 'FILTER'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.ASC: 'ASC'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.MONEY: 'MONEY'>, <TokenType.INT128: 'INT128'>, <TokenType.UINT256: 'UINT256'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.IS: 'IS'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, 
<TokenType.ARRAY: 'ARRAY'>, <TokenType.END: 'END'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.NULL: 'NULL'>, <TokenType.MAP: 'MAP'>, <TokenType.TOP: 'TOP'>, <TokenType.VIEW: 'VIEW'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TRUE: 'TRUE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.DESC: 'DESC'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.TIME: 'TIME'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.SUPER: 'SUPER'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _parse_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 
'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GET_PATH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GetPath'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function parse_logarithm>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 
'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _parse_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 
'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _parse_convert_timezone>, 'DATEADD': <function Snowflake.Parser.<lambda>>, 'DIV0': <function _div0_to_if>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _nullifzero_to_if>, 'OBJECT_CONSTRUCT': <function _parse_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEDIFF': <function _parse_datediff>, 'TIMESTAMPDIFF': <function _parse_datediff>, 'TO_TIMESTAMP': <function _parse_to_timestamp>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _zeroifnull_to_if>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function _parse_date_part>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.COLON: 'COLON'>: <function _parse_colon_get_path>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function 
Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SHOW_TRIE: Dict = {'PRIMARY': {'KEYS': {0: True}}, 'TERSE': {'PRIMARY': {'KEYS': {0: True}}}, 'COLUMNS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
class Tokenizer(tokens.Tokenizer):
    """Snowflake-specific tokenizer configuration.

    Extends the base tokenizer with Snowflake's literal syntax (dollar-quoted
    raw strings, `x'..'` hex strings, `//` line comments) and its dialect
    keywords / type aliases.
    """

    # Snowflake accepts both backslash escapes and doubled single quotes
    # inside string literals.
    STRING_ESCAPES = ["\\", "'"]

    # Hexadecimal string literals: x'ABCD' / X'ABCD'.
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]

    # Dollar-quoted string constants: $$ ... $$.
    RAW_STRINGS = ["$$"]

    # Snowflake supports `//` line comments in addition to the usual forms.
    COMMENTS = ["--", "//", ("/*", "*/")]

    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        # Numeric / character type aliases.
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "NCHAR VARYING": TokenType.VARCHAR,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        # Set-operation / projection keywords.
        "EXCLUDE": TokenType.EXCEPT,
        "MINUS": TokenType.EXCEPT,
        # Pattern-matching extensions.
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        # Commands / misc.
        "PUT": TokenType.COMMAND,
        "RENAME": TokenType.REPLACE,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "TOP": TokenType.TOP,
        # Timestamp type aliases (local / no / explicit time zone).
        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPNTZ": TokenType.TIMESTAMP,
    }

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        # `$` introduces session parameters / positional stage references.
        "$": TokenType.PARAMETER,
    }

    # `$` may also appear inside identifiers/variables.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is parsed as a real statement in Snowflake, not a raw command.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': 
<TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': 
<TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 
'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': 
<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': 
<TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>, <TokenType.EXECUTE: 'EXECUTE'>}
class Snowflake.Generator(sqlglot.generator.Generator):
    class Generator(generator.Generator):
        """Generates Snowflake SQL text from a sqlglot expression tree."""

        # Prefix used for bind parameters/placeholders, e.g. $1.
        PARAMETER_TOKEN = "$"
        # Capability/format switches consumed by the base Generator.
        # NOTE(review): the exact semantics of each flag are defined in
        # sqlglot.generator.Generator; the values here reflect what Snowflake's
        # SQL syntax supports (e.g. COLLATE is called as a function, LIMIT only
        # accepts literal values).
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        # AST-node -> SQL rendering overrides layered on top of the base
        # generator's TRANSFORMS. Most entries rename a function to its
        # Snowflake spelling; lambdas handle argument reordering or rewrites.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Operands swapped: ARRAY_CONTAINS takes the value first, then the array.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            # CONVERT_TIMEZONE takes the target zone before the timestamp.
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # ARRAY_GENERATE_RANGE's upper bound is exclusive, hence end + 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            # JSON access is rendered with bracket syntax: this[expression].
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            # Flatten each key/value pair into OBJECT_CONSTRUCT's positional args.
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            # TIMESTAMPDIFF's argument order is (unit, start, end).
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
779
        # Snowflake's plain TIMESTAMP is spelled as the no-time-zone variant.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * modifiers use Snowflake's EXCLUDE/RENAME keywords rather
        # than EXCEPT/REPLACE.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # SET and VOLATILE table properties have no Snowflake rendering.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
796        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
797            milli = expression.args.get("milli")
798            if milli is not None:
799                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
800                expression.set("nano", milli_to_nano)
801
802            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
803
804        def trycast_sql(self, expression: exp.TryCast) -> str:
805            value = expression.this
806
807            if value.type is None:
808                from sqlglot.optimizer.annotate_types import annotate_types
809
810                value = annotate_types(value)
811
812            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
813                return super().trycast_sql(expression)
814
815            # TRY_CAST only works for string values in Snowflake
816            return self.cast_sql(expression)
817
818        def log_sql(self, expression: exp.Log) -> str:
819            if not expression.expression:
820                return self.func("LN", expression.this)
821
822            return super().log_sql(expression)
823
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes a fixed set of output columns (seq, key, path,
            index, value, this), so a table alias is always attached mapping
            those columns; the caller's requested aliases are threaded into
            the index/value positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # Reuse the requested offset alias for FLATTEN's INDEX column;
                # pop() detaches it from the original expression tree.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied alias column names the VALUE column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias given: synthesize one so the columns are addressable.
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
847
848        def show_sql(self, expression: exp.Show) -> str:
849            like = self.sql(expression, "like")
850            like = f" LIKE {like}" if like else ""
851
852            scope = self.sql(expression, "scope")
853            scope = f" {scope}" if scope else ""
854
855            scope_kind = self.sql(expression, "scope_kind")
856            if scope_kind:
857                scope_kind = f" IN {scope_kind}"
858
859            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
860
861        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
862            # Other dialects don't support all of the following parameters, so we need to
863            # generate default values as necessary to ensure the transpilation is correct
864            group = expression.args.get("group")
865            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
866            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
867            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
868
869            return self.func(
870                "REGEXP_SUBSTR",
871                expression.this,
872                expression.expression,
873                position,
874                occurrence,
875                parameters,
876                group,
877            )
878
879        def except_op(self, expression: exp.Except) -> str:
880            if not expression.args.get("distinct", False):
881                self.unsupported("EXCEPT with All is not supported in Snowflake")
882            return super().except_op(expression)
883
884        def intersect_op(self, expression: exp.Intersect) -> str:
885            if not expression.args.get("distinct", False):
886                self.unsupported("INTERSECT with All is not supported in Snowflake")
887            return super().intersect_op(expression)
888
889        def describe_sql(self, expression: exp.Describe) -> str:
890            # Default to table if kind is unknown
891            kind_value = expression.args.get("kind") or "TABLE"
892            kind = f" {kind_value}" if kind_value else ""
893            this = f" {self.sql(expression, 'this')}"
894            expressions = self.expressions(expression, flat=True)
895            expressions = f" {expressions}" if expressions else ""
896            return f"DESCRIBE{kind}{this}{expressions}"
897
898        def generatedasidentitycolumnconstraint_sql(
899            self, expression: exp.GeneratedAsIdentityColumnConstraint
900        ) -> str:
901            start = expression.args.get("start")
902            start = f" START {start}" if start else ""
903            increment = expression.args.get("increment")
904            increment = f" INCREMENT {increment}" if increment else ""
905            return f"AUTOINCREMENT{start}{increment}"
906
907        def swaptable_sql(self, expression: exp.SwapTable) -> str:
908            this = self.sql(expression, "this")
909            return f"SWAP WITH {this}"
910
911        def with_properties(self, properties: exp.Properties) -> str:
912            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayJoin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DataType'>: <function _datatype_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: 
<function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Struct'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function 
date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.UnixToTime'>: <function _unix_to_time_sql>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 
'POST_WITH'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def timestampfromparts_sql(self, expression: sqlglot.expressions.TimestampFromParts) -> str:
796        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
797            milli = expression.args.get("milli")
798            if milli is not None:
799                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
800                expression.set("nano", milli_to_nano)
801
802            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
804        def trycast_sql(self, expression: exp.TryCast) -> str:
805            value = expression.this
806
807            if value.type is None:
808                from sqlglot.optimizer.annotate_types import annotate_types
809
810                value = annotate_types(value)
811
812            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
813                return super().trycast_sql(expression)
814
815            # TRY_CAST only works for string values in Snowflake
816            return self.cast_sql(expression)
def log_sql(self, expression: sqlglot.expressions.Log) -> str:
818        def log_sql(self, expression: exp.Log) -> str:
819            if not expression.expression:
820                return self.func("LN", expression.this)
821
822            return super().log_sql(expression)
def unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str:
824        def unnest_sql(self, expression: exp.Unnest) -> str:
825            unnest_alias = expression.args.get("alias")
826            offset = expression.args.get("offset")
827
828            columns = [
829                exp.to_identifier("seq"),
830                exp.to_identifier("key"),
831                exp.to_identifier("path"),
832                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
833                seq_get(unnest_alias.columns if unnest_alias else [], 0)
834                or exp.to_identifier("value"),
835                exp.to_identifier("this"),
836            ]
837
838            if unnest_alias:
839                unnest_alias.set("columns", columns)
840            else:
841                unnest_alias = exp.TableAlias(this="_u", columns=columns)
842
843            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
844            alias = self.sql(unnest_alias)
845            alias = f" AS {alias}" if alias else ""
846            return f"{explode}{alias}"
def show_sql(self, expression: sqlglot.expressions.Show) -> str:
848        def show_sql(self, expression: exp.Show) -> str:
849            like = self.sql(expression, "like")
850            like = f" LIKE {like}" if like else ""
851
852            scope = self.sql(expression, "scope")
853            scope = f" {scope}" if scope else ""
854
855            scope_kind = self.sql(expression, "scope_kind")
856            if scope_kind:
857                scope_kind = f" IN {scope_kind}"
858
859            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
def regexpextract_sql(self, expression: sqlglot.expressions.RegexpExtract) -> str:
861        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
862            # Other dialects don't support all of the following parameters, so we need to
863            # generate default values as necessary to ensure the transpilation is correct
864            group = expression.args.get("group")
865            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
866            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
867            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
868
869            return self.func(
870                "REGEXP_SUBSTR",
871                expression.this,
872                expression.expression,
873                position,
874                occurrence,
875                parameters,
876                group,
877            )
def except_op(self, expression: sqlglot.expressions.Except) -> str:
879        def except_op(self, expression: exp.Except) -> str:
880            if not expression.args.get("distinct", False):
881                self.unsupported("EXCEPT with All is not supported in Snowflake")
882            return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Snowflake only supports INTERSECT [DISTINCT]; flag INTERSECT ALL as unsupported."""
    is_distinct = expression.args.get("distinct", False)
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render a DESCRIBE statement, defaulting the object kind to TABLE.

    The original code guarded ``kind`` with ``if kind_value else ""`` even
    though the ``or "TABLE"`` fallback makes ``kind_value`` always truthy;
    that dead branch is removed here.
    """
    kind_value = expression.args.get("kind") or "TABLE"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE {kind_value}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as Snowflake's AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the SWAP WITH clause of Snowflake's ALTER TABLE ... SWAP WITH."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def with_properties(self, properties: exp.Properties) -> str:
    """Emit properties space-separated with a leading segment, no WITH wrapper."""
    leading = self.seg("")
    return self.properties(properties, wrapped=False, prefix=leading, sep=" ")
SELECT_KINDS: Tuple[str, ...] = ()
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
EXPRESSIONS_WITHOUT_NESTED_CTES
KEY_VALUE_DEFINITIONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql