sqlglot.tokens
from __future__ import annotations

import os
import typing as t
from enum import auto

from sqlglot.errors import SqlglotError, TokenError
from sqlglot.helper import AutoName
from sqlglot.trie import TrieResult, in_trie, new_trie

if t.TYPE_CHECKING:
    from sqlglot.dialects.dialect import DialectType


try:
    from sqlglotrs import (  # type: ignore
        Tokenizer as RsTokenizer,
        TokenizerDialectSettings as RsTokenizerDialectSettings,
        TokenizerSettings as RsTokenizerSettings,
        TokenTypeSettings as RsTokenTypeSettings,
    )

    USE_RS_TOKENIZER = os.environ.get("SQLGLOTRS_TOKENIZER", "1") == "1"
except ImportError:
    USE_RS_TOKENIZER = False


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DCOLON = auto()
    DQMARK = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    COLON_EQ = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE = auto()
    PIPE_SLASH = auto()
    DPIPE_SLASH = auto()
    CARET = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    DAT = auto()
    LT_AT = auto()
    AT_GT = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION_PARAMETER = auto()
    DAMP = auto()
    XOR = auto()
    DSTAR = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    DATABASE = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    WAREHOUSE = auto()
    STREAMLIT = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()
    NATIONAL_STRING = auto()
    RAW_STRING = auto()
    HEREDOC_STRING = auto()
    UNICODE_STRING = auto()

    # types
    BIT = auto()
    BOOLEAN = auto()
    TINYINT = auto()
    UTINYINT = auto()
    SMALLINT = auto()
    USMALLINT = auto()
    MEDIUMINT = auto()
    UMEDIUMINT = auto()
    INT = auto()
    UINT = auto()
    BIGINT = auto()
    UBIGINT = auto()
    INT128 = auto()
    UINT128 = auto()
    INT256 = auto()
    UINT256 = auto()
    FLOAT = auto()
    DOUBLE = auto()
    DECIMAL = auto()
    UDECIMAL = auto()
    BIGDECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    BPCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    TINYBLOB = auto()
    TINYTEXT = auto()
    NAME = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMETZ = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    TIMESTAMPNTZ = auto()
    TIMESTAMP_S = auto()
    TIMESTAMP_MS = auto()
    TIMESTAMP_NS = auto()
    DATETIME = auto()
    DATETIME64 = auto()
    DATE = auto()
    DATE32 = auto()
    INT4RANGE = auto()
    INT4MULTIRANGE = auto()
    INT8RANGE = auto()
    INT8MULTIRANGE = auto()
    NUMRANGE = auto()
    NUMMULTIRANGE = auto()
    TSRANGE = auto()
    TSMULTIRANGE = auto()
    TSTZRANGE = auto()
    TSTZMULTIRANGE = auto()
    DATERANGE = auto()
    DATEMULTIRANGE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    YEAR = auto()
    UNIQUEIDENTIFIER = auto()
    USERDEFINED = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()
    INET = auto()
    IPADDRESS = auto()
    IPPREFIX = auto()
    IPV4 = auto()
    IPV6 = auto()
    ENUM = auto()
    ENUM8 = auto()
    ENUM16 = auto()
    FIXEDSTRING = auto()
    LOWCARDINALITY = auto()
    NESTED = auto()
    AGGREGATEFUNCTION = auto()
    SIMPLEAGGREGATEFUNCTION = auto()
    TDIGEST = auto()
    UNKNOWN = auto()
    VECTOR = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALWAYS = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    CACHE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    CONNECT_BY = auto()
    CONSTRAINT = auto()
    COPY = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    CURRENT_USER = auto()
    DECLARE = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DICTIONARY = auto()
    DISTINCT = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOR = auto()
    FORCE = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GLOB = auto()
    GLOBAL = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IGNORE = auto()
    ILIKE = auto()
    ILIKE_ANY = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    JOIN_MARKER = auto()
    KEEP = auto()
    KEY = auto()
    KILL = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LEFT = auto()
    LIKE = auto()
    LIKE_ANY = auto()
    LIMIT = auto()
    LIST = auto()
    LOAD = auto()
    LOCK = auto()
    MAP = auto()
    MATCH_CONDITION = auto()
    MATCH_RECOGNIZE = auto()
    MEMBER_OF = auto()
    MERGE = auto()
    MOD = auto()
    MODEL = auto()
    NATURAL = auto()
    NEXT = auto()
    NOTNULL = auto()
    NULL = auto()
    OBJECT_IDENTIFIER = auto()
    OFFSET = auto()
    ON = auto()
    ONLY = auto()
    OPERATOR = auto()
    ORDER_BY = auto()
    ORDER_SIBLINGS_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OVER = auto()
    OVERLAPS = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    POSITIONAL = auto()
    PRAGMA = auto()
    PREWHERE = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REFRESH = auto()
    RENAME = auto()
    REPLACE = auto()
    RETURNING = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SEQUENCE = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SETTINGS = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORT_BY = auto()
    START_WITH = auto()
    STORAGE_INTEGRATION = auto()
    STRAIGHT_JOIN = auto()
    STRUCT = auto()
    SUMMARIZE = auto()
    TABLE_SAMPLE = auto()
    TAG = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRUE = auto()
    TRUNCATE = auto()
    UNCACHE = auto()
    UNION = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    UNIQUE = auto()
    VERSION_SNAPSHOT = auto()
    TIMESTAMP_SNAPSHOT = auto()
    OPTION = auto()


_ALL_TOKEN_TYPES = list(TokenType)
_TOKEN_TYPE_TO_INDEX = {token_type: i for i, token_type in enumerate(_ALL_TOKEN_TYPES)}


class Token:
    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        start: int = 0,
        end: int = 0,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        """Token initializer.

        Args:
            token_type: The TokenType Enum.
            text: The text of the token.
            line: The line that the token ends on.
            col: The column that the token ends on.
            start: The start index of the token.
            end: The ending index of the token.
            comments: The comments to attach to the token.
443 """ 444 self.token_type = token_type 445 self.text = text 446 self.line = line 447 self.col = col 448 self.start = start 449 self.end = end 450 self.comments = [] if comments is None else comments 451 452 def __repr__(self) -> str: 453 attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__) 454 return f"<Token {attributes}>" 455 456 457class _Tokenizer(type): 458 def __new__(cls, clsname, bases, attrs): 459 klass = super().__new__(cls, clsname, bases, attrs) 460 461 def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]: 462 return dict( 463 (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr 464 ) 465 466 def _quotes_to_format( 467 token_type: TokenType, arr: t.List[str | t.Tuple[str, str]] 468 ) -> t.Dict[str, t.Tuple[str, TokenType]]: 469 return {k: (v, token_type) for k, v in _convert_quotes(arr).items()} 470 471 klass._QUOTES = _convert_quotes(klass.QUOTES) 472 klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS) 473 474 klass._FORMAT_STRINGS = { 475 **{ 476 p + s: (e, TokenType.NATIONAL_STRING) 477 for s, e in klass._QUOTES.items() 478 for p in ("n", "N") 479 }, 480 **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS), 481 **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS), 482 **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS), 483 **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS), 484 **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS), 485 **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS), 486 } 487 488 klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) 489 klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES) 490 klass._COMMENTS = { 491 **dict( 492 (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) 493 for comment in klass.COMMENTS 494 ), 495 "{#": "#}", # Ensure Jinja comments are tokenized correctly in all dialects 496 } 497 498 klass._KEYWORD_TRIE = new_trie( 499 key.upper() 500 for key in ( 501 *klass.KEYWORDS, 502 *klass._COMMENTS, 503 *klass._QUOTES, 504 *klass._FORMAT_STRINGS, 505 ) 506 if " " in key or any(single in key for single in klass.SINGLE_TOKENS) 507 ) 508 509 if USE_RS_TOKENIZER: 510 settings = RsTokenizerSettings( 511 white_space={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.WHITE_SPACE.items()}, 512 single_tokens={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.SINGLE_TOKENS.items()}, 513 keywords={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.KEYWORDS.items()}, 514 numeric_literals=klass.NUMERIC_LITERALS, 515 identifiers=klass._IDENTIFIERS, 516 identifier_escapes=klass._IDENTIFIER_ESCAPES, 517 string_escapes=klass._STRING_ESCAPES, 518 quotes=klass._QUOTES, 519 format_strings={ 520 k: (v1, _TOKEN_TYPE_TO_INDEX[v2]) 521 for k, (v1, v2) in klass._FORMAT_STRINGS.items() 522 }, 523 has_bit_strings=bool(klass.BIT_STRINGS), 524 has_hex_strings=bool(klass.HEX_STRINGS), 525 comments=klass._COMMENTS, 526 var_single_tokens=klass.VAR_SINGLE_TOKENS, 527 commands={_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMANDS}, 528 command_prefix_tokens={ 529 _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS 530 }, 531 heredoc_tag_is_identifier=klass.HEREDOC_TAG_IS_IDENTIFIER, 532 string_escapes_allowed_in_raw_strings=klass.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS, 533 nested_comments=klass.NESTED_COMMENTS, 534 ) 535 token_types = RsTokenTypeSettings( 536 bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING], 537 break_=_TOKEN_TYPE_TO_INDEX[TokenType.BREAK], 538 dcolon=_TOKEN_TYPE_TO_INDEX[TokenType.DCOLON], 
                heredoc_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEREDOC_STRING],
                raw_string=_TOKEN_TYPE_TO_INDEX[TokenType.RAW_STRING],
                hex_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEX_STRING],
                identifier=_TOKEN_TYPE_TO_INDEX[TokenType.IDENTIFIER],
                number=_TOKEN_TYPE_TO_INDEX[TokenType.NUMBER],
                parameter=_TOKEN_TYPE_TO_INDEX[TokenType.PARAMETER],
                semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
                string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
                var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
                heredoc_string_alternative=_TOKEN_TYPE_TO_INDEX[klass.HEREDOC_STRING_ALTERNATIVE],
            )
            klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
        else:
            klass._RS_TOKENIZER = None

        return klass


class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        "#": TokenType.HASH,
        # Used for breaking a var like x'y' but nothing else; the token type doesn't matter
        "'": TokenType.UNKNOWN,
        "`": TokenType.UNKNOWN,
        '"': TokenType.UNKNOWN,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    IDENTIFIER_ESCAPES = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    # Whether the heredoc tags follow the same lexical rules as unquoted identifiers
    HEREDOC_TAG_IS_IDENTIFIER = False

    # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
    HEREDOC_STRING_ALTERNATIVE = TokenType.VAR

    # Whether string escape characters function as such when placed within raw strings
    STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True

    NESTED_COMMENTS = True

    # Autofilled
    _COMMENTS: t.Dict[str, str] = {}
    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()
    _KEYWORD_TRIE: t.Dict = {}
    _RS_TOKENIZER: t.Optional[t.Any] = None

    KEYWORDS: t.Dict[str, TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        ":=": TokenType.COLON_EQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "??": TokenType.DQMARK,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "ASOF": TokenType.ASOF,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "CONNECT BY": TokenType.CONNECT_BY,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "COPY": TokenType.COPY,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT_TIME": TokenType.CURRENT_TIME,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ENUM": TokenType.ENUM,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GEOGRAPHY": TokenType.GEOGRAPHY,
        "GEOMETRY": TokenType.GEOMETRY,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "ILIKE": TokenType.ILIKE,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "KEEP": TokenType.KEEP,
        "KILL": TokenType.KILL,
        "LATERAL": TokenType.LATERAL,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD": TokenType.LOAD,
        "LOCK": TokenType.LOCK,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "OR": TokenType.OR,
        "XOR": TokenType.XOR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
"PARTITION": TokenType.PARTITION, 748 "PARTITION BY": TokenType.PARTITION_BY, 749 "PARTITIONED BY": TokenType.PARTITION_BY, 750 "PARTITIONED_BY": TokenType.PARTITION_BY, 751 "PERCENT": TokenType.PERCENT, 752 "PIVOT": TokenType.PIVOT, 753 "PRAGMA": TokenType.PRAGMA, 754 "PRIMARY KEY": TokenType.PRIMARY_KEY, 755 "PROCEDURE": TokenType.PROCEDURE, 756 "QUALIFY": TokenType.QUALIFY, 757 "RANGE": TokenType.RANGE, 758 "RECURSIVE": TokenType.RECURSIVE, 759 "REGEXP": TokenType.RLIKE, 760 "RENAME": TokenType.RENAME, 761 "REPLACE": TokenType.REPLACE, 762 "RETURNING": TokenType.RETURNING, 763 "REFERENCES": TokenType.REFERENCES, 764 "RIGHT": TokenType.RIGHT, 765 "RLIKE": TokenType.RLIKE, 766 "ROLLBACK": TokenType.ROLLBACK, 767 "ROLLUP": TokenType.ROLLUP, 768 "ROW": TokenType.ROW, 769 "ROWS": TokenType.ROWS, 770 "SCHEMA": TokenType.SCHEMA, 771 "SELECT": TokenType.SELECT, 772 "SEMI": TokenType.SEMI, 773 "SET": TokenType.SET, 774 "SETTINGS": TokenType.SETTINGS, 775 "SHOW": TokenType.SHOW, 776 "SIMILAR TO": TokenType.SIMILAR_TO, 777 "SOME": TokenType.SOME, 778 "SORT BY": TokenType.SORT_BY, 779 "START WITH": TokenType.START_WITH, 780 "STRAIGHT_JOIN": TokenType.STRAIGHT_JOIN, 781 "TABLE": TokenType.TABLE, 782 "TABLESAMPLE": TokenType.TABLE_SAMPLE, 783 "TEMP": TokenType.TEMPORARY, 784 "TEMPORARY": TokenType.TEMPORARY, 785 "THEN": TokenType.THEN, 786 "TRUE": TokenType.TRUE, 787 "TRUNCATE": TokenType.TRUNCATE, 788 "UNION": TokenType.UNION, 789 "UNKNOWN": TokenType.UNKNOWN, 790 "UNNEST": TokenType.UNNEST, 791 "UNPIVOT": TokenType.UNPIVOT, 792 "UPDATE": TokenType.UPDATE, 793 "USE": TokenType.USE, 794 "USING": TokenType.USING, 795 "UUID": TokenType.UUID, 796 "VALUES": TokenType.VALUES, 797 "VIEW": TokenType.VIEW, 798 "VOLATILE": TokenType.VOLATILE, 799 "WHEN": TokenType.WHEN, 800 "WHERE": TokenType.WHERE, 801 "WINDOW": TokenType.WINDOW, 802 "WITH": TokenType.WITH, 803 "APPLY": TokenType.APPLY, 804 "ARRAY": TokenType.ARRAY, 805 "BIT": TokenType.BIT, 806 "BOOL": TokenType.BOOLEAN, 807 "BOOLEAN": TokenType.BOOLEAN, 808 "BYTE": TokenType.TINYINT, 809 "MEDIUMINT": TokenType.MEDIUMINT, 810 "INT1": TokenType.TINYINT, 811 "TINYINT": TokenType.TINYINT, 812 "INT16": TokenType.SMALLINT, 813 "SHORT": TokenType.SMALLINT, 814 "SMALLINT": TokenType.SMALLINT, 815 "INT128": TokenType.INT128, 816 "HUGEINT": TokenType.INT128, 817 "INT2": TokenType.SMALLINT, 818 "INTEGER": TokenType.INT, 819 "INT": TokenType.INT, 820 "INT4": TokenType.INT, 821 "INT32": TokenType.INT, 822 "INT64": TokenType.BIGINT, 823 "LONG": TokenType.BIGINT, 824 "BIGINT": TokenType.BIGINT, 825 "INT8": TokenType.TINYINT, 826 "UINT": TokenType.UINT, 827 "DEC": TokenType.DECIMAL, 828 "DECIMAL": TokenType.DECIMAL, 829 "BIGDECIMAL": TokenType.BIGDECIMAL, 830 "BIGNUMERIC": TokenType.BIGDECIMAL, 831 "LIST": TokenType.LIST, 832 "MAP": TokenType.MAP, 833 "NULLABLE": TokenType.NULLABLE, 834 "NUMBER": TokenType.DECIMAL, 835 "NUMERIC": TokenType.DECIMAL, 836 "FIXED": TokenType.DECIMAL, 837 "REAL": TokenType.FLOAT, 838 "FLOAT": TokenType.FLOAT, 839 "FLOAT4": TokenType.FLOAT, 840 "FLOAT8": TokenType.DOUBLE, 841 "DOUBLE": TokenType.DOUBLE, 842 "DOUBLE PRECISION": TokenType.DOUBLE, 843 "JSON": TokenType.JSON, 844 "JSONB": TokenType.JSONB, 845 "CHAR": TokenType.CHAR, 846 "CHARACTER": TokenType.CHAR, 847 "NCHAR": TokenType.NCHAR, 848 "VARCHAR": TokenType.VARCHAR, 849 "VARCHAR2": TokenType.VARCHAR, 850 "NVARCHAR": TokenType.NVARCHAR, 851 "NVARCHAR2": TokenType.NVARCHAR, 852 "BPCHAR": TokenType.BPCHAR, 853 "STR": TokenType.TEXT, 854 "STRING": TokenType.TEXT, 855 "TEXT": TokenType.TEXT, 
856 "LONGTEXT": TokenType.LONGTEXT, 857 "MEDIUMTEXT": TokenType.MEDIUMTEXT, 858 "TINYTEXT": TokenType.TINYTEXT, 859 "CLOB": TokenType.TEXT, 860 "LONGVARCHAR": TokenType.TEXT, 861 "BINARY": TokenType.BINARY, 862 "BLOB": TokenType.VARBINARY, 863 "LONGBLOB": TokenType.LONGBLOB, 864 "MEDIUMBLOB": TokenType.MEDIUMBLOB, 865 "TINYBLOB": TokenType.TINYBLOB, 866 "BYTEA": TokenType.VARBINARY, 867 "VARBINARY": TokenType.VARBINARY, 868 "TIME": TokenType.TIME, 869 "TIMETZ": TokenType.TIMETZ, 870 "TIMESTAMP": TokenType.TIMESTAMP, 871 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, 872 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, 873 "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ, 874 "TIMESTAMPNTZ": TokenType.TIMESTAMPNTZ, 875 "TIMESTAMP_NTZ": TokenType.TIMESTAMPNTZ, 876 "DATE": TokenType.DATE, 877 "DATETIME": TokenType.DATETIME, 878 "INT4RANGE": TokenType.INT4RANGE, 879 "INT4MULTIRANGE": TokenType.INT4MULTIRANGE, 880 "INT8RANGE": TokenType.INT8RANGE, 881 "INT8MULTIRANGE": TokenType.INT8MULTIRANGE, 882 "NUMRANGE": TokenType.NUMRANGE, 883 "NUMMULTIRANGE": TokenType.NUMMULTIRANGE, 884 "TSRANGE": TokenType.TSRANGE, 885 "TSMULTIRANGE": TokenType.TSMULTIRANGE, 886 "TSTZRANGE": TokenType.TSTZRANGE, 887 "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE, 888 "DATERANGE": TokenType.DATERANGE, 889 "DATEMULTIRANGE": TokenType.DATEMULTIRANGE, 890 "UNIQUE": TokenType.UNIQUE, 891 "VECTOR": TokenType.VECTOR, 892 "STRUCT": TokenType.STRUCT, 893 "SEQUENCE": TokenType.SEQUENCE, 894 "VARIANT": TokenType.VARIANT, 895 "ALTER": TokenType.ALTER, 896 "ANALYZE": TokenType.COMMAND, 897 "CALL": TokenType.COMMAND, 898 "COMMENT": TokenType.COMMENT, 899 "EXPLAIN": TokenType.COMMAND, 900 "GRANT": TokenType.COMMAND, 901 "OPTIMIZE": TokenType.COMMAND, 902 "PREPARE": TokenType.COMMAND, 903 "VACUUM": TokenType.COMMAND, 904 "USER-DEFINED": TokenType.USERDEFINED, 905 "FOR VERSION": TokenType.VERSION_SNAPSHOT, 906 "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT, 907 } 908 909 WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = { 910 " ": TokenType.SPACE, 911 "\t": TokenType.SPACE, 912 "\n": TokenType.BREAK, 913 "\r": TokenType.BREAK, 914 } 915 916 COMMANDS = { 917 TokenType.COMMAND, 918 TokenType.EXECUTE, 919 TokenType.FETCH, 920 TokenType.SHOW, 921 TokenType.RENAME, 922 } 923 924 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} 925 926 # Handle numeric literals like in hive (3L = BIGINT) 927 NUMERIC_LITERALS: t.Dict[str, str] = {} 928 929 COMMENTS = ["--", ("/*", "*/")] 930 931 __slots__ = ( 932 "sql", 933 "size", 934 "tokens", 935 "dialect", 936 "_start", 937 "_current", 938 "_line", 939 "_col", 940 "_comments", 941 "_char", 942 "_end", 943 "_peek", 944 "_prev_token_line", 945 "_rs_dialect_settings", 946 ) 947 948 def __init__(self, dialect: DialectType = None) -> None: 949 from sqlglot.dialects import Dialect 950 951 self.dialect = Dialect.get_or_raise(dialect) 952 953 if USE_RS_TOKENIZER: 954 self._rs_dialect_settings = RsTokenizerDialectSettings( 955 unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES, 956 identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT, 957 ) 958 959 self.reset() 960 961 def reset(self) -> None: 962 self.sql = "" 963 self.size = 0 964 self.tokens: t.List[Token] = [] 965 self._start = 0 966 self._current = 0 967 self._line = 1 968 self._col = 0 969 self._comments: t.List[str] = [] 970 971 self._char = "" 972 self._end = False 973 self._peek = "" 974 self._prev_token_line = -1 975 976 def tokenize(self, sql: str) -> t.List[Token]: 977 """Returns a list of tokens corresponding to the SQL string `sql`.""" 978 
        if USE_RS_TOKENIZER:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            current = self._current

            # Skip spaces here rather than iteratively calling advance() for performance reasons
            while current < self.size:
                char = self.sql[current]

                if char.isspace() and (char == " " or char == "\t"):
                    current += 1
                else:
                    break

            offset = current - self._current if current > self._current else 1

            self._start = current
            self._advance(offset)

            if not self._char.isspace():
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens and self._comments:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char

        start = self._current - 1
        end = start + size

        return self.sql[start:end] if end <= self.size else ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            # Ensures we don't count an extra line if we get a \r\n line break sequence
            if not (self._char == "\r" and self._peek == "\n"):
                self._col = 1
                self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            # Here we use local variables instead of attributes for better performance
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line

        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        self.tokens.append(
            Token(
                token_type,
                text=self._text if text is None else text,
                line=self._line,
                col=self._col,
                start=self._start,
                end=self._current - 1,
                comments=self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self._KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = TrieResult.PREFIX
            else:
                result, trie = in_trie(trie, char.upper())

            if result == TrieResult.FAILED:
                break
            if result == TrieResult.EXISTS:
                word = chars

            end = self._current + size
            size += 1

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char.isspace()

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                break

        if word:
            if self._scan_string(word):
                return
            if self._scan_comment(word):
                return
            if prev_space or single_token or not char:
                self._advance(size - 1)
                word = word.upper()
                self._add(self.KEYWORDS[word], text=word)
                return

        if self._char in self.SINGLE_TOKENS:
            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
            return

        self._scan_var()

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_count = 1
            comment_end_size = len(comment_end)

            while not self._end:
                if self._chars(comment_end_size) == comment_end:
                    comment_count -= 1
                    if not comment_count:
                        break

                self._advance(alnum=True)

                # Nested comments are allowed by some dialects, e.g. databricks, duckdb, postgres
                if (
                    self.NESTED_COMMENTS
                    and not self._end
                    and self._chars(comment_end_size) == comment_start
                ):
                    self._advance(comment_start_size)
                    comment_count += 1

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER:
                    return self._add(TokenType.NUMBER)
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._advance(-len(literal))
                return self._add(TokenType.NUMBER, number_text)
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary number, fall back to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex number, fall back to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, start: str) -> bool:
        base = None
        token_type = TokenType.STRING

        if start in self._QUOTES:
            end = self._QUOTES[start]
        elif start in self._FORMAT_STRINGS:
            end, token_type = self._FORMAT_STRINGS[start]

            if token_type == TokenType.HEX_STRING:
                base = 16
            elif token_type == TokenType.BIT_STRING:
                base = 2
            elif token_type == TokenType.HEREDOC_STRING:
                self._advance()

                if self._char == end:
                    tag = ""
                else:
                    tag = self._extract_string(
                        end,
                        raw_string=True,
                        raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
                    )

                if tag and self.HEREDOC_TAG_IS_IDENTIFIER and (self._end or not tag.isidentifier()):
                    if not self._end:
                        self._advance(-1)

                    self._advance(-len(tag))
                    self._add(self.HEREDOC_STRING_ALTERNATIVE)
                    return True

                end = f"{start}{tag}{end}"
        else:
            return False
        self._advance(len(start))
        text = self._extract_string(end, raw_string=token_type == TokenType.RAW_STRING)

        if base:
            try:
                int(text, base)
            except Exception:
                raise TokenError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(identifier_end, escapes=self._IDENTIFIER_ESCAPES)
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(
        self,
        delimiter: str,
        escapes: t.Optional[t.Set[str]] = None,
        raw_string: bool = False,
        raise_unmatched: bool = True,
    ) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if (
                not raw_string
                and self.dialect.UNESCAPED_SEQUENCES
                and self._peek
                and self._char in self.STRING_ESCAPES
            ):
                unescaped_sequence = self.dialect.UNESCAPED_SEQUENCES.get(self._char + self._peek)
                if unescaped_sequence:
                    self._advance(2)
                    text += unescaped_sequence
                    continue
            if (
                (self.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS or not raw_string)
                and self._char in escapes
                and (self._peek == delimiter or self._peek in escapes)
                and (self._char not in self._QUOTES or self._char == self._peek)
            ):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    if not raise_unmatched:
                        return text + self._char

                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text

    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        try:
            tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
            for token in tokens:
                token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
            return tokens
        except Exception as e:
            raise TokenError(str(e))
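A minimal usage sketch based on the listing above: tokenize a short statement with the pure-Python path and inspect the resulting Token objects. Setting SQLGLOTRS_TOKENIZER=0 is an optional, illustrative way to force the Python tokenizer; with sqlglotrs installed and the variable unset, the same call transparently goes through tokenize_rs instead.

    import os

    # Optional: force the pure-Python tokenizer. USE_RS_TOKENIZER is read when
    # sqlglot.tokens is first imported, so this must be set before that import.
    os.environ["SQLGLOTRS_TOKENIZER"] = "0"

    from sqlglot.tokens import Tokenizer, TokenType

    tokens = Tokenizer().tokenize("SELECT a + 1 AS b -- trailing comment")

    for token in tokens:
        print(token.token_type, repr(token.text), token.line, token.col)

    # Single-line comments are not emitted as tokens; _scan attaches them to
    # the comments list of the preceding token instead.
    assert tokens[0].token_type is TokenType.SELECT
    assert tokens[-1].comments == [" trailing comment"]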
class TokenType(AutoName)
An enumeration of every token type the tokenizer can emit; each member's value equals its name via AutoName (see the class definition in the source listing above).
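A small sketch of how these members behave, together with the classmethod shortcuts the Token class (defined in the source above) provides for hand-built tokens:

    from sqlglot.tokens import Token, TokenType

    # AutoName gives every member its own name as the value
    assert TokenType.SELECT.value == "SELECT"

    # Classmethod helpers from the Token class
    num = Token.number(42)           # NUMBER token with text "42"
    ident = Token.identifier("col")  # IDENTIFIER token with text "col"

    assert num.token_type is TokenType.NUMBER and num.text == "42"
    assert ident.token_type is TokenType.IDENTIFIER and ident.text == "col"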
PRAGMA =
<TokenType.PRAGMA: 'PRAGMA'>
PREWHERE =
<TokenType.PREWHERE: 'PREWHERE'>
PRIMARY_KEY =
<TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE =
<TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES =
<TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE =
<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY =
<TokenType.QUALIFY: 'QUALIFY'>
QUOTE =
<TokenType.QUOTE: 'QUOTE'>
RANGE =
<TokenType.RANGE: 'RANGE'>
RECURSIVE =
<TokenType.RECURSIVE: 'RECURSIVE'>
REFRESH =
<TokenType.REFRESH: 'REFRESH'>
RENAME =
<TokenType.RENAME: 'RENAME'>
REPLACE =
<TokenType.REPLACE: 'REPLACE'>
RETURNING =
<TokenType.RETURNING: 'RETURNING'>
REFERENCES =
<TokenType.REFERENCES: 'REFERENCES'>
RIGHT =
<TokenType.RIGHT: 'RIGHT'>
RLIKE =
<TokenType.RLIKE: 'RLIKE'>
ROLLBACK =
<TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP =
<TokenType.ROLLUP: 'ROLLUP'>
ROW =
<TokenType.ROW: 'ROW'>
ROWS =
<TokenType.ROWS: 'ROWS'>
SELECT =
<TokenType.SELECT: 'SELECT'>
SEMI =
<TokenType.SEMI: 'SEMI'>
SEPARATOR =
<TokenType.SEPARATOR: 'SEPARATOR'>
SEQUENCE =
<TokenType.SEQUENCE: 'SEQUENCE'>
SERDE_PROPERTIES =
<TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET =
<TokenType.SET: 'SET'>
SETTINGS =
<TokenType.SETTINGS: 'SETTINGS'>
SHOW =
<TokenType.SHOW: 'SHOW'>
SIMILAR_TO =
<TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME =
<TokenType.SOME: 'SOME'>
SORT_BY =
<TokenType.SORT_BY: 'SORT_BY'>
START_WITH =
<TokenType.START_WITH: 'START_WITH'>
STORAGE_INTEGRATION =
<TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>
STRAIGHT_JOIN =
<TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>
STRUCT =
<TokenType.STRUCT: 'STRUCT'>
SUMMARIZE =
<TokenType.SUMMARIZE: 'SUMMARIZE'>
TABLE_SAMPLE =
<TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TAG =
<TokenType.TAG: 'TAG'>
TEMPORARY =
<TokenType.TEMPORARY: 'TEMPORARY'>
TOP =
<TokenType.TOP: 'TOP'>
THEN =
<TokenType.THEN: 'THEN'>
TRUE =
<TokenType.TRUE: 'TRUE'>
TRUNCATE =
<TokenType.TRUNCATE: 'TRUNCATE'>
UNCACHE =
<TokenType.UNCACHE: 'UNCACHE'>
UNION =
<TokenType.UNION: 'UNION'>
UNNEST =
<TokenType.UNNEST: 'UNNEST'>
UNPIVOT =
<TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE =
<TokenType.UPDATE: 'UPDATE'>
USE =
<TokenType.USE: 'USE'>
USING =
<TokenType.USING: 'USING'>
VALUES =
<TokenType.VALUES: 'VALUES'>
VIEW =
<TokenType.VIEW: 'VIEW'>
VOLATILE =
<TokenType.VOLATILE: 'VOLATILE'>
WHEN =
<TokenType.WHEN: 'WHEN'>
WHERE =
<TokenType.WHERE: 'WHERE'>
WINDOW =
<TokenType.WINDOW: 'WINDOW'>
WITH =
<TokenType.WITH: 'WITH'>
UNIQUE =
<TokenType.UNIQUE: 'UNIQUE'>
VERSION_SNAPSHOT =
<TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>
TIMESTAMP_SNAPSHOT =
<TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>
OPTION =
<TokenType.OPTION: 'OPTION'>
Inherited Members
- enum.Enum
- name
- value
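For orientation, a minimal sketch (not part of the generated reference) of how TokenType members show up in practice when inspecting tokenizer output:

from sqlglot.tokens import Tokenizer, TokenType

# Tokenize a small statement and match tokens by their TokenType member.
tokens = Tokenizer().tokenize("SELECT 1 AS x")
assert tokens[0].token_type == TokenType.SELECT
assert tokens[1].token_type == TokenType.NUMBER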
class Token:
class Token:
    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        start: int = 0,
        end: int = 0,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        """Token initializer.

        Args:
            token_type: The TokenType Enum.
            text: The text of the token.
            line: The line that the token ends on.
            col: The column that the token ends on.
            start: The start index of the token.
            end: The ending index of the token.
            comments: The comments to attach to the token.
        """
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = col
        self.start = start
        self.end = end
        self.comments = [] if comments is None else comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"
Token(token_type: TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: Optional[List[str]] = None)
def __init__(
    self,
    token_type: TokenType,
    text: str,
    line: int = 1,
    col: int = 1,
    start: int = 0,
    end: int = 0,
    comments: t.Optional[t.List[str]] = None,
) -> None:
    """Token initializer.

    Args:
        token_type: The TokenType Enum.
        text: The text of the token.
        line: The line that the token ends on.
        col: The column that the token ends on.
        start: The start index of the token.
        end: The ending index of the token.
        comments: The comments to attach to the token.
    """
    self.token_type = token_type
    self.text = text
    self.line = line
    self.col = col
    self.start = start
    self.end = end
    self.comments = [] if comments is None else comments
Token initializer.
Arguments:
- token_type: The TokenType Enum.
- text: The text of the token.
- line: The line that the token ends on.
- col: The column that the token ends on.
- start: The start index of the token.
- end: The ending index of the token.
- comments: The comments to attach to the token.
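As a hedged illustration of the initializer above (argument values chosen arbitrarily):

from sqlglot.tokens import Token, TokenType

# line/col point at the end of the token; start/end are indices into the SQL string.
tok = Token(TokenType.NUMBER, "42", line=1, col=2, start=0, end=1)
print(tok)
# <Token token_type: TokenType.NUMBER, text: 42, line: 1, col: 2, start: 0, end: 1, comments: []>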
@classmethod
def number(cls, number: int) -> Token:
    """Returns a NUMBER token with `number` as its text."""
    return cls(TokenType.NUMBER, str(number))
Returns a NUMBER token with `number` as its text.
@classmethod
def string(cls, string: str) -> Token:
    """Returns a STRING token with `string` as its text."""
    return cls(TokenType.STRING, string)
Returns a STRING token with `string` as its text.
@classmethod
def identifier(cls, identifier: str) -> Token:
    """Returns an IDENTIFIER token with `identifier` as its text."""
    return cls(TokenType.IDENTIFIER, identifier)
Returns an IDENTIFIER token with `identifier` as its text.
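Taken together, the factory classmethods are small shorthands for building tokens by hand; a quick sketch:

from sqlglot.tokens import Token, TokenType

assert Token.number(7).token_type == TokenType.NUMBER and Token.number(7).text == "7"
assert Token.string("hello").text == "hello"
assert Token.identifier("my_col").token_type == TokenType.IDENTIFIER
assert Token.var("x").token_type == TokenType.VAR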
class Tokenizer:
class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        "#": TokenType.HASH,
        # Used for breaking a var like x'y' but nothing else; the token type doesn't matter
        "'": TokenType.UNKNOWN,
        "`": TokenType.UNKNOWN,
        '"': TokenType.UNKNOWN,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    IDENTIFIER_ESCAPES = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    # Whether the heredoc tags follow the same lexical rules as unquoted identifiers
    HEREDOC_TAG_IS_IDENTIFIER = False

    # Token that we'll generate as a fallback if the heredoc prefix doesn't correspond to a heredoc
    HEREDOC_STRING_ALTERNATIVE = TokenType.VAR

    # Whether string escape characters function as such when placed within raw strings
    STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS = True

    NESTED_COMMENTS = True

    # Autofilled
    _COMMENTS: t.Dict[str, str] = {}
    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()
    _KEYWORD_TRIE: t.Dict = {}
    _RS_TOKENIZER: t.Optional[t.Any] = None

    KEYWORDS: t.Dict[str, TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
        "/*+": TokenType.HINT, "==": TokenType.EQ, "::": TokenType.DCOLON,
        "||": TokenType.DPIPE, ">=": TokenType.GTE, "<=": TokenType.LTE,
        "<>": TokenType.NEQ, "!=": TokenType.NEQ, ":=": TokenType.COLON_EQ,
        "<=>": TokenType.NULLSAFE_EQ, "->": TokenType.ARROW, "->>": TokenType.DARROW,
        "=>": TokenType.FARROW, "#>": TokenType.HASH_ARROW, "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW, "&&": TokenType.DAMP, "??": TokenType.DQMARK,
        "ALL": TokenType.ALL, "ALWAYS": TokenType.ALWAYS, "AND": TokenType.AND,
        "ANTI": TokenType.ANTI, "ANY": TokenType.ANY, "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS, "ASOF": TokenType.ASOF,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT, "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN, "BETWEEN": TokenType.BETWEEN,
        "CACHE": TokenType.CACHE, "UNCACHE": TokenType.UNCACHE, "CASE": TokenType.CASE,
        "CHARACTER SET": TokenType.CHARACTER_SET, "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE, "COLUMN": TokenType.COLUMN, "COMMIT": TokenType.COMMIT,
        "CONNECT BY": TokenType.CONNECT_BY, "CONSTRAINT": TokenType.CONSTRAINT,
        "COPY": TokenType.COPY, "CREATE": TokenType.CREATE, "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE, "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT_TIME": TokenType.CURRENT_TIME, "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER, "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT, "DELETE": TokenType.DELETE, "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE, "DISTINCT": TokenType.DISTINCT,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY, "DIV": TokenType.DIV, "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE, "END": TokenType.END, "ENUM": TokenType.ENUM,
        "ESCAPE": TokenType.ESCAPE, "EXCEPT": TokenType.EXCEPT, "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS, "FALSE": TokenType.FALSE, "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER, "FIRST": TokenType.FIRST, "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION, "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY, "FORMAT": TokenType.FORMAT, "FROM": TokenType.FROM,
        "GEOGRAPHY": TokenType.GEOGRAPHY, "GEOMETRY": TokenType.GEOMETRY, "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY, "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING, "ILIKE": TokenType.ILIKE, "IN": TokenType.IN,
        "INDEX": TokenType.INDEX, "INET": TokenType.INET, "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT, "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT, "INTO": TokenType.INTO, "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL, "JOIN": TokenType.JOIN, "KEEP": TokenType.KEEP,
        "KILL": TokenType.KILL, "LATERAL": TokenType.LATERAL, "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE, "LIMIT": TokenType.LIMIT, "LOAD": TokenType.LOAD,
        "LOCK": TokenType.LOCK, "MERGE": TokenType.MERGE, "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT, "NOT": TokenType.NOT, "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL, "OBJECT": TokenType.OBJECT, "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON, "OR": TokenType.OR, "XOR": TokenType.XOR,
        "ORDER BY": TokenType.ORDER_BY, "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER, "OVER": TokenType.OVER, "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE, "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY, "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY, "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT, "PRAGMA": TokenType.PRAGMA,
        "PRIMARY KEY": TokenType.PRIMARY_KEY, "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY, "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE, "REGEXP": TokenType.RLIKE, "RENAME": TokenType.RENAME,
        "REPLACE": TokenType.REPLACE, "RETURNING": TokenType.RETURNING,
        "REFERENCES": TokenType.REFERENCES, "RIGHT": TokenType.RIGHT, "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK, "ROLLUP": TokenType.ROLLUP, "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS, "SCHEMA": TokenType.SCHEMA, "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI, "SET": TokenType.SET, "SETTINGS": TokenType.SETTINGS,
        "SHOW": TokenType.SHOW, "SIMILAR TO": TokenType.SIMILAR_TO, "SOME": TokenType.SOME,
        "SORT BY": TokenType.SORT_BY, "START WITH": TokenType.START_WITH,
        "STRAIGHT_JOIN": TokenType.STRAIGHT_JOIN, "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE, "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY, "THEN": TokenType.THEN, "TRUE": TokenType.TRUE,
        "TRUNCATE": TokenType.TRUNCATE, "UNION": TokenType.UNION, "UNKNOWN": TokenType.UNKNOWN,
        "UNNEST": TokenType.UNNEST, "UNPIVOT": TokenType.UNPIVOT, "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE, "USING": TokenType.USING, "UUID": TokenType.UUID,
        "VALUES": TokenType.VALUES, "VIEW": TokenType.VIEW, "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN, "WHERE": TokenType.WHERE, "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH, "APPLY": TokenType.APPLY, "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT, "BOOL": TokenType.BOOLEAN, "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT, "MEDIUMINT": TokenType.MEDIUMINT, "INT1": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT, "INT16": TokenType.SMALLINT, "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT, "INT128": TokenType.INT128, "HUGEINT": TokenType.INT128,
        "INT2": TokenType.SMALLINT, "INTEGER": TokenType.INT, "INT": TokenType.INT,
        "INT4": TokenType.INT, "INT32": TokenType.INT, "INT64": TokenType.BIGINT,
        "LONG": TokenType.BIGINT, "BIGINT": TokenType.BIGINT, "INT8": TokenType.TINYINT,
        "UINT": TokenType.UINT, "DEC": TokenType.DECIMAL, "DECIMAL": TokenType.DECIMAL,
        "BIGDECIMAL": TokenType.BIGDECIMAL, "BIGNUMERIC": TokenType.BIGDECIMAL,
        "LIST": TokenType.LIST, "MAP": TokenType.MAP, "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL, "NUMERIC": TokenType.DECIMAL, "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT, "FLOAT": TokenType.FLOAT, "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE, "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE, "JSON": TokenType.JSON, "JSONB": TokenType.JSONB,
        "CHAR": TokenType.CHAR, "CHARACTER": TokenType.CHAR, "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR, "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR, "NVARCHAR2": TokenType.NVARCHAR,
        "BPCHAR": TokenType.BPCHAR, "STR": TokenType.TEXT, "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT, "LONGTEXT": TokenType.LONGTEXT,
        "MEDIUMTEXT": TokenType.MEDIUMTEXT, "TINYTEXT": TokenType.TINYTEXT,
        "CLOB": TokenType.TEXT, "LONGVARCHAR": TokenType.TEXT, "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY, "LONGBLOB": TokenType.LONGBLOB,
        "MEDIUMBLOB": TokenType.MEDIUMBLOB, "TINYBLOB": TokenType.TINYBLOB,
        "BYTEA": TokenType.VARBINARY, "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME, "TIMETZ": TokenType.TIMETZ, "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ, "TIMESTAMPNTZ": TokenType.TIMESTAMPNTZ,
        "TIMESTAMP_NTZ": TokenType.TIMESTAMPNTZ, "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME, "INT4RANGE": TokenType.INT4RANGE,
        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE, "INT8RANGE": TokenType.INT8RANGE,
        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE, "NUMRANGE": TokenType.NUMRANGE,
        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE, "TSRANGE": TokenType.TSRANGE,
        "TSMULTIRANGE": TokenType.TSMULTIRANGE, "TSTZRANGE": TokenType.TSTZRANGE,
        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE, "DATERANGE": TokenType.DATERANGE,
        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE, "UNIQUE": TokenType.UNIQUE,
        "VECTOR": TokenType.VECTOR, "STRUCT": TokenType.STRUCT, "SEQUENCE": TokenType.SEQUENCE,
        "VARIANT": TokenType.VARIANT, "ALTER": TokenType.ALTER,
        "ANALYZE": TokenType.COMMAND, "CALL": TokenType.COMMAND, "COMMENT": TokenType.COMMENT,
        "EXPLAIN": TokenType.COMMAND, "GRANT": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND, "PREPARE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND, "USER-DEFINED": TokenType.USERDEFINED,
        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
    }

    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SHOW,
        TokenType.RENAME,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # Handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}

    COMMENTS = ["--", ("/*", "*/")]

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "dialect",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_rs_dialect_settings",
    )

    def __init__(self, dialect: DialectType = None) -> None:
        from sqlglot.dialects import Dialect

        self.dialect = Dialect.get_or_raise(dialect)

        if USE_RS_TOKENIZER:
            self._rs_dialect_settings = RsTokenizerDialectSettings(
                unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
            )

        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 0
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        if USE_RS_TOKENIZER:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            current = self._current

            # Skip spaces here rather than iteratively calling advance() for performance reasons
            while current < self.size:
                char = self.sql[current]

                if char.isspace() and (char == " " or char == "\t"):
                    current += 1
                else:
                    break

            offset = current - self._current if current > self._current else 1

            self._start = current
            self._advance(offset)

            if not self._char.isspace():
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens and self._comments:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char

        start = self._current - 1
        end = start + size

        return self.sql[start:end] if end <= self.size else ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            # Ensures we don't count an extra line if we get a \r\n line break sequence
            if not (self._char == "\r" and self._peek == "\n"):
                self._col = 1
                self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            # Here we use local variables instead of attributes for better performance
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line

        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        self.tokens.append(
            Token(
                token_type,
                text=self._text if text is None else text,
                line=self._line,
                col=self._col,
                start=self._start,
                end=self._current - 1,
                comments=self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self._KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = TrieResult.PREFIX
            else:
                result, trie = in_trie(trie, char.upper())

            if result == TrieResult.FAILED:
                break
            if result == TrieResult.EXISTS:
                word = chars

            end = self._current + size
            size += 1

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char.isspace()

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                break

        if word:
            if self._scan_string(word):
                return
            if self._scan_comment(word):
                return
            if prev_space or single_token or not char:
                self._advance(size - 1)
                word = word.upper()
                self._add(self.KEYWORDS[word], text=word)
                return

        if self._char in self.SINGLE_TOKENS:
            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
            return

        self._scan_var()

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_count = 1
            comment_end_size = len(comment_end)

            while not self._end:
                if self._chars(comment_end_size) == comment_end:
                    comment_count -= 1
                    if not comment_count:
                        break

                self._advance(alnum=True)

                # Nested comments are allowed by some dialects, e.g. databricks, duckdb, postgres
                if (
                    self.NESTED_COMMENTS
                    and not self._end
                    and self._chars(comment_end_size) == comment_start
                ):
                    self._advance(comment_start_size)
                    comment_count += 1

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER:
                    return self._add(TokenType.NUMBER)
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._advance(-len(literal))
                return self._add(TokenType.NUMBER, number_text)
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, start: str) -> bool:
        base = None
        token_type = TokenType.STRING

        if start in self._QUOTES:
            end = self._QUOTES[start]
        elif start in self._FORMAT_STRINGS:
            end, token_type = self._FORMAT_STRINGS[start]

            if token_type == TokenType.HEX_STRING:
                base = 16
            elif token_type == TokenType.BIT_STRING:
                base = 2
            elif token_type == TokenType.HEREDOC_STRING:
                self._advance()

                if self._char == end:
                    tag = ""
                else:
                    tag = self._extract_string(
                        end,
                        raw_string=True,
                        raise_unmatched=not self.HEREDOC_TAG_IS_IDENTIFIER,
                    )

                if tag and self.HEREDOC_TAG_IS_IDENTIFIER and (self._end or not tag.isidentifier()):
                    if not self._end:
                        self._advance(-1)

                    self._advance(-len(tag))
                    self._add(self.HEREDOC_STRING_ALTERNATIVE)
                    return True

                end = f"{start}{tag}{end}"
        else:
            return False

        self._advance(len(start))
        text = self._extract_string(end, raw_string=token_type == TokenType.RAW_STRING)

        if base:
            try:
                int(text, base)
            except Exception:
                raise TokenError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(identifier_end, escapes=self._IDENTIFIER_ESCAPES)
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(
        self,
        delimiter: str,
        escapes: t.Optional[t.Set[str]] = None,
        raw_string: bool = False,
        raise_unmatched: bool = True,
    ) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if (
                not raw_string
                and self.dialect.UNESCAPED_SEQUENCES
                and self._peek
                and self._char in self.STRING_ESCAPES
            ):
                unescaped_sequence = self.dialect.UNESCAPED_SEQUENCES.get(self._char + self._peek)
                if unescaped_sequence:
                    self._advance(2)
                    text += unescaped_sequence
                    continue
            if (
                (self.STRING_ESCAPES_ALLOWED_IN_RAW_STRINGS or not raw_string)
                and self._char in escapes
                and (self._peek == delimiter or self._peek in escapes)
                and (self._char not in self._QUOTES or self._char == self._peek)
            ):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    if not raise_unmatched:
                        return text + self._char

                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text

    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        try:
            tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
            for token in tokens:
                token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
            return tokens
        except Exception as e:
            raise TokenError(str(e))
Tokenizer(dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None)
def __init__(self, dialect: DialectType = None) -> None:
    from sqlglot.dialects import Dialect

    self.dialect = Dialect.get_or_raise(dialect)

    if USE_RS_TOKENIZER:
        self._rs_dialect_settings = RsTokenizerDialectSettings(
            unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
            identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
        )

    self.reset()
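A brief construction sketch: the dialect argument may be a dialect name, a Dialect class or instance, or None (the default dialect), and is resolved through Dialect.get_or_raise. In practice each dialect usually ships its own Tokenizer subclass; "snowflake" below is just one of the dialect names sqlglot provides.

from sqlglot.tokens import Tokenizer

generic = Tokenizer()                       # default dialect settings
snowflake = Tokenizer(dialect="snowflake")  # resolved via Dialect.get_or_raise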
SINGLE_TOKENS =
{'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>}
HEREDOC_STRING_ALTERNATIVE = <TokenType.VAR: 'VAR'>
KEYWORDS: Dict[str, TokenType] =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': 
<TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 
'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 
'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
WHITE_SPACE: Dict[Optional[str], TokenType] = {' ': <TokenType.SPACE: 'SPACE'>, '\t': <TokenType.SPACE: 'SPACE'>, '\n': <TokenType.BREAK: 'BREAK'>, '\r': <TokenType.BREAK: 'BREAK'>}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>}
COMMAND_PREFIX_TOKENS = {<TokenType.BEGIN: 'BEGIN'>, <TokenType.SEMICOLON: 'SEMICOLON'>}
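Because the _Tokenizer metaclass compiles these class attributes (KEYWORDS, QUOTES, COMMENTS, and so on) into tries and lookup tables at class-creation time, customization is done by subclassing. A hypothetical sketch; the MATERIALIZE keyword is invented purely for illustration:

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):
    KEYWORDS = {
        **Tokenizer.KEYWORDS,
        "MATERIALIZE": TokenType.COMMAND,  # invented keyword, illustration only
    }

assert MyTokenizer().tokenize("MATERIALIZE")[0].token_type == TokenType.COMMAND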
def reset(self) -> None:
def reset(self) -> None:
    self.sql = ""
    self.size = 0
    self.tokens: t.List[Token] = []
    self._start = 0
    self._current = 0
    self._line = 1
    self._col = 0
    self._comments: t.List[str] = []

    self._char = ""
    self._end = False
    self._peek = ""
    self._prev_token_line = -1
def tokenize(self, sql: str) -> t.List[Token]:
    """Returns a list of tokens corresponding to the SQL string `sql`."""
    if USE_RS_TOKENIZER:
        return self.tokenize_rs(sql)

    self.reset()
    self.sql = sql
    self.size = len(sql)

    try:
        self._scan()
    except Exception as e:
        start = max(self._current - 50, 0)
        end = min(self._current + 50, self.size - 1)
        context = self.sql[start:end]
        raise TokenError(f"Error tokenizing '{context}'") from e

    return self.tokens
Returns a list of tokens corresponding to the SQL string `sql`.
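A short usage sketch of tokenize, including how a trailing comment is attached to the preceding token rather than emitted as a token of its own:

from sqlglot.tokens import Tokenizer

tokens = Tokenizer().tokenize("SELECT a FROM t -- trailing note")
for tok in tokens:
    print(tok.token_type, repr(tok.text), tok.line, tok.col)

print(tokens[-1].comments)  # [' trailing note']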
def tokenize_rs(self, sql: str) -> t.List[Token]:
    if not self._RS_TOKENIZER:
        raise SqlglotError("Rust tokenizer is not available")

    try:
        tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
        for token in tokens:
            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
        return tokens
    except Exception as e:
        raise TokenError(str(e))
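The Rust path is selected at import time: when sqlglotrs is installed and the SQLGLOTRS_TOKENIZER environment variable is left at its default of "1", USE_RS_TOKENIZER is true and tokenize delegates to tokenize_rs. A small sketch of forcing the pure-Python scanner instead:

import os

# Must be set before sqlglot is imported, since the flag is read at import time.
os.environ["SQLGLOTRS_TOKENIZER"] = "0"

import sqlglot

print(sqlglot.tokens.USE_RS_TOKENIZER)  # False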