sqlglot.tokens
1from __future__ import annotations 2 3import typing as t 4from enum import auto 5 6from sqlglot.helper import AutoName 7from sqlglot.trie import in_trie, new_trie 8 9 10class TokenType(AutoName): 11 L_PAREN = auto() 12 R_PAREN = auto() 13 L_BRACKET = auto() 14 R_BRACKET = auto() 15 L_BRACE = auto() 16 R_BRACE = auto() 17 COMMA = auto() 18 DOT = auto() 19 DASH = auto() 20 PLUS = auto() 21 COLON = auto() 22 DCOLON = auto() 23 SEMICOLON = auto() 24 STAR = auto() 25 BACKSLASH = auto() 26 SLASH = auto() 27 LT = auto() 28 LTE = auto() 29 GT = auto() 30 GTE = auto() 31 NOT = auto() 32 EQ = auto() 33 NEQ = auto() 34 NULLSAFE_EQ = auto() 35 AND = auto() 36 OR = auto() 37 AMP = auto() 38 DPIPE = auto() 39 PIPE = auto() 40 CARET = auto() 41 TILDA = auto() 42 ARROW = auto() 43 DARROW = auto() 44 FARROW = auto() 45 HASH = auto() 46 HASH_ARROW = auto() 47 DHASH_ARROW = auto() 48 LR_ARROW = auto() 49 LT_AT = auto() 50 AT_GT = auto() 51 DOLLAR = auto() 52 PARAMETER = auto() 53 SESSION_PARAMETER = auto() 54 DAMP = auto() 55 56 BLOCK_START = auto() 57 BLOCK_END = auto() 58 59 SPACE = auto() 60 BREAK = auto() 61 62 STRING = auto() 63 NUMBER = auto() 64 IDENTIFIER = auto() 65 DATABASE = auto() 66 COLUMN = auto() 67 COLUMN_DEF = auto() 68 SCHEMA = auto() 69 TABLE = auto() 70 VAR = auto() 71 BIT_STRING = auto() 72 HEX_STRING = auto() 73 BYTE_STRING = auto() 74 NATIONAL_STRING = auto() 75 RAW_STRING = auto() 76 77 # types 78 BIT = auto() 79 BOOLEAN = auto() 80 TINYINT = auto() 81 UTINYINT = auto() 82 SMALLINT = auto() 83 USMALLINT = auto() 84 INT = auto() 85 UINT = auto() 86 BIGINT = auto() 87 UBIGINT = auto() 88 INT128 = auto() 89 UINT128 = auto() 90 INT256 = auto() 91 UINT256 = auto() 92 FLOAT = auto() 93 DOUBLE = auto() 94 DECIMAL = auto() 95 BIGDECIMAL = auto() 96 CHAR = auto() 97 NCHAR = auto() 98 VARCHAR = auto() 99 NVARCHAR = auto() 100 TEXT = auto() 101 MEDIUMTEXT = auto() 102 LONGTEXT = auto() 103 MEDIUMBLOB = auto() 104 LONGBLOB = auto() 105 BINARY = auto() 106 VARBINARY = auto() 107 JSON = auto() 108 JSONB = auto() 109 TIME = auto() 110 TIMESTAMP = auto() 111 TIMESTAMPTZ = auto() 112 TIMESTAMPLTZ = auto() 113 DATETIME = auto() 114 DATETIME64 = auto() 115 DATE = auto() 116 INT4RANGE = auto() 117 INT4MULTIRANGE = auto() 118 INT8RANGE = auto() 119 INT8MULTIRANGE = auto() 120 NUMRANGE = auto() 121 NUMMULTIRANGE = auto() 122 TSRANGE = auto() 123 TSMULTIRANGE = auto() 124 TSTZRANGE = auto() 125 TSTZMULTIRANGE = auto() 126 DATERANGE = auto() 127 DATEMULTIRANGE = auto() 128 UUID = auto() 129 GEOGRAPHY = auto() 130 NULLABLE = auto() 131 GEOMETRY = auto() 132 HLLSKETCH = auto() 133 HSTORE = auto() 134 SUPER = auto() 135 SERIAL = auto() 136 SMALLSERIAL = auto() 137 BIGSERIAL = auto() 138 XML = auto() 139 UNIQUEIDENTIFIER = auto() 140 MONEY = auto() 141 SMALLMONEY = auto() 142 ROWVERSION = auto() 143 IMAGE = auto() 144 VARIANT = auto() 145 OBJECT = auto() 146 INET = auto() 147 ENUM = auto() 148 149 # keywords 150 ALIAS = auto() 151 ALTER = auto() 152 ALWAYS = auto() 153 ALL = auto() 154 ANTI = auto() 155 ANY = auto() 156 APPLY = auto() 157 ARRAY = auto() 158 ASC = auto() 159 ASOF = auto() 160 AUTO_INCREMENT = auto() 161 BEGIN = auto() 162 BETWEEN = auto() 163 CACHE = auto() 164 CASE = auto() 165 CHARACTER_SET = auto() 166 CLUSTER_BY = auto() 167 COLLATE = auto() 168 COMMAND = auto() 169 COMMENT = auto() 170 COMMIT = auto() 171 CONSTRAINT = auto() 172 CREATE = auto() 173 CROSS = auto() 174 CUBE = auto() 175 CURRENT_DATE = auto() 176 CURRENT_DATETIME = auto() 177 CURRENT_TIME = auto() 178 CURRENT_TIMESTAMP = auto() 179 CURRENT_USER = auto() 180 DEFAULT = auto() 181 DELETE = auto() 182 DESC = auto() 183 DESCRIBE = auto() 184 DICTIONARY = auto() 185 DISTINCT = auto() 186 DISTRIBUTE_BY = auto() 187 DIV = auto() 188 DROP = auto() 189 ELSE = auto() 190 END = auto() 191 ESCAPE = auto() 192 EXCEPT = auto() 193 EXECUTE = auto() 194 EXISTS = auto() 195 FALSE = auto() 196 FETCH = auto() 197 FILTER = auto() 198 FINAL = auto() 199 FIRST = auto() 200 FOR = auto() 201 FOREIGN_KEY = auto() 202 FORMAT = auto() 203 FROM = auto() 204 FULL = auto() 205 FUNCTION = auto() 206 GLOB = auto() 207 GLOBAL = auto() 208 GROUP_BY = auto() 209 GROUPING_SETS = auto() 210 HAVING = auto() 211 HINT = auto() 212 IF = auto() 213 ILIKE = auto() 214 ILIKE_ANY = auto() 215 IN = auto() 216 INDEX = auto() 217 INNER = auto() 218 INSERT = auto() 219 INTERSECT = auto() 220 INTERVAL = auto() 221 INTO = auto() 222 INTRODUCER = auto() 223 IRLIKE = auto() 224 IS = auto() 225 ISNULL = auto() 226 JOIN = auto() 227 JOIN_MARKER = auto() 228 KEEP = auto() 229 LANGUAGE = auto() 230 LATERAL = auto() 231 LEFT = auto() 232 LIKE = auto() 233 LIKE_ANY = auto() 234 LIMIT = auto() 235 LOAD = auto() 236 LOCK = auto() 237 MAP = auto() 238 MATCH_RECOGNIZE = auto() 239 MERGE = auto() 240 MOD = auto() 241 NATURAL = auto() 242 NEXT = auto() 243 NEXT_VALUE_FOR = auto() 244 NOTNULL = auto() 245 NULL = auto() 246 OFFSET = auto() 247 ON = auto() 248 ORDER_BY = auto() 249 ORDERED = auto() 250 ORDINALITY = auto() 251 OUTER = auto() 252 OVER = auto() 253 OVERLAPS = auto() 254 OVERWRITE = auto() 255 PARTITION = auto() 256 PARTITION_BY = auto() 257 PERCENT = auto() 258 PIVOT = auto() 259 PLACEHOLDER = auto() 260 PRAGMA = auto() 261 PRIMARY_KEY = auto() 262 PROCEDURE = auto() 263 PROPERTIES = auto() 264 PSEUDO_TYPE = auto() 265 QUALIFY = auto() 266 QUOTE = auto() 267 RANGE = auto() 268 RECURSIVE = auto() 269 REPLACE = auto() 270 RETURNING = auto() 271 REFERENCES = auto() 272 RIGHT = auto() 273 RLIKE = auto() 274 ROLLBACK = auto() 275 ROLLUP = auto() 276 ROW = auto() 277 ROWS = auto() 278 SELECT = auto() 279 SEMI = auto() 280 SEPARATOR = auto() 281 SERDE_PROPERTIES = auto() 282 SET = auto() 283 SETTINGS = auto() 284 SHOW = auto() 285 SIMILAR_TO = auto() 286 SOME = auto() 287 SORT_BY = auto() 288 STRUCT = auto() 289 TABLE_SAMPLE = auto() 290 TEMPORARY = auto() 291 TOP = auto() 292 THEN = auto() 293 TRUE = auto() 294 UNCACHE = auto() 295 UNION = auto() 296 UNNEST = auto() 297 UNPIVOT = auto() 298 UPDATE = auto() 299 USE = auto() 300 USING = auto() 301 VALUES = auto() 302 VIEW = auto() 303 VOLATILE = auto() 304 WHEN = auto() 305 WHERE = auto() 306 WINDOW = auto() 307 WITH = auto() 308 UNIQUE = auto() 309 310 311class Token: 312 __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments") 313 314 @classmethod 315 def number(cls, number: int) -> Token: 316 """Returns a NUMBER token with `number` as its text.""" 317 return cls(TokenType.NUMBER, str(number)) 318 319 @classmethod 320 def string(cls, string: str) -> Token: 321 """Returns a STRING token with `string` as its text.""" 322 return cls(TokenType.STRING, string) 323 324 @classmethod 325 def identifier(cls, identifier: str) -> Token: 326 """Returns an IDENTIFIER token with `identifier` as its text.""" 327 return cls(TokenType.IDENTIFIER, identifier) 328 329 @classmethod 330 def var(cls, var: str) -> Token: 331 """Returns an VAR token with `var` as its text.""" 332 return cls(TokenType.VAR, var) 333 334 def __init__( 335 self, 336 token_type: TokenType, 337 text: str, 338 line: int = 1, 339 col: int = 1, 340 start: int = 0, 341 end: int = 0, 342 comments: t.List[str] = [], 343 ) -> None: 344 """Token initializer. 345 346 Args: 347 token_type: The TokenType Enum. 348 text: The text of the token. 349 line: The line that the token ends on. 350 col: The column that the token ends on. 351 start: The start index of the token. 352 end: The ending index of the token. 353 comments: The comments to attach to the token. 354 """ 355 self.token_type = token_type 356 self.text = text 357 self.line = line 358 self.col = col 359 self.start = start 360 self.end = end 361 self.comments = comments 362 363 def __repr__(self) -> str: 364 attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__) 365 return f"<Token {attributes}>" 366 367 368class _Tokenizer(type): 369 def __new__(cls, clsname, bases, attrs): 370 klass = super().__new__(cls, clsname, bases, attrs) 371 372 def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]: 373 return dict( 374 (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr 375 ) 376 377 def _quotes_to_format( 378 token_type: TokenType, arr: t.List[str | t.Tuple[str, str]] 379 ) -> t.Dict[str, t.Tuple[str, TokenType]]: 380 return {k: (v, token_type) for k, v in _convert_quotes(arr).items()} 381 382 klass._QUOTES = _convert_quotes(klass.QUOTES) 383 klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS) 384 385 klass._FORMAT_STRINGS = { 386 **{ 387 p + s: (e, TokenType.NATIONAL_STRING) 388 for s, e in klass._QUOTES.items() 389 for p in ("n", "N") 390 }, 391 **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS), 392 **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS), 393 **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS), 394 **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS), 395 } 396 397 klass._STRING_ESCAPES = set(klass.STRING_ESCAPES) 398 klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES) 399 klass._COMMENTS = { 400 **dict( 401 (comment, None) if isinstance(comment, str) else (comment[0], comment[1]) 402 for comment in klass.COMMENTS 403 ), 404 "{#": "#}", # Ensure Jinja comments are tokenized correctly in all dialects 405 } 406 407 klass._KEYWORD_TRIE = new_trie( 408 key.upper() 409 for key in ( 410 *klass.KEYWORDS, 411 *klass._COMMENTS, 412 *klass._QUOTES, 413 *klass._FORMAT_STRINGS, 414 ) 415 if " " in key or any(single in key for single in klass.SINGLE_TOKENS) 416 ) 417 418 return klass 419 420 421class Tokenizer(metaclass=_Tokenizer): 422 SINGLE_TOKENS = { 423 "(": TokenType.L_PAREN, 424 ")": TokenType.R_PAREN, 425 "[": TokenType.L_BRACKET, 426 "]": TokenType.R_BRACKET, 427 "{": TokenType.L_BRACE, 428 "}": TokenType.R_BRACE, 429 "&": TokenType.AMP, 430 "^": TokenType.CARET, 431 ":": TokenType.COLON, 432 ",": TokenType.COMMA, 433 ".": TokenType.DOT, 434 "-": TokenType.DASH, 435 "=": TokenType.EQ, 436 ">": TokenType.GT, 437 "<": TokenType.LT, 438 "%": TokenType.MOD, 439 "!": TokenType.NOT, 440 "|": TokenType.PIPE, 441 "+": TokenType.PLUS, 442 ";": TokenType.SEMICOLON, 443 "/": TokenType.SLASH, 444 "\\": TokenType.BACKSLASH, 445 "*": TokenType.STAR, 446 "~": TokenType.TILDA, 447 "?": TokenType.PLACEHOLDER, 448 "@": TokenType.PARAMETER, 449 # used for breaking a var like x'y' but nothing else 450 # the token type doesn't matter 451 "'": TokenType.QUOTE, 452 "`": TokenType.IDENTIFIER, 453 '"': TokenType.IDENTIFIER, 454 "#": TokenType.HASH, 455 } 456 457 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] 458 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] 459 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] 460 RAW_STRINGS: t.List[str | t.Tuple[str, str]] = [] 461 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] 462 IDENTIFIER_ESCAPES = ['"'] 463 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] 464 STRING_ESCAPES = ["'"] 465 VAR_SINGLE_TOKENS: t.Set[str] = set() 466 467 # Autofilled 468 IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False 469 470 _COMMENTS: t.Dict[str, str] = {} 471 _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} 472 _IDENTIFIERS: t.Dict[str, str] = {} 473 _IDENTIFIER_ESCAPES: t.Set[str] = set() 474 _QUOTES: t.Dict[str, str] = {} 475 _STRING_ESCAPES: t.Set[str] = set() 476 _KEYWORD_TRIE: t.Dict = {} 477 478 KEYWORDS: t.Dict[str, TokenType] = { 479 **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, 480 **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, 481 **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")}, 482 **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")}, 483 "/*+": TokenType.HINT, 484 "==": TokenType.EQ, 485 "::": TokenType.DCOLON, 486 "||": TokenType.DPIPE, 487 ">=": TokenType.GTE, 488 "<=": TokenType.LTE, 489 "<>": TokenType.NEQ, 490 "!=": TokenType.NEQ, 491 "<=>": TokenType.NULLSAFE_EQ, 492 "->": TokenType.ARROW, 493 "->>": TokenType.DARROW, 494 "=>": TokenType.FARROW, 495 "#>": TokenType.HASH_ARROW, 496 "#>>": TokenType.DHASH_ARROW, 497 "<->": TokenType.LR_ARROW, 498 "&&": TokenType.DAMP, 499 "ALL": TokenType.ALL, 500 "ALWAYS": TokenType.ALWAYS, 501 "AND": TokenType.AND, 502 "ANTI": TokenType.ANTI, 503 "ANY": TokenType.ANY, 504 "ASC": TokenType.ASC, 505 "AS": TokenType.ALIAS, 506 "ASOF": TokenType.ASOF, 507 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, 508 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, 509 "BEGIN": TokenType.BEGIN, 510 "BETWEEN": TokenType.BETWEEN, 511 "CACHE": TokenType.CACHE, 512 "UNCACHE": TokenType.UNCACHE, 513 "CASE": TokenType.CASE, 514 "CHARACTER SET": TokenType.CHARACTER_SET, 515 "CLUSTER BY": TokenType.CLUSTER_BY, 516 "COLLATE": TokenType.COLLATE, 517 "COLUMN": TokenType.COLUMN, 518 "COMMIT": TokenType.COMMIT, 519 "CONSTRAINT": TokenType.CONSTRAINT, 520 "CREATE": TokenType.CREATE, 521 "CROSS": TokenType.CROSS, 522 "CUBE": TokenType.CUBE, 523 "CURRENT_DATE": TokenType.CURRENT_DATE, 524 "CURRENT_TIME": TokenType.CURRENT_TIME, 525 "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, 526 "CURRENT_USER": TokenType.CURRENT_USER, 527 "DATABASE": TokenType.DATABASE, 528 "DEFAULT": TokenType.DEFAULT, 529 "DELETE": TokenType.DELETE, 530 "DESC": TokenType.DESC, 531 "DESCRIBE": TokenType.DESCRIBE, 532 "DISTINCT": TokenType.DISTINCT, 533 "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY, 534 "DIV": TokenType.DIV, 535 "DROP": TokenType.DROP, 536 "ELSE": TokenType.ELSE, 537 "END": TokenType.END, 538 "ESCAPE": TokenType.ESCAPE, 539 "EXCEPT": TokenType.EXCEPT, 540 "EXECUTE": TokenType.EXECUTE, 541 "EXISTS": TokenType.EXISTS, 542 "FALSE": TokenType.FALSE, 543 "FETCH": TokenType.FETCH, 544 "FILTER": TokenType.FILTER, 545 "FIRST": TokenType.FIRST, 546 "FULL": TokenType.FULL, 547 "FUNCTION": TokenType.FUNCTION, 548 "FOR": TokenType.FOR, 549 "FOREIGN KEY": TokenType.FOREIGN_KEY, 550 "FORMAT": TokenType.FORMAT, 551 "FROM": TokenType.FROM, 552 "GEOGRAPHY": TokenType.GEOGRAPHY, 553 "GEOMETRY": TokenType.GEOMETRY, 554 "GLOB": TokenType.GLOB, 555 "GROUP BY": TokenType.GROUP_BY, 556 "GROUPING SETS": TokenType.GROUPING_SETS, 557 "HAVING": TokenType.HAVING, 558 "IF": TokenType.IF, 559 "ILIKE": TokenType.ILIKE, 560 "IN": TokenType.IN, 561 "INDEX": TokenType.INDEX, 562 "INET": TokenType.INET, 563 "INNER": TokenType.INNER, 564 "INSERT": TokenType.INSERT, 565 "INTERVAL": TokenType.INTERVAL, 566 "INTERSECT": TokenType.INTERSECT, 567 "INTO": TokenType.INTO, 568 "IS": TokenType.IS, 569 "ISNULL": TokenType.ISNULL, 570 "JOIN": TokenType.JOIN, 571 "KEEP": TokenType.KEEP, 572 "LATERAL": TokenType.LATERAL, 573 "LEFT": TokenType.LEFT, 574 "LIKE": TokenType.LIKE, 575 "LIMIT": TokenType.LIMIT, 576 "LOAD": TokenType.LOAD, 577 "LOCK": TokenType.LOCK, 578 "MERGE": TokenType.MERGE, 579 "NATURAL": TokenType.NATURAL, 580 "NEXT": TokenType.NEXT, 581 "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR, 582 "NOT": TokenType.NOT, 583 "NOTNULL": TokenType.NOTNULL, 584 "NULL": TokenType.NULL, 585 "OBJECT": TokenType.OBJECT, 586 "OFFSET": TokenType.OFFSET, 587 "ON": TokenType.ON, 588 "OR": TokenType.OR, 589 "ORDER BY": TokenType.ORDER_BY, 590 "ORDINALITY": TokenType.ORDINALITY, 591 "OUTER": TokenType.OUTER, 592 "OVER": TokenType.OVER, 593 "OVERLAPS": TokenType.OVERLAPS, 594 "OVERWRITE": TokenType.OVERWRITE, 595 "PARTITION": TokenType.PARTITION, 596 "PARTITION BY": TokenType.PARTITION_BY, 597 "PARTITIONED BY": TokenType.PARTITION_BY, 598 "PARTITIONED_BY": TokenType.PARTITION_BY, 599 "PERCENT": TokenType.PERCENT, 600 "PIVOT": TokenType.PIVOT, 601 "PRAGMA": TokenType.PRAGMA, 602 "PRIMARY KEY": TokenType.PRIMARY_KEY, 603 "PROCEDURE": TokenType.PROCEDURE, 604 "QUALIFY": TokenType.QUALIFY, 605 "RANGE": TokenType.RANGE, 606 "RECURSIVE": TokenType.RECURSIVE, 607 "REGEXP": TokenType.RLIKE, 608 "REPLACE": TokenType.REPLACE, 609 "RETURNING": TokenType.RETURNING, 610 "REFERENCES": TokenType.REFERENCES, 611 "RIGHT": TokenType.RIGHT, 612 "RLIKE": TokenType.RLIKE, 613 "ROLLBACK": TokenType.ROLLBACK, 614 "ROLLUP": TokenType.ROLLUP, 615 "ROW": TokenType.ROW, 616 "ROWS": TokenType.ROWS, 617 "SCHEMA": TokenType.SCHEMA, 618 "SELECT": TokenType.SELECT, 619 "SEMI": TokenType.SEMI, 620 "SET": TokenType.SET, 621 "SETTINGS": TokenType.SETTINGS, 622 "SHOW": TokenType.SHOW, 623 "SIMILAR TO": TokenType.SIMILAR_TO, 624 "SOME": TokenType.SOME, 625 "SORT BY": TokenType.SORT_BY, 626 "TABLE": TokenType.TABLE, 627 "TABLESAMPLE": TokenType.TABLE_SAMPLE, 628 "TEMP": TokenType.TEMPORARY, 629 "TEMPORARY": TokenType.TEMPORARY, 630 "THEN": TokenType.THEN, 631 "TRUE": TokenType.TRUE, 632 "UNION": TokenType.UNION, 633 "UNNEST": TokenType.UNNEST, 634 "UNPIVOT": TokenType.UNPIVOT, 635 "UPDATE": TokenType.UPDATE, 636 "USE": TokenType.USE, 637 "USING": TokenType.USING, 638 "UUID": TokenType.UUID, 639 "VALUES": TokenType.VALUES, 640 "VIEW": TokenType.VIEW, 641 "VOLATILE": TokenType.VOLATILE, 642 "WHEN": TokenType.WHEN, 643 "WHERE": TokenType.WHERE, 644 "WINDOW": TokenType.WINDOW, 645 "WITH": TokenType.WITH, 646 "APPLY": TokenType.APPLY, 647 "ARRAY": TokenType.ARRAY, 648 "BIT": TokenType.BIT, 649 "BOOL": TokenType.BOOLEAN, 650 "BOOLEAN": TokenType.BOOLEAN, 651 "BYTE": TokenType.TINYINT, 652 "TINYINT": TokenType.TINYINT, 653 "SHORT": TokenType.SMALLINT, 654 "SMALLINT": TokenType.SMALLINT, 655 "INT2": TokenType.SMALLINT, 656 "INTEGER": TokenType.INT, 657 "INT": TokenType.INT, 658 "INT4": TokenType.INT, 659 "LONG": TokenType.BIGINT, 660 "BIGINT": TokenType.BIGINT, 661 "INT8": TokenType.BIGINT, 662 "DEC": TokenType.DECIMAL, 663 "DECIMAL": TokenType.DECIMAL, 664 "BIGDECIMAL": TokenType.BIGDECIMAL, 665 "BIGNUMERIC": TokenType.BIGDECIMAL, 666 "MAP": TokenType.MAP, 667 "NULLABLE": TokenType.NULLABLE, 668 "NUMBER": TokenType.DECIMAL, 669 "NUMERIC": TokenType.DECIMAL, 670 "FIXED": TokenType.DECIMAL, 671 "REAL": TokenType.FLOAT, 672 "FLOAT": TokenType.FLOAT, 673 "FLOAT4": TokenType.FLOAT, 674 "FLOAT8": TokenType.DOUBLE, 675 "DOUBLE": TokenType.DOUBLE, 676 "DOUBLE PRECISION": TokenType.DOUBLE, 677 "JSON": TokenType.JSON, 678 "CHAR": TokenType.CHAR, 679 "CHARACTER": TokenType.CHAR, 680 "NCHAR": TokenType.NCHAR, 681 "VARCHAR": TokenType.VARCHAR, 682 "VARCHAR2": TokenType.VARCHAR, 683 "NVARCHAR": TokenType.NVARCHAR, 684 "NVARCHAR2": TokenType.NVARCHAR, 685 "STR": TokenType.TEXT, 686 "STRING": TokenType.TEXT, 687 "TEXT": TokenType.TEXT, 688 "CLOB": TokenType.TEXT, 689 "LONGVARCHAR": TokenType.TEXT, 690 "BINARY": TokenType.BINARY, 691 "BLOB": TokenType.VARBINARY, 692 "BYTEA": TokenType.VARBINARY, 693 "VARBINARY": TokenType.VARBINARY, 694 "TIME": TokenType.TIME, 695 "TIMESTAMP": TokenType.TIMESTAMP, 696 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, 697 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, 698 "DATE": TokenType.DATE, 699 "DATETIME": TokenType.DATETIME, 700 "INT4RANGE": TokenType.INT4RANGE, 701 "INT4MULTIRANGE": TokenType.INT4MULTIRANGE, 702 "INT8RANGE": TokenType.INT8RANGE, 703 "INT8MULTIRANGE": TokenType.INT8MULTIRANGE, 704 "NUMRANGE": TokenType.NUMRANGE, 705 "NUMMULTIRANGE": TokenType.NUMMULTIRANGE, 706 "TSRANGE": TokenType.TSRANGE, 707 "TSMULTIRANGE": TokenType.TSMULTIRANGE, 708 "TSTZRANGE": TokenType.TSTZRANGE, 709 "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE, 710 "DATERANGE": TokenType.DATERANGE, 711 "DATEMULTIRANGE": TokenType.DATEMULTIRANGE, 712 "UNIQUE": TokenType.UNIQUE, 713 "STRUCT": TokenType.STRUCT, 714 "VARIANT": TokenType.VARIANT, 715 "ALTER": TokenType.ALTER, 716 "ANALYZE": TokenType.COMMAND, 717 "CALL": TokenType.COMMAND, 718 "COMMENT": TokenType.COMMENT, 719 "COPY": TokenType.COMMAND, 720 "EXPLAIN": TokenType.COMMAND, 721 "GRANT": TokenType.COMMAND, 722 "OPTIMIZE": TokenType.COMMAND, 723 "PREPARE": TokenType.COMMAND, 724 "TRUNCATE": TokenType.COMMAND, 725 "VACUUM": TokenType.COMMAND, 726 } 727 728 WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = { 729 " ": TokenType.SPACE, 730 "\t": TokenType.SPACE, 731 "\n": TokenType.BREAK, 732 "\r": TokenType.BREAK, 733 "\r\n": TokenType.BREAK, 734 } 735 736 COMMANDS = { 737 TokenType.COMMAND, 738 TokenType.EXECUTE, 739 TokenType.FETCH, 740 TokenType.SHOW, 741 } 742 743 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} 744 745 # handle numeric literals like in hive (3L = BIGINT) 746 NUMERIC_LITERALS: t.Dict[str, str] = {} 747 ENCODE: t.Optional[str] = None 748 749 COMMENTS = ["--", ("/*", "*/")] 750 751 __slots__ = ( 752 "sql", 753 "size", 754 "tokens", 755 "_start", 756 "_current", 757 "_line", 758 "_col", 759 "_comments", 760 "_char", 761 "_end", 762 "_peek", 763 "_prev_token_line", 764 ) 765 766 def __init__(self) -> None: 767 self.reset() 768 769 def reset(self) -> None: 770 self.sql = "" 771 self.size = 0 772 self.tokens: t.List[Token] = [] 773 self._start = 0 774 self._current = 0 775 self._line = 1 776 self._col = 0 777 self._comments: t.List[str] = [] 778 779 self._char = "" 780 self._end = False 781 self._peek = "" 782 self._prev_token_line = -1 783 784 def tokenize(self, sql: str) -> t.List[Token]: 785 """Returns a list of tokens corresponding to the SQL string `sql`.""" 786 self.reset() 787 self.sql = sql 788 self.size = len(sql) 789 790 try: 791 self._scan() 792 except Exception as e: 793 start = max(self._current - 50, 0) 794 end = min(self._current + 50, self.size - 1) 795 context = self.sql[start:end] 796 raise ValueError(f"Error tokenizing '{context}'") from e 797 798 return self.tokens 799 800 def _scan(self, until: t.Optional[t.Callable] = None) -> None: 801 while self.size and not self._end: 802 self._start = self._current 803 self._advance() 804 805 if self._char is None: 806 break 807 808 if self._char not in self.WHITE_SPACE: 809 if self._char.isdigit(): 810 self._scan_number() 811 elif self._char in self._IDENTIFIERS: 812 self._scan_identifier(self._IDENTIFIERS[self._char]) 813 else: 814 self._scan_keywords() 815 816 if until and until(): 817 break 818 819 if self.tokens and self._comments: 820 self.tokens[-1].comments.extend(self._comments) 821 822 def _chars(self, size: int) -> str: 823 if size == 1: 824 return self._char 825 826 start = self._current - 1 827 end = start + size 828 829 return self.sql[start:end] if end <= self.size else "" 830 831 def _advance(self, i: int = 1, alnum: bool = False) -> None: 832 if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: 833 self._col = 1 834 self._line += 1 835 else: 836 self._col += i 837 838 self._current += i 839 self._end = self._current >= self.size 840 self._char = self.sql[self._current - 1] 841 self._peek = "" if self._end else self.sql[self._current] 842 843 if alnum and self._char.isalnum(): 844 # Here we use local variables instead of attributes for better performance 845 _col = self._col 846 _current = self._current 847 _end = self._end 848 _peek = self._peek 849 850 while _peek.isalnum(): 851 _col += 1 852 _current += 1 853 _end = _current >= self.size 854 _peek = "" if _end else self.sql[_current] 855 856 self._col = _col 857 self._current = _current 858 self._end = _end 859 self._peek = _peek 860 self._char = self.sql[_current - 1] 861 862 @property 863 def _text(self) -> str: 864 return self.sql[self._start : self._current] 865 866 def peek(self, i: int = 0) -> str: 867 i = self._current + i 868 if i < self.size: 869 return self.sql[i] 870 return "" 871 872 def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None: 873 self._prev_token_line = self._line 874 self.tokens.append( 875 Token( 876 token_type, 877 text=self._text if text is None else text, 878 line=self._line, 879 col=self._col, 880 start=self._start, 881 end=self._current - 1, 882 comments=self._comments, 883 ) 884 ) 885 self._comments = [] 886 887 # If we have either a semicolon or a begin token before the command's token, we'll parse 888 # whatever follows the command's token as a string 889 if ( 890 token_type in self.COMMANDS 891 and self._peek != ";" 892 and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS) 893 ): 894 start = self._current 895 tokens = len(self.tokens) 896 self._scan(lambda: self._peek == ";") 897 self.tokens = self.tokens[:tokens] 898 text = self.sql[start : self._current].strip() 899 if text: 900 self._add(TokenType.STRING, text) 901 902 def _scan_keywords(self) -> None: 903 size = 0 904 word = None 905 chars = self._text 906 char = chars 907 prev_space = False 908 skip = False 909 trie = self._KEYWORD_TRIE 910 single_token = char in self.SINGLE_TOKENS 911 912 while chars: 913 if skip: 914 result = 1 915 else: 916 result, trie = in_trie(trie, char.upper()) 917 918 if result == 0: 919 break 920 if result == 2: 921 word = chars 922 923 size += 1 924 end = self._current - 1 + size 925 926 if end < self.size: 927 char = self.sql[end] 928 single_token = single_token or char in self.SINGLE_TOKENS 929 is_space = char in self.WHITE_SPACE 930 931 if not is_space or not prev_space: 932 if is_space: 933 char = " " 934 chars += char 935 prev_space = is_space 936 skip = False 937 else: 938 skip = True 939 else: 940 char = "" 941 chars = " " 942 943 word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word 944 945 if not word: 946 if self._char in self.SINGLE_TOKENS: 947 self._add(self.SINGLE_TOKENS[self._char], text=self._char) 948 return 949 self._scan_var() 950 return 951 952 if self._scan_string(word): 953 return 954 if self._scan_comment(word): 955 return 956 957 self._advance(size - 1) 958 word = word.upper() 959 self._add(self.KEYWORDS[word], text=word) 960 961 def _scan_comment(self, comment_start: str) -> bool: 962 if comment_start not in self._COMMENTS: 963 return False 964 965 comment_start_line = self._line 966 comment_start_size = len(comment_start) 967 comment_end = self._COMMENTS[comment_start] 968 969 if comment_end: 970 # Skip the comment's start delimiter 971 self._advance(comment_start_size) 972 973 comment_end_size = len(comment_end) 974 while not self._end and self._chars(comment_end_size) != comment_end: 975 self._advance(alnum=True) 976 977 self._comments.append(self._text[comment_start_size : -comment_end_size + 1]) 978 self._advance(comment_end_size - 1) 979 else: 980 while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK: 981 self._advance(alnum=True) 982 self._comments.append(self._text[comment_start_size:]) 983 984 # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding. 985 # Multiple consecutive comments are preserved by appending them to the current comments list. 986 if comment_start_line == self._prev_token_line: 987 self.tokens[-1].comments.extend(self._comments) 988 self._comments = [] 989 self._prev_token_line = self._line 990 991 return True 992 993 def _scan_number(self) -> None: 994 if self._char == "0": 995 peek = self._peek.upper() 996 if peek == "B": 997 return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER) 998 elif peek == "X": 999 return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER) 1000 1001 decimal = False 1002 scientific = 0 1003 1004 while True: 1005 if self._peek.isdigit(): 1006 self._advance() 1007 elif self._peek == "." and not decimal: 1008 after = self.peek(1) 1009 if after.isdigit() or not after.isalpha(): 1010 decimal = True 1011 self._advance() 1012 else: 1013 return self._add(TokenType.VAR) 1014 elif self._peek in ("-", "+") and scientific == 1: 1015 scientific += 1 1016 self._advance() 1017 elif self._peek.upper() == "E" and not scientific: 1018 scientific += 1 1019 self._advance() 1020 elif self._peek.isidentifier(): 1021 number_text = self._text 1022 literal = "" 1023 1024 while self._peek.strip() and self._peek not in self.SINGLE_TOKENS: 1025 literal += self._peek.upper() 1026 self._advance() 1027 1028 token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, "")) 1029 1030 if token_type: 1031 self._add(TokenType.NUMBER, number_text) 1032 self._add(TokenType.DCOLON, "::") 1033 return self._add(token_type, literal) 1034 elif self.IDENTIFIERS_CAN_START_WITH_DIGIT: 1035 return self._add(TokenType.VAR) 1036 1037 self._add(TokenType.NUMBER, number_text) 1038 return self._advance(-len(literal)) 1039 else: 1040 return self._add(TokenType.NUMBER) 1041 1042 def _scan_bits(self) -> None: 1043 self._advance() 1044 value = self._extract_value() 1045 try: 1046 # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier 1047 int(value, 2) 1048 self._add(TokenType.BIT_STRING, value[2:]) # Drop the 0b 1049 except ValueError: 1050 self._add(TokenType.IDENTIFIER) 1051 1052 def _scan_hex(self) -> None: 1053 self._advance() 1054 value = self._extract_value() 1055 try: 1056 # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier 1057 int(value, 16) 1058 self._add(TokenType.HEX_STRING, value[2:]) # Drop the 0x 1059 except ValueError: 1060 self._add(TokenType.IDENTIFIER) 1061 1062 def _extract_value(self) -> str: 1063 while True: 1064 char = self._peek.strip() 1065 if char and char not in self.SINGLE_TOKENS: 1066 self._advance(alnum=True) 1067 else: 1068 break 1069 1070 return self._text 1071 1072 def _scan_string(self, start: str) -> bool: 1073 base = None 1074 token_type = TokenType.STRING 1075 1076 if start in self._QUOTES: 1077 end = self._QUOTES[start] 1078 elif start in self._FORMAT_STRINGS: 1079 end, token_type = self._FORMAT_STRINGS[start] 1080 1081 if token_type == TokenType.HEX_STRING: 1082 base = 16 1083 elif token_type == TokenType.BIT_STRING: 1084 base = 2 1085 else: 1086 return False 1087 1088 self._advance(len(start)) 1089 text = self._extract_string(end) 1090 1091 if base: 1092 try: 1093 int(text, base) 1094 except: 1095 raise RuntimeError( 1096 f"Numeric string contains invalid characters from {self._line}:{self._start}" 1097 ) 1098 else: 1099 text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text 1100 1101 self._add(token_type, text) 1102 return True 1103 1104 def _scan_identifier(self, identifier_end: str) -> None: 1105 self._advance() 1106 text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES) 1107 self._add(TokenType.IDENTIFIER, text) 1108 1109 def _scan_var(self) -> None: 1110 while True: 1111 char = self._peek.strip() 1112 if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS): 1113 self._advance(alnum=True) 1114 else: 1115 break 1116 1117 self._add( 1118 TokenType.VAR 1119 if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER 1120 else self.KEYWORDS.get(self._text.upper(), TokenType.VAR) 1121 ) 1122 1123 def _extract_string(self, delimiter: str, escapes=None) -> str: 1124 text = "" 1125 delim_size = len(delimiter) 1126 escapes = self._STRING_ESCAPES if escapes is None else escapes 1127 1128 while True: 1129 if self._char in escapes and (self._peek == delimiter or self._peek in escapes): 1130 if self._peek == delimiter: 1131 text += self._peek 1132 else: 1133 text += self._char + self._peek 1134 1135 if self._current + 1 < self.size: 1136 self._advance(2) 1137 else: 1138 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}") 1139 else: 1140 if self._chars(delim_size) == delimiter: 1141 if delim_size > 1: 1142 self._advance(delim_size - 1) 1143 break 1144 1145 if self._end: 1146 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}") 1147 1148 current = self._current - 1 1149 self._advance(alnum=True) 1150 text += self.sql[current : self._current - 1] 1151 1152 return text
11class TokenType(AutoName): 12 L_PAREN = auto() 13 R_PAREN = auto() 14 L_BRACKET = auto() 15 R_BRACKET = auto() 16 L_BRACE = auto() 17 R_BRACE = auto() 18 COMMA = auto() 19 DOT = auto() 20 DASH = auto() 21 PLUS = auto() 22 COLON = auto() 23 DCOLON = auto() 24 SEMICOLON = auto() 25 STAR = auto() 26 BACKSLASH = auto() 27 SLASH = auto() 28 LT = auto() 29 LTE = auto() 30 GT = auto() 31 GTE = auto() 32 NOT = auto() 33 EQ = auto() 34 NEQ = auto() 35 NULLSAFE_EQ = auto() 36 AND = auto() 37 OR = auto() 38 AMP = auto() 39 DPIPE = auto() 40 PIPE = auto() 41 CARET = auto() 42 TILDA = auto() 43 ARROW = auto() 44 DARROW = auto() 45 FARROW = auto() 46 HASH = auto() 47 HASH_ARROW = auto() 48 DHASH_ARROW = auto() 49 LR_ARROW = auto() 50 LT_AT = auto() 51 AT_GT = auto() 52 DOLLAR = auto() 53 PARAMETER = auto() 54 SESSION_PARAMETER = auto() 55 DAMP = auto() 56 57 BLOCK_START = auto() 58 BLOCK_END = auto() 59 60 SPACE = auto() 61 BREAK = auto() 62 63 STRING = auto() 64 NUMBER = auto() 65 IDENTIFIER = auto() 66 DATABASE = auto() 67 COLUMN = auto() 68 COLUMN_DEF = auto() 69 SCHEMA = auto() 70 TABLE = auto() 71 VAR = auto() 72 BIT_STRING = auto() 73 HEX_STRING = auto() 74 BYTE_STRING = auto() 75 NATIONAL_STRING = auto() 76 RAW_STRING = auto() 77 78 # types 79 BIT = auto() 80 BOOLEAN = auto() 81 TINYINT = auto() 82 UTINYINT = auto() 83 SMALLINT = auto() 84 USMALLINT = auto() 85 INT = auto() 86 UINT = auto() 87 BIGINT = auto() 88 UBIGINT = auto() 89 INT128 = auto() 90 UINT128 = auto() 91 INT256 = auto() 92 UINT256 = auto() 93 FLOAT = auto() 94 DOUBLE = auto() 95 DECIMAL = auto() 96 BIGDECIMAL = auto() 97 CHAR = auto() 98 NCHAR = auto() 99 VARCHAR = auto() 100 NVARCHAR = auto() 101 TEXT = auto() 102 MEDIUMTEXT = auto() 103 LONGTEXT = auto() 104 MEDIUMBLOB = auto() 105 LONGBLOB = auto() 106 BINARY = auto() 107 VARBINARY = auto() 108 JSON = auto() 109 JSONB = auto() 110 TIME = auto() 111 TIMESTAMP = auto() 112 TIMESTAMPTZ = auto() 113 TIMESTAMPLTZ = auto() 114 DATETIME = auto() 115 DATETIME64 = auto() 116 DATE = auto() 117 INT4RANGE = auto() 118 INT4MULTIRANGE = auto() 119 INT8RANGE = auto() 120 INT8MULTIRANGE = auto() 121 NUMRANGE = auto() 122 NUMMULTIRANGE = auto() 123 TSRANGE = auto() 124 TSMULTIRANGE = auto() 125 TSTZRANGE = auto() 126 TSTZMULTIRANGE = auto() 127 DATERANGE = auto() 128 DATEMULTIRANGE = auto() 129 UUID = auto() 130 GEOGRAPHY = auto() 131 NULLABLE = auto() 132 GEOMETRY = auto() 133 HLLSKETCH = auto() 134 HSTORE = auto() 135 SUPER = auto() 136 SERIAL = auto() 137 SMALLSERIAL = auto() 138 BIGSERIAL = auto() 139 XML = auto() 140 UNIQUEIDENTIFIER = auto() 141 MONEY = auto() 142 SMALLMONEY = auto() 143 ROWVERSION = auto() 144 IMAGE = auto() 145 VARIANT = auto() 146 OBJECT = auto() 147 INET = auto() 148 ENUM = auto() 149 150 # keywords 151 ALIAS = auto() 152 ALTER = auto() 153 ALWAYS = auto() 154 ALL = auto() 155 ANTI = auto() 156 ANY = auto() 157 APPLY = auto() 158 ARRAY = auto() 159 ASC = auto() 160 ASOF = auto() 161 AUTO_INCREMENT = auto() 162 BEGIN = auto() 163 BETWEEN = auto() 164 CACHE = auto() 165 CASE = auto() 166 CHARACTER_SET = auto() 167 CLUSTER_BY = auto() 168 COLLATE = auto() 169 COMMAND = auto() 170 COMMENT = auto() 171 COMMIT = auto() 172 CONSTRAINT = auto() 173 CREATE = auto() 174 CROSS = auto() 175 CUBE = auto() 176 CURRENT_DATE = auto() 177 CURRENT_DATETIME = auto() 178 CURRENT_TIME = auto() 179 CURRENT_TIMESTAMP = auto() 180 CURRENT_USER = auto() 181 DEFAULT = auto() 182 DELETE = auto() 183 DESC = auto() 184 DESCRIBE = auto() 185 DICTIONARY = auto() 186 DISTINCT = auto() 187 DISTRIBUTE_BY = auto() 188 DIV = auto() 189 DROP = auto() 190 ELSE = auto() 191 END = auto() 192 ESCAPE = auto() 193 EXCEPT = auto() 194 EXECUTE = auto() 195 EXISTS = auto() 196 FALSE = auto() 197 FETCH = auto() 198 FILTER = auto() 199 FINAL = auto() 200 FIRST = auto() 201 FOR = auto() 202 FOREIGN_KEY = auto() 203 FORMAT = auto() 204 FROM = auto() 205 FULL = auto() 206 FUNCTION = auto() 207 GLOB = auto() 208 GLOBAL = auto() 209 GROUP_BY = auto() 210 GROUPING_SETS = auto() 211 HAVING = auto() 212 HINT = auto() 213 IF = auto() 214 ILIKE = auto() 215 ILIKE_ANY = auto() 216 IN = auto() 217 INDEX = auto() 218 INNER = auto() 219 INSERT = auto() 220 INTERSECT = auto() 221 INTERVAL = auto() 222 INTO = auto() 223 INTRODUCER = auto() 224 IRLIKE = auto() 225 IS = auto() 226 ISNULL = auto() 227 JOIN = auto() 228 JOIN_MARKER = auto() 229 KEEP = auto() 230 LANGUAGE = auto() 231 LATERAL = auto() 232 LEFT = auto() 233 LIKE = auto() 234 LIKE_ANY = auto() 235 LIMIT = auto() 236 LOAD = auto() 237 LOCK = auto() 238 MAP = auto() 239 MATCH_RECOGNIZE = auto() 240 MERGE = auto() 241 MOD = auto() 242 NATURAL = auto() 243 NEXT = auto() 244 NEXT_VALUE_FOR = auto() 245 NOTNULL = auto() 246 NULL = auto() 247 OFFSET = auto() 248 ON = auto() 249 ORDER_BY = auto() 250 ORDERED = auto() 251 ORDINALITY = auto() 252 OUTER = auto() 253 OVER = auto() 254 OVERLAPS = auto() 255 OVERWRITE = auto() 256 PARTITION = auto() 257 PARTITION_BY = auto() 258 PERCENT = auto() 259 PIVOT = auto() 260 PLACEHOLDER = auto() 261 PRAGMA = auto() 262 PRIMARY_KEY = auto() 263 PROCEDURE = auto() 264 PROPERTIES = auto() 265 PSEUDO_TYPE = auto() 266 QUALIFY = auto() 267 QUOTE = auto() 268 RANGE = auto() 269 RECURSIVE = auto() 270 REPLACE = auto() 271 RETURNING = auto() 272 REFERENCES = auto() 273 RIGHT = auto() 274 RLIKE = auto() 275 ROLLBACK = auto() 276 ROLLUP = auto() 277 ROW = auto() 278 ROWS = auto() 279 SELECT = auto() 280 SEMI = auto() 281 SEPARATOR = auto() 282 SERDE_PROPERTIES = auto() 283 SET = auto() 284 SETTINGS = auto() 285 SHOW = auto() 286 SIMILAR_TO = auto() 287 SOME = auto() 288 SORT_BY = auto() 289 STRUCT = auto() 290 TABLE_SAMPLE = auto() 291 TEMPORARY = auto() 292 TOP = auto() 293 THEN = auto() 294 TRUE = auto() 295 UNCACHE = auto() 296 UNION = auto() 297 UNNEST = auto() 298 UNPIVOT = auto() 299 UPDATE = auto() 300 USE = auto() 301 USING = auto() 302 VALUES = auto() 303 VIEW = auto() 304 VOLATILE = auto() 305 WHEN = auto() 306 WHERE = auto() 307 WINDOW = auto() 308 WITH = auto() 309 UNIQUE = auto()
An enumeration.
L_PAREN =
<TokenType.L_PAREN: 'L_PAREN'>
R_PAREN =
<TokenType.R_PAREN: 'R_PAREN'>
L_BRACKET =
<TokenType.L_BRACKET: 'L_BRACKET'>
R_BRACKET =
<TokenType.R_BRACKET: 'R_BRACKET'>
L_BRACE =
<TokenType.L_BRACE: 'L_BRACE'>
R_BRACE =
<TokenType.R_BRACE: 'R_BRACE'>
COMMA =
<TokenType.COMMA: 'COMMA'>
DOT =
<TokenType.DOT: 'DOT'>
DASH =
<TokenType.DASH: 'DASH'>
PLUS =
<TokenType.PLUS: 'PLUS'>
COLON =
<TokenType.COLON: 'COLON'>
DCOLON =
<TokenType.DCOLON: 'DCOLON'>
SEMICOLON =
<TokenType.SEMICOLON: 'SEMICOLON'>
STAR =
<TokenType.STAR: 'STAR'>
BACKSLASH =
<TokenType.BACKSLASH: 'BACKSLASH'>
SLASH =
<TokenType.SLASH: 'SLASH'>
LT =
<TokenType.LT: 'LT'>
LTE =
<TokenType.LTE: 'LTE'>
GT =
<TokenType.GT: 'GT'>
GTE =
<TokenType.GTE: 'GTE'>
NOT =
<TokenType.NOT: 'NOT'>
EQ =
<TokenType.EQ: 'EQ'>
NEQ =
<TokenType.NEQ: 'NEQ'>
NULLSAFE_EQ =
<TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>
AND =
<TokenType.AND: 'AND'>
OR =
<TokenType.OR: 'OR'>
AMP =
<TokenType.AMP: 'AMP'>
DPIPE =
<TokenType.DPIPE: 'DPIPE'>
PIPE =
<TokenType.PIPE: 'PIPE'>
CARET =
<TokenType.CARET: 'CARET'>
TILDA =
<TokenType.TILDA: 'TILDA'>
ARROW =
<TokenType.ARROW: 'ARROW'>
DARROW =
<TokenType.DARROW: 'DARROW'>
FARROW =
<TokenType.FARROW: 'FARROW'>
HASH =
<TokenType.HASH: 'HASH'>
HASH_ARROW =
<TokenType.HASH_ARROW: 'HASH_ARROW'>
DHASH_ARROW =
<TokenType.DHASH_ARROW: 'DHASH_ARROW'>
LR_ARROW =
<TokenType.LR_ARROW: 'LR_ARROW'>
LT_AT =
<TokenType.LT_AT: 'LT_AT'>
AT_GT =
<TokenType.AT_GT: 'AT_GT'>
DOLLAR =
<TokenType.DOLLAR: 'DOLLAR'>
PARAMETER =
<TokenType.PARAMETER: 'PARAMETER'>
SESSION_PARAMETER =
<TokenType.SESSION_PARAMETER: 'SESSION_PARAMETER'>
DAMP =
<TokenType.DAMP: 'DAMP'>
BLOCK_START =
<TokenType.BLOCK_START: 'BLOCK_START'>
BLOCK_END =
<TokenType.BLOCK_END: 'BLOCK_END'>
SPACE =
<TokenType.SPACE: 'SPACE'>
BREAK =
<TokenType.BREAK: 'BREAK'>
STRING =
<TokenType.STRING: 'STRING'>
NUMBER =
<TokenType.NUMBER: 'NUMBER'>
IDENTIFIER =
<TokenType.IDENTIFIER: 'IDENTIFIER'>
DATABASE =
<TokenType.DATABASE: 'DATABASE'>
COLUMN =
<TokenType.COLUMN: 'COLUMN'>
COLUMN_DEF =
<TokenType.COLUMN_DEF: 'COLUMN_DEF'>
SCHEMA =
<TokenType.SCHEMA: 'SCHEMA'>
TABLE =
<TokenType.TABLE: 'TABLE'>
VAR =
<TokenType.VAR: 'VAR'>
BIT_STRING =
<TokenType.BIT_STRING: 'BIT_STRING'>
HEX_STRING =
<TokenType.HEX_STRING: 'HEX_STRING'>
BYTE_STRING =
<TokenType.BYTE_STRING: 'BYTE_STRING'>
NATIONAL_STRING =
<TokenType.NATIONAL_STRING: 'NATIONAL_STRING'>
RAW_STRING =
<TokenType.RAW_STRING: 'RAW_STRING'>
BIT =
<TokenType.BIT: 'BIT'>
BOOLEAN =
<TokenType.BOOLEAN: 'BOOLEAN'>
TINYINT =
<TokenType.TINYINT: 'TINYINT'>
UTINYINT =
<TokenType.UTINYINT: 'UTINYINT'>
SMALLINT =
<TokenType.SMALLINT: 'SMALLINT'>
USMALLINT =
<TokenType.USMALLINT: 'USMALLINT'>
INT =
<TokenType.INT: 'INT'>
UINT =
<TokenType.UINT: 'UINT'>
BIGINT =
<TokenType.BIGINT: 'BIGINT'>
UBIGINT =
<TokenType.UBIGINT: 'UBIGINT'>
INT128 =
<TokenType.INT128: 'INT128'>
UINT128 =
<TokenType.UINT128: 'UINT128'>
INT256 =
<TokenType.INT256: 'INT256'>
UINT256 =
<TokenType.UINT256: 'UINT256'>
FLOAT =
<TokenType.FLOAT: 'FLOAT'>
DOUBLE =
<TokenType.DOUBLE: 'DOUBLE'>
DECIMAL =
<TokenType.DECIMAL: 'DECIMAL'>
BIGDECIMAL =
<TokenType.BIGDECIMAL: 'BIGDECIMAL'>
CHAR =
<TokenType.CHAR: 'CHAR'>
NCHAR =
<TokenType.NCHAR: 'NCHAR'>
VARCHAR =
<TokenType.VARCHAR: 'VARCHAR'>
NVARCHAR =
<TokenType.NVARCHAR: 'NVARCHAR'>
TEXT =
<TokenType.TEXT: 'TEXT'>
MEDIUMTEXT =
<TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>
LONGTEXT =
<TokenType.LONGTEXT: 'LONGTEXT'>
MEDIUMBLOB =
<TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>
LONGBLOB =
<TokenType.LONGBLOB: 'LONGBLOB'>
BINARY =
<TokenType.BINARY: 'BINARY'>
VARBINARY =
<TokenType.VARBINARY: 'VARBINARY'>
JSON =
<TokenType.JSON: 'JSON'>
JSONB =
<TokenType.JSONB: 'JSONB'>
TIME =
<TokenType.TIME: 'TIME'>
TIMESTAMP =
<TokenType.TIMESTAMP: 'TIMESTAMP'>
TIMESTAMPTZ =
<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>
TIMESTAMPLTZ =
<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>
DATETIME =
<TokenType.DATETIME: 'DATETIME'>
DATETIME64 =
<TokenType.DATETIME64: 'DATETIME64'>
DATE =
<TokenType.DATE: 'DATE'>
INT4RANGE =
<TokenType.INT4RANGE: 'INT4RANGE'>
INT4MULTIRANGE =
<TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>
INT8RANGE =
<TokenType.INT8RANGE: 'INT8RANGE'>
INT8MULTIRANGE =
<TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>
NUMRANGE =
<TokenType.NUMRANGE: 'NUMRANGE'>
NUMMULTIRANGE =
<TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>
TSRANGE =
<TokenType.TSRANGE: 'TSRANGE'>
TSMULTIRANGE =
<TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>
TSTZRANGE =
<TokenType.TSTZRANGE: 'TSTZRANGE'>
TSTZMULTIRANGE =
<TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>
DATERANGE =
<TokenType.DATERANGE: 'DATERANGE'>
DATEMULTIRANGE =
<TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>
UUID =
<TokenType.UUID: 'UUID'>
GEOGRAPHY =
<TokenType.GEOGRAPHY: 'GEOGRAPHY'>
NULLABLE =
<TokenType.NULLABLE: 'NULLABLE'>
GEOMETRY =
<TokenType.GEOMETRY: 'GEOMETRY'>
HLLSKETCH =
<TokenType.HLLSKETCH: 'HLLSKETCH'>
HSTORE =
<TokenType.HSTORE: 'HSTORE'>
SUPER =
<TokenType.SUPER: 'SUPER'>
SERIAL =
<TokenType.SERIAL: 'SERIAL'>
SMALLSERIAL =
<TokenType.SMALLSERIAL: 'SMALLSERIAL'>
BIGSERIAL =
<TokenType.BIGSERIAL: 'BIGSERIAL'>
XML =
<TokenType.XML: 'XML'>
UNIQUEIDENTIFIER =
<TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>
MONEY =
<TokenType.MONEY: 'MONEY'>
SMALLMONEY =
<TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION =
<TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE =
<TokenType.IMAGE: 'IMAGE'>
VARIANT =
<TokenType.VARIANT: 'VARIANT'>
OBJECT =
<TokenType.OBJECT: 'OBJECT'>
INET =
<TokenType.INET: 'INET'>
ENUM =
<TokenType.ENUM: 'ENUM'>
ALIAS =
<TokenType.ALIAS: 'ALIAS'>
ALTER =
<TokenType.ALTER: 'ALTER'>
ALWAYS =
<TokenType.ALWAYS: 'ALWAYS'>
ALL =
<TokenType.ALL: 'ALL'>
ANTI =
<TokenType.ANTI: 'ANTI'>
ANY =
<TokenType.ANY: 'ANY'>
APPLY =
<TokenType.APPLY: 'APPLY'>
ARRAY =
<TokenType.ARRAY: 'ARRAY'>
ASC =
<TokenType.ASC: 'ASC'>
ASOF =
<TokenType.ASOF: 'ASOF'>
AUTO_INCREMENT =
<TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN =
<TokenType.BEGIN: 'BEGIN'>
BETWEEN =
<TokenType.BETWEEN: 'BETWEEN'>
CACHE =
<TokenType.CACHE: 'CACHE'>
CASE =
<TokenType.CASE: 'CASE'>
CHARACTER_SET =
<TokenType.CHARACTER_SET: 'CHARACTER_SET'>
CLUSTER_BY =
<TokenType.CLUSTER_BY: 'CLUSTER_BY'>
COLLATE =
<TokenType.COLLATE: 'COLLATE'>
COMMAND =
<TokenType.COMMAND: 'COMMAND'>
COMMENT =
<TokenType.COMMENT: 'COMMENT'>
COMMIT =
<TokenType.COMMIT: 'COMMIT'>
CONSTRAINT =
<TokenType.CONSTRAINT: 'CONSTRAINT'>
CREATE =
<TokenType.CREATE: 'CREATE'>
CROSS =
<TokenType.CROSS: 'CROSS'>
CUBE =
<TokenType.CUBE: 'CUBE'>
CURRENT_DATE =
<TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME =
<TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_TIME =
<TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP =
<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
CURRENT_USER =
<TokenType.CURRENT_USER: 'CURRENT_USER'>
DEFAULT =
<TokenType.DEFAULT: 'DEFAULT'>
DELETE =
<TokenType.DELETE: 'DELETE'>
DESC =
<TokenType.DESC: 'DESC'>
DESCRIBE =
<TokenType.DESCRIBE: 'DESCRIBE'>
DICTIONARY =
<TokenType.DICTIONARY: 'DICTIONARY'>
DISTINCT =
<TokenType.DISTINCT: 'DISTINCT'>
DISTRIBUTE_BY =
<TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>
DIV =
<TokenType.DIV: 'DIV'>
DROP =
<TokenType.DROP: 'DROP'>
ELSE =
<TokenType.ELSE: 'ELSE'>
END =
<TokenType.END: 'END'>
ESCAPE =
<TokenType.ESCAPE: 'ESCAPE'>
EXCEPT =
<TokenType.EXCEPT: 'EXCEPT'>
EXECUTE =
<TokenType.EXECUTE: 'EXECUTE'>
EXISTS =
<TokenType.EXISTS: 'EXISTS'>
FALSE =
<TokenType.FALSE: 'FALSE'>
FETCH =
<TokenType.FETCH: 'FETCH'>
FILTER =
<TokenType.FILTER: 'FILTER'>
FINAL =
<TokenType.FINAL: 'FINAL'>
FIRST =
<TokenType.FIRST: 'FIRST'>
FOR =
<TokenType.FOR: 'FOR'>
FOREIGN_KEY =
<TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT =
<TokenType.FORMAT: 'FORMAT'>
FROM =
<TokenType.FROM: 'FROM'>
FULL =
<TokenType.FULL: 'FULL'>
FUNCTION =
<TokenType.FUNCTION: 'FUNCTION'>
GLOB =
<TokenType.GLOB: 'GLOB'>
GLOBAL =
<TokenType.GLOBAL: 'GLOBAL'>
GROUP_BY =
<TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS =
<TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING =
<TokenType.HAVING: 'HAVING'>
HINT =
<TokenType.HINT: 'HINT'>
IF =
<TokenType.IF: 'IF'>
ILIKE =
<TokenType.ILIKE: 'ILIKE'>
ILIKE_ANY =
<TokenType.ILIKE_ANY: 'ILIKE_ANY'>
IN =
<TokenType.IN: 'IN'>
INDEX =
<TokenType.INDEX: 'INDEX'>
INNER =
<TokenType.INNER: 'INNER'>
INSERT =
<TokenType.INSERT: 'INSERT'>
INTERSECT =
<TokenType.INTERSECT: 'INTERSECT'>
INTERVAL =
<TokenType.INTERVAL: 'INTERVAL'>
INTO =
<TokenType.INTO: 'INTO'>
INTRODUCER =
<TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE =
<TokenType.IRLIKE: 'IRLIKE'>
IS =
<TokenType.IS: 'IS'>
ISNULL =
<TokenType.ISNULL: 'ISNULL'>
JOIN =
<TokenType.JOIN: 'JOIN'>
JOIN_MARKER =
<TokenType.JOIN_MARKER: 'JOIN_MARKER'>
KEEP =
<TokenType.KEEP: 'KEEP'>
LANGUAGE =
<TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL =
<TokenType.LATERAL: 'LATERAL'>
LEFT =
<TokenType.LEFT: 'LEFT'>
LIKE =
<TokenType.LIKE: 'LIKE'>
LIKE_ANY =
<TokenType.LIKE_ANY: 'LIKE_ANY'>
LIMIT =
<TokenType.LIMIT: 'LIMIT'>
LOAD =
<TokenType.LOAD: 'LOAD'>
LOCK =
<TokenType.LOCK: 'LOCK'>
MAP =
<TokenType.MAP: 'MAP'>
MATCH_RECOGNIZE =
<TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MERGE =
<TokenType.MERGE: 'MERGE'>
MOD =
<TokenType.MOD: 'MOD'>
NATURAL =
<TokenType.NATURAL: 'NATURAL'>
NEXT =
<TokenType.NEXT: 'NEXT'>
NEXT_VALUE_FOR =
<TokenType.NEXT_VALUE_FOR: 'NEXT_VALUE_FOR'>
NOTNULL =
<TokenType.NOTNULL: 'NOTNULL'>
NULL =
<TokenType.NULL: 'NULL'>
OFFSET =
<TokenType.OFFSET: 'OFFSET'>
ON =
<TokenType.ON: 'ON'>
ORDER_BY =
<TokenType.ORDER_BY: 'ORDER_BY'>
ORDERED =
<TokenType.ORDERED: 'ORDERED'>
ORDINALITY =
<TokenType.ORDINALITY: 'ORDINALITY'>
OUTER =
<TokenType.OUTER: 'OUTER'>
OVER =
<TokenType.OVER: 'OVER'>
OVERLAPS =
<TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE =
<TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION =
<TokenType.PARTITION: 'PARTITION'>
PARTITION_BY =
<TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT =
<TokenType.PERCENT: 'PERCENT'>
PIVOT =
<TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER =
<TokenType.PLACEHOLDER: 'PLACEHOLDER'>
PRAGMA =
<TokenType.PRAGMA: 'PRAGMA'>
PRIMARY_KEY =
<TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE =
<TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES =
<TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE =
<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY =
<TokenType.QUALIFY: 'QUALIFY'>
QUOTE =
<TokenType.QUOTE: 'QUOTE'>
RANGE =
<TokenType.RANGE: 'RANGE'>
RECURSIVE =
<TokenType.RECURSIVE: 'RECURSIVE'>
REPLACE =
<TokenType.REPLACE: 'REPLACE'>
RETURNING =
<TokenType.RETURNING: 'RETURNING'>
REFERENCES =
<TokenType.REFERENCES: 'REFERENCES'>
RIGHT =
<TokenType.RIGHT: 'RIGHT'>
RLIKE =
<TokenType.RLIKE: 'RLIKE'>
ROLLBACK =
<TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP =
<TokenType.ROLLUP: 'ROLLUP'>
ROW =
<TokenType.ROW: 'ROW'>
ROWS =
<TokenType.ROWS: 'ROWS'>
SELECT =
<TokenType.SELECT: 'SELECT'>
SEMI =
<TokenType.SEMI: 'SEMI'>
SEPARATOR =
<TokenType.SEPARATOR: 'SEPARATOR'>
SERDE_PROPERTIES =
<TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET =
<TokenType.SET: 'SET'>
SETTINGS =
<TokenType.SETTINGS: 'SETTINGS'>
SHOW =
<TokenType.SHOW: 'SHOW'>
SIMILAR_TO =
<TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME =
<TokenType.SOME: 'SOME'>
SORT_BY =
<TokenType.SORT_BY: 'SORT_BY'>
STRUCT =
<TokenType.STRUCT: 'STRUCT'>
TABLE_SAMPLE =
<TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TEMPORARY =
<TokenType.TEMPORARY: 'TEMPORARY'>
TOP =
<TokenType.TOP: 'TOP'>
THEN =
<TokenType.THEN: 'THEN'>
TRUE =
<TokenType.TRUE: 'TRUE'>
UNCACHE =
<TokenType.UNCACHE: 'UNCACHE'>
UNION =
<TokenType.UNION: 'UNION'>
UNNEST =
<TokenType.UNNEST: 'UNNEST'>
UNPIVOT =
<TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE =
<TokenType.UPDATE: 'UPDATE'>
USE =
<TokenType.USE: 'USE'>
USING =
<TokenType.USING: 'USING'>
VALUES =
<TokenType.VALUES: 'VALUES'>
VIEW =
<TokenType.VIEW: 'VIEW'>
VOLATILE =
<TokenType.VOLATILE: 'VOLATILE'>
WHEN =
<TokenType.WHEN: 'WHEN'>
WHERE =
<TokenType.WHERE: 'WHERE'>
WINDOW =
<TokenType.WINDOW: 'WINDOW'>
WITH =
<TokenType.WITH: 'WITH'>
UNIQUE =
<TokenType.UNIQUE: 'UNIQUE'>
Inherited Members
- enum.Enum
- name
- value
class
Token:
312class Token: 313 __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments") 314 315 @classmethod 316 def number(cls, number: int) -> Token: 317 """Returns a NUMBER token with `number` as its text.""" 318 return cls(TokenType.NUMBER, str(number)) 319 320 @classmethod 321 def string(cls, string: str) -> Token: 322 """Returns a STRING token with `string` as its text.""" 323 return cls(TokenType.STRING, string) 324 325 @classmethod 326 def identifier(cls, identifier: str) -> Token: 327 """Returns an IDENTIFIER token with `identifier` as its text.""" 328 return cls(TokenType.IDENTIFIER, identifier) 329 330 @classmethod 331 def var(cls, var: str) -> Token: 332 """Returns an VAR token with `var` as its text.""" 333 return cls(TokenType.VAR, var) 334 335 def __init__( 336 self, 337 token_type: TokenType, 338 text: str, 339 line: int = 1, 340 col: int = 1, 341 start: int = 0, 342 end: int = 0, 343 comments: t.List[str] = [], 344 ) -> None: 345 """Token initializer. 346 347 Args: 348 token_type: The TokenType Enum. 349 text: The text of the token. 350 line: The line that the token ends on. 351 col: The column that the token ends on. 352 start: The start index of the token. 353 end: The ending index of the token. 354 comments: The comments to attach to the token. 355 """ 356 self.token_type = token_type 357 self.text = text 358 self.line = line 359 self.col = col 360 self.start = start 361 self.end = end 362 self.comments = comments 363 364 def __repr__(self) -> str: 365 attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__) 366 return f"<Token {attributes}>"
Token( token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: List[str] = [])
335 def __init__( 336 self, 337 token_type: TokenType, 338 text: str, 339 line: int = 1, 340 col: int = 1, 341 start: int = 0, 342 end: int = 0, 343 comments: t.List[str] = [], 344 ) -> None: 345 """Token initializer. 346 347 Args: 348 token_type: The TokenType Enum. 349 text: The text of the token. 350 line: The line that the token ends on. 351 col: The column that the token ends on. 352 start: The start index of the token. 353 end: The ending index of the token. 354 comments: The comments to attach to the token. 355 """ 356 self.token_type = token_type 357 self.text = text 358 self.line = line 359 self.col = col 360 self.start = start 361 self.end = end 362 self.comments = comments
Token initializer.
Arguments:
- token_type: The TokenType Enum.
- text: The text of the token.
- line: The line that the token ends on.
- col: The column that the token ends on.
- start: The start index of the token.
- end: The ending index of the token.
- comments: The comments to attach to the token.
315 @classmethod 316 def number(cls, number: int) -> Token: 317 """Returns a NUMBER token with `number` as its text.""" 318 return cls(TokenType.NUMBER, str(number))
Returns a NUMBER token with number
as its text.
320 @classmethod 321 def string(cls, string: str) -> Token: 322 """Returns a STRING token with `string` as its text.""" 323 return cls(TokenType.STRING, string)
Returns a STRING token with string
as its text.
325 @classmethod 326 def identifier(cls, identifier: str) -> Token: 327 """Returns an IDENTIFIER token with `identifier` as its text.""" 328 return cls(TokenType.IDENTIFIER, identifier)
Returns an IDENTIFIER token with identifier
as its text.
class
Tokenizer:
422class Tokenizer(metaclass=_Tokenizer): 423 SINGLE_TOKENS = { 424 "(": TokenType.L_PAREN, 425 ")": TokenType.R_PAREN, 426 "[": TokenType.L_BRACKET, 427 "]": TokenType.R_BRACKET, 428 "{": TokenType.L_BRACE, 429 "}": TokenType.R_BRACE, 430 "&": TokenType.AMP, 431 "^": TokenType.CARET, 432 ":": TokenType.COLON, 433 ",": TokenType.COMMA, 434 ".": TokenType.DOT, 435 "-": TokenType.DASH, 436 "=": TokenType.EQ, 437 ">": TokenType.GT, 438 "<": TokenType.LT, 439 "%": TokenType.MOD, 440 "!": TokenType.NOT, 441 "|": TokenType.PIPE, 442 "+": TokenType.PLUS, 443 ";": TokenType.SEMICOLON, 444 "/": TokenType.SLASH, 445 "\\": TokenType.BACKSLASH, 446 "*": TokenType.STAR, 447 "~": TokenType.TILDA, 448 "?": TokenType.PLACEHOLDER, 449 "@": TokenType.PARAMETER, 450 # used for breaking a var like x'y' but nothing else 451 # the token type doesn't matter 452 "'": TokenType.QUOTE, 453 "`": TokenType.IDENTIFIER, 454 '"': TokenType.IDENTIFIER, 455 "#": TokenType.HASH, 456 } 457 458 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] 459 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] 460 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] 461 RAW_STRINGS: t.List[str | t.Tuple[str, str]] = [] 462 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] 463 IDENTIFIER_ESCAPES = ['"'] 464 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] 465 STRING_ESCAPES = ["'"] 466 VAR_SINGLE_TOKENS: t.Set[str] = set() 467 468 # Autofilled 469 IDENTIFIERS_CAN_START_WITH_DIGIT: bool = False 470 471 _COMMENTS: t.Dict[str, str] = {} 472 _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {} 473 _IDENTIFIERS: t.Dict[str, str] = {} 474 _IDENTIFIER_ESCAPES: t.Set[str] = set() 475 _QUOTES: t.Dict[str, str] = {} 476 _STRING_ESCAPES: t.Set[str] = set() 477 _KEYWORD_TRIE: t.Dict = {} 478 479 KEYWORDS: t.Dict[str, TokenType] = { 480 **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, 481 **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, 482 **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")}, 483 **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")}, 484 "/*+": TokenType.HINT, 485 "==": TokenType.EQ, 486 "::": TokenType.DCOLON, 487 "||": TokenType.DPIPE, 488 ">=": TokenType.GTE, 489 "<=": TokenType.LTE, 490 "<>": TokenType.NEQ, 491 "!=": TokenType.NEQ, 492 "<=>": TokenType.NULLSAFE_EQ, 493 "->": TokenType.ARROW, 494 "->>": TokenType.DARROW, 495 "=>": TokenType.FARROW, 496 "#>": TokenType.HASH_ARROW, 497 "#>>": TokenType.DHASH_ARROW, 498 "<->": TokenType.LR_ARROW, 499 "&&": TokenType.DAMP, 500 "ALL": TokenType.ALL, 501 "ALWAYS": TokenType.ALWAYS, 502 "AND": TokenType.AND, 503 "ANTI": TokenType.ANTI, 504 "ANY": TokenType.ANY, 505 "ASC": TokenType.ASC, 506 "AS": TokenType.ALIAS, 507 "ASOF": TokenType.ASOF, 508 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, 509 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, 510 "BEGIN": TokenType.BEGIN, 511 "BETWEEN": TokenType.BETWEEN, 512 "CACHE": TokenType.CACHE, 513 "UNCACHE": TokenType.UNCACHE, 514 "CASE": TokenType.CASE, 515 "CHARACTER SET": TokenType.CHARACTER_SET, 516 "CLUSTER BY": TokenType.CLUSTER_BY, 517 "COLLATE": TokenType.COLLATE, 518 "COLUMN": TokenType.COLUMN, 519 "COMMIT": TokenType.COMMIT, 520 "CONSTRAINT": TokenType.CONSTRAINT, 521 "CREATE": TokenType.CREATE, 522 "CROSS": TokenType.CROSS, 523 "CUBE": TokenType.CUBE, 524 "CURRENT_DATE": TokenType.CURRENT_DATE, 525 "CURRENT_TIME": TokenType.CURRENT_TIME, 526 "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, 527 "CURRENT_USER": TokenType.CURRENT_USER, 528 "DATABASE": TokenType.DATABASE, 529 "DEFAULT": TokenType.DEFAULT, 530 "DELETE": TokenType.DELETE, 531 "DESC": TokenType.DESC, 532 "DESCRIBE": TokenType.DESCRIBE, 533 "DISTINCT": TokenType.DISTINCT, 534 "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY, 535 "DIV": TokenType.DIV, 536 "DROP": TokenType.DROP, 537 "ELSE": TokenType.ELSE, 538 "END": TokenType.END, 539 "ESCAPE": TokenType.ESCAPE, 540 "EXCEPT": TokenType.EXCEPT, 541 "EXECUTE": TokenType.EXECUTE, 542 "EXISTS": TokenType.EXISTS, 543 "FALSE": TokenType.FALSE, 544 "FETCH": TokenType.FETCH, 545 "FILTER": TokenType.FILTER, 546 "FIRST": TokenType.FIRST, 547 "FULL": TokenType.FULL, 548 "FUNCTION": TokenType.FUNCTION, 549 "FOR": TokenType.FOR, 550 "FOREIGN KEY": TokenType.FOREIGN_KEY, 551 "FORMAT": TokenType.FORMAT, 552 "FROM": TokenType.FROM, 553 "GEOGRAPHY": TokenType.GEOGRAPHY, 554 "GEOMETRY": TokenType.GEOMETRY, 555 "GLOB": TokenType.GLOB, 556 "GROUP BY": TokenType.GROUP_BY, 557 "GROUPING SETS": TokenType.GROUPING_SETS, 558 "HAVING": TokenType.HAVING, 559 "IF": TokenType.IF, 560 "ILIKE": TokenType.ILIKE, 561 "IN": TokenType.IN, 562 "INDEX": TokenType.INDEX, 563 "INET": TokenType.INET, 564 "INNER": TokenType.INNER, 565 "INSERT": TokenType.INSERT, 566 "INTERVAL": TokenType.INTERVAL, 567 "INTERSECT": TokenType.INTERSECT, 568 "INTO": TokenType.INTO, 569 "IS": TokenType.IS, 570 "ISNULL": TokenType.ISNULL, 571 "JOIN": TokenType.JOIN, 572 "KEEP": TokenType.KEEP, 573 "LATERAL": TokenType.LATERAL, 574 "LEFT": TokenType.LEFT, 575 "LIKE": TokenType.LIKE, 576 "LIMIT": TokenType.LIMIT, 577 "LOAD": TokenType.LOAD, 578 "LOCK": TokenType.LOCK, 579 "MERGE": TokenType.MERGE, 580 "NATURAL": TokenType.NATURAL, 581 "NEXT": TokenType.NEXT, 582 "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR, 583 "NOT": TokenType.NOT, 584 "NOTNULL": TokenType.NOTNULL, 585 "NULL": TokenType.NULL, 586 "OBJECT": TokenType.OBJECT, 587 "OFFSET": TokenType.OFFSET, 588 "ON": TokenType.ON, 589 "OR": TokenType.OR, 590 "ORDER BY": TokenType.ORDER_BY, 591 "ORDINALITY": TokenType.ORDINALITY, 592 "OUTER": TokenType.OUTER, 593 "OVER": TokenType.OVER, 594 "OVERLAPS": TokenType.OVERLAPS, 595 "OVERWRITE": TokenType.OVERWRITE, 596 "PARTITION": TokenType.PARTITION, 597 "PARTITION BY": TokenType.PARTITION_BY, 598 "PARTITIONED BY": TokenType.PARTITION_BY, 599 "PARTITIONED_BY": TokenType.PARTITION_BY, 600 "PERCENT": TokenType.PERCENT, 601 "PIVOT": TokenType.PIVOT, 602 "PRAGMA": TokenType.PRAGMA, 603 "PRIMARY KEY": TokenType.PRIMARY_KEY, 604 "PROCEDURE": TokenType.PROCEDURE, 605 "QUALIFY": TokenType.QUALIFY, 606 "RANGE": TokenType.RANGE, 607 "RECURSIVE": TokenType.RECURSIVE, 608 "REGEXP": TokenType.RLIKE, 609 "REPLACE": TokenType.REPLACE, 610 "RETURNING": TokenType.RETURNING, 611 "REFERENCES": TokenType.REFERENCES, 612 "RIGHT": TokenType.RIGHT, 613 "RLIKE": TokenType.RLIKE, 614 "ROLLBACK": TokenType.ROLLBACK, 615 "ROLLUP": TokenType.ROLLUP, 616 "ROW": TokenType.ROW, 617 "ROWS": TokenType.ROWS, 618 "SCHEMA": TokenType.SCHEMA, 619 "SELECT": TokenType.SELECT, 620 "SEMI": TokenType.SEMI, 621 "SET": TokenType.SET, 622 "SETTINGS": TokenType.SETTINGS, 623 "SHOW": TokenType.SHOW, 624 "SIMILAR TO": TokenType.SIMILAR_TO, 625 "SOME": TokenType.SOME, 626 "SORT BY": TokenType.SORT_BY, 627 "TABLE": TokenType.TABLE, 628 "TABLESAMPLE": TokenType.TABLE_SAMPLE, 629 "TEMP": TokenType.TEMPORARY, 630 "TEMPORARY": TokenType.TEMPORARY, 631 "THEN": TokenType.THEN, 632 "TRUE": TokenType.TRUE, 633 "UNION": TokenType.UNION, 634 "UNNEST": TokenType.UNNEST, 635 "UNPIVOT": TokenType.UNPIVOT, 636 "UPDATE": TokenType.UPDATE, 637 "USE": TokenType.USE, 638 "USING": TokenType.USING, 639 "UUID": TokenType.UUID, 640 "VALUES": TokenType.VALUES, 641 "VIEW": TokenType.VIEW, 642 "VOLATILE": TokenType.VOLATILE, 643 "WHEN": TokenType.WHEN, 644 "WHERE": TokenType.WHERE, 645 "WINDOW": TokenType.WINDOW, 646 "WITH": TokenType.WITH, 647 "APPLY": TokenType.APPLY, 648 "ARRAY": TokenType.ARRAY, 649 "BIT": TokenType.BIT, 650 "BOOL": TokenType.BOOLEAN, 651 "BOOLEAN": TokenType.BOOLEAN, 652 "BYTE": TokenType.TINYINT, 653 "TINYINT": TokenType.TINYINT, 654 "SHORT": TokenType.SMALLINT, 655 "SMALLINT": TokenType.SMALLINT, 656 "INT2": TokenType.SMALLINT, 657 "INTEGER": TokenType.INT, 658 "INT": TokenType.INT, 659 "INT4": TokenType.INT, 660 "LONG": TokenType.BIGINT, 661 "BIGINT": TokenType.BIGINT, 662 "INT8": TokenType.BIGINT, 663 "DEC": TokenType.DECIMAL, 664 "DECIMAL": TokenType.DECIMAL, 665 "BIGDECIMAL": TokenType.BIGDECIMAL, 666 "BIGNUMERIC": TokenType.BIGDECIMAL, 667 "MAP": TokenType.MAP, 668 "NULLABLE": TokenType.NULLABLE, 669 "NUMBER": TokenType.DECIMAL, 670 "NUMERIC": TokenType.DECIMAL, 671 "FIXED": TokenType.DECIMAL, 672 "REAL": TokenType.FLOAT, 673 "FLOAT": TokenType.FLOAT, 674 "FLOAT4": TokenType.FLOAT, 675 "FLOAT8": TokenType.DOUBLE, 676 "DOUBLE": TokenType.DOUBLE, 677 "DOUBLE PRECISION": TokenType.DOUBLE, 678 "JSON": TokenType.JSON, 679 "CHAR": TokenType.CHAR, 680 "CHARACTER": TokenType.CHAR, 681 "NCHAR": TokenType.NCHAR, 682 "VARCHAR": TokenType.VARCHAR, 683 "VARCHAR2": TokenType.VARCHAR, 684 "NVARCHAR": TokenType.NVARCHAR, 685 "NVARCHAR2": TokenType.NVARCHAR, 686 "STR": TokenType.TEXT, 687 "STRING": TokenType.TEXT, 688 "TEXT": TokenType.TEXT, 689 "CLOB": TokenType.TEXT, 690 "LONGVARCHAR": TokenType.TEXT, 691 "BINARY": TokenType.BINARY, 692 "BLOB": TokenType.VARBINARY, 693 "BYTEA": TokenType.VARBINARY, 694 "VARBINARY": TokenType.VARBINARY, 695 "TIME": TokenType.TIME, 696 "TIMESTAMP": TokenType.TIMESTAMP, 697 "TIMESTAMPTZ": TokenType.TIMESTAMPTZ, 698 "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ, 699 "DATE": TokenType.DATE, 700 "DATETIME": TokenType.DATETIME, 701 "INT4RANGE": TokenType.INT4RANGE, 702 "INT4MULTIRANGE": TokenType.INT4MULTIRANGE, 703 "INT8RANGE": TokenType.INT8RANGE, 704 "INT8MULTIRANGE": TokenType.INT8MULTIRANGE, 705 "NUMRANGE": TokenType.NUMRANGE, 706 "NUMMULTIRANGE": TokenType.NUMMULTIRANGE, 707 "TSRANGE": TokenType.TSRANGE, 708 "TSMULTIRANGE": TokenType.TSMULTIRANGE, 709 "TSTZRANGE": TokenType.TSTZRANGE, 710 "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE, 711 "DATERANGE": TokenType.DATERANGE, 712 "DATEMULTIRANGE": TokenType.DATEMULTIRANGE, 713 "UNIQUE": TokenType.UNIQUE, 714 "STRUCT": TokenType.STRUCT, 715 "VARIANT": TokenType.VARIANT, 716 "ALTER": TokenType.ALTER, 717 "ANALYZE": TokenType.COMMAND, 718 "CALL": TokenType.COMMAND, 719 "COMMENT": TokenType.COMMENT, 720 "COPY": TokenType.COMMAND, 721 "EXPLAIN": TokenType.COMMAND, 722 "GRANT": TokenType.COMMAND, 723 "OPTIMIZE": TokenType.COMMAND, 724 "PREPARE": TokenType.COMMAND, 725 "TRUNCATE": TokenType.COMMAND, 726 "VACUUM": TokenType.COMMAND, 727 } 728 729 WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = { 730 " ": TokenType.SPACE, 731 "\t": TokenType.SPACE, 732 "\n": TokenType.BREAK, 733 "\r": TokenType.BREAK, 734 "\r\n": TokenType.BREAK, 735 } 736 737 COMMANDS = { 738 TokenType.COMMAND, 739 TokenType.EXECUTE, 740 TokenType.FETCH, 741 TokenType.SHOW, 742 } 743 744 COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN} 745 746 # handle numeric literals like in hive (3L = BIGINT) 747 NUMERIC_LITERALS: t.Dict[str, str] = {} 748 ENCODE: t.Optional[str] = None 749 750 COMMENTS = ["--", ("/*", "*/")] 751 752 __slots__ = ( 753 "sql", 754 "size", 755 "tokens", 756 "_start", 757 "_current", 758 "_line", 759 "_col", 760 "_comments", 761 "_char", 762 "_end", 763 "_peek", 764 "_prev_token_line", 765 ) 766 767 def __init__(self) -> None: 768 self.reset() 769 770 def reset(self) -> None: 771 self.sql = "" 772 self.size = 0 773 self.tokens: t.List[Token] = [] 774 self._start = 0 775 self._current = 0 776 self._line = 1 777 self._col = 0 778 self._comments: t.List[str] = [] 779 780 self._char = "" 781 self._end = False 782 self._peek = "" 783 self._prev_token_line = -1 784 785 def tokenize(self, sql: str) -> t.List[Token]: 786 """Returns a list of tokens corresponding to the SQL string `sql`.""" 787 self.reset() 788 self.sql = sql 789 self.size = len(sql) 790 791 try: 792 self._scan() 793 except Exception as e: 794 start = max(self._current - 50, 0) 795 end = min(self._current + 50, self.size - 1) 796 context = self.sql[start:end] 797 raise ValueError(f"Error tokenizing '{context}'") from e 798 799 return self.tokens 800 801 def _scan(self, until: t.Optional[t.Callable] = None) -> None: 802 while self.size and not self._end: 803 self._start = self._current 804 self._advance() 805 806 if self._char is None: 807 break 808 809 if self._char not in self.WHITE_SPACE: 810 if self._char.isdigit(): 811 self._scan_number() 812 elif self._char in self._IDENTIFIERS: 813 self._scan_identifier(self._IDENTIFIERS[self._char]) 814 else: 815 self._scan_keywords() 816 817 if until and until(): 818 break 819 820 if self.tokens and self._comments: 821 self.tokens[-1].comments.extend(self._comments) 822 823 def _chars(self, size: int) -> str: 824 if size == 1: 825 return self._char 826 827 start = self._current - 1 828 end = start + size 829 830 return self.sql[start:end] if end <= self.size else "" 831 832 def _advance(self, i: int = 1, alnum: bool = False) -> None: 833 if self.WHITE_SPACE.get(self._char) is TokenType.BREAK: 834 self._col = 1 835 self._line += 1 836 else: 837 self._col += i 838 839 self._current += i 840 self._end = self._current >= self.size 841 self._char = self.sql[self._current - 1] 842 self._peek = "" if self._end else self.sql[self._current] 843 844 if alnum and self._char.isalnum(): 845 # Here we use local variables instead of attributes for better performance 846 _col = self._col 847 _current = self._current 848 _end = self._end 849 _peek = self._peek 850 851 while _peek.isalnum(): 852 _col += 1 853 _current += 1 854 _end = _current >= self.size 855 _peek = "" if _end else self.sql[_current] 856 857 self._col = _col 858 self._current = _current 859 self._end = _end 860 self._peek = _peek 861 self._char = self.sql[_current - 1] 862 863 @property 864 def _text(self) -> str: 865 return self.sql[self._start : self._current] 866 867 def peek(self, i: int = 0) -> str: 868 i = self._current + i 869 if i < self.size: 870 return self.sql[i] 871 return "" 872 873 def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None: 874 self._prev_token_line = self._line 875 self.tokens.append( 876 Token( 877 token_type, 878 text=self._text if text is None else text, 879 line=self._line, 880 col=self._col, 881 start=self._start, 882 end=self._current - 1, 883 comments=self._comments, 884 ) 885 ) 886 self._comments = [] 887 888 # If we have either a semicolon or a begin token before the command's token, we'll parse 889 # whatever follows the command's token as a string 890 if ( 891 token_type in self.COMMANDS 892 and self._peek != ";" 893 and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS) 894 ): 895 start = self._current 896 tokens = len(self.tokens) 897 self._scan(lambda: self._peek == ";") 898 self.tokens = self.tokens[:tokens] 899 text = self.sql[start : self._current].strip() 900 if text: 901 self._add(TokenType.STRING, text) 902 903 def _scan_keywords(self) -> None: 904 size = 0 905 word = None 906 chars = self._text 907 char = chars 908 prev_space = False 909 skip = False 910 trie = self._KEYWORD_TRIE 911 single_token = char in self.SINGLE_TOKENS 912 913 while chars: 914 if skip: 915 result = 1 916 else: 917 result, trie = in_trie(trie, char.upper()) 918 919 if result == 0: 920 break 921 if result == 2: 922 word = chars 923 924 size += 1 925 end = self._current - 1 + size 926 927 if end < self.size: 928 char = self.sql[end] 929 single_token = single_token or char in self.SINGLE_TOKENS 930 is_space = char in self.WHITE_SPACE 931 932 if not is_space or not prev_space: 933 if is_space: 934 char = " " 935 chars += char 936 prev_space = is_space 937 skip = False 938 else: 939 skip = True 940 else: 941 char = "" 942 chars = " " 943 944 word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word 945 946 if not word: 947 if self._char in self.SINGLE_TOKENS: 948 self._add(self.SINGLE_TOKENS[self._char], text=self._char) 949 return 950 self._scan_var() 951 return 952 953 if self._scan_string(word): 954 return 955 if self._scan_comment(word): 956 return 957 958 self._advance(size - 1) 959 word = word.upper() 960 self._add(self.KEYWORDS[word], text=word) 961 962 def _scan_comment(self, comment_start: str) -> bool: 963 if comment_start not in self._COMMENTS: 964 return False 965 966 comment_start_line = self._line 967 comment_start_size = len(comment_start) 968 comment_end = self._COMMENTS[comment_start] 969 970 if comment_end: 971 # Skip the comment's start delimiter 972 self._advance(comment_start_size) 973 974 comment_end_size = len(comment_end) 975 while not self._end and self._chars(comment_end_size) != comment_end: 976 self._advance(alnum=True) 977 978 self._comments.append(self._text[comment_start_size : -comment_end_size + 1]) 979 self._advance(comment_end_size - 1) 980 else: 981 while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK: 982 self._advance(alnum=True) 983 self._comments.append(self._text[comment_start_size:]) 984 985 # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding. 986 # Multiple consecutive comments are preserved by appending them to the current comments list. 987 if comment_start_line == self._prev_token_line: 988 self.tokens[-1].comments.extend(self._comments) 989 self._comments = [] 990 self._prev_token_line = self._line 991 992 return True 993 994 def _scan_number(self) -> None: 995 if self._char == "0": 996 peek = self._peek.upper() 997 if peek == "B": 998 return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER) 999 elif peek == "X": 1000 return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER) 1001 1002 decimal = False 1003 scientific = 0 1004 1005 while True: 1006 if self._peek.isdigit(): 1007 self._advance() 1008 elif self._peek == "." and not decimal: 1009 after = self.peek(1) 1010 if after.isdigit() or not after.isalpha(): 1011 decimal = True 1012 self._advance() 1013 else: 1014 return self._add(TokenType.VAR) 1015 elif self._peek in ("-", "+") and scientific == 1: 1016 scientific += 1 1017 self._advance() 1018 elif self._peek.upper() == "E" and not scientific: 1019 scientific += 1 1020 self._advance() 1021 elif self._peek.isidentifier(): 1022 number_text = self._text 1023 literal = "" 1024 1025 while self._peek.strip() and self._peek not in self.SINGLE_TOKENS: 1026 literal += self._peek.upper() 1027 self._advance() 1028 1029 token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal, "")) 1030 1031 if token_type: 1032 self._add(TokenType.NUMBER, number_text) 1033 self._add(TokenType.DCOLON, "::") 1034 return self._add(token_type, literal) 1035 elif self.IDENTIFIERS_CAN_START_WITH_DIGIT: 1036 return self._add(TokenType.VAR) 1037 1038 self._add(TokenType.NUMBER, number_text) 1039 return self._advance(-len(literal)) 1040 else: 1041 return self._add(TokenType.NUMBER) 1042 1043 def _scan_bits(self) -> None: 1044 self._advance() 1045 value = self._extract_value() 1046 try: 1047 # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier 1048 int(value, 2) 1049 self._add(TokenType.BIT_STRING, value[2:]) # Drop the 0b 1050 except ValueError: 1051 self._add(TokenType.IDENTIFIER) 1052 1053 def _scan_hex(self) -> None: 1054 self._advance() 1055 value = self._extract_value() 1056 try: 1057 # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier 1058 int(value, 16) 1059 self._add(TokenType.HEX_STRING, value[2:]) # Drop the 0x 1060 except ValueError: 1061 self._add(TokenType.IDENTIFIER) 1062 1063 def _extract_value(self) -> str: 1064 while True: 1065 char = self._peek.strip() 1066 if char and char not in self.SINGLE_TOKENS: 1067 self._advance(alnum=True) 1068 else: 1069 break 1070 1071 return self._text 1072 1073 def _scan_string(self, start: str) -> bool: 1074 base = None 1075 token_type = TokenType.STRING 1076 1077 if start in self._QUOTES: 1078 end = self._QUOTES[start] 1079 elif start in self._FORMAT_STRINGS: 1080 end, token_type = self._FORMAT_STRINGS[start] 1081 1082 if token_type == TokenType.HEX_STRING: 1083 base = 16 1084 elif token_type == TokenType.BIT_STRING: 1085 base = 2 1086 else: 1087 return False 1088 1089 self._advance(len(start)) 1090 text = self._extract_string(end) 1091 1092 if base: 1093 try: 1094 int(text, base) 1095 except: 1096 raise RuntimeError( 1097 f"Numeric string contains invalid characters from {self._line}:{self._start}" 1098 ) 1099 else: 1100 text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text 1101 1102 self._add(token_type, text) 1103 return True 1104 1105 def _scan_identifier(self, identifier_end: str) -> None: 1106 self._advance() 1107 text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES) 1108 self._add(TokenType.IDENTIFIER, text) 1109 1110 def _scan_var(self) -> None: 1111 while True: 1112 char = self._peek.strip() 1113 if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS): 1114 self._advance(alnum=True) 1115 else: 1116 break 1117 1118 self._add( 1119 TokenType.VAR 1120 if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER 1121 else self.KEYWORDS.get(self._text.upper(), TokenType.VAR) 1122 ) 1123 1124 def _extract_string(self, delimiter: str, escapes=None) -> str: 1125 text = "" 1126 delim_size = len(delimiter) 1127 escapes = self._STRING_ESCAPES if escapes is None else escapes 1128 1129 while True: 1130 if self._char in escapes and (self._peek == delimiter or self._peek in escapes): 1131 if self._peek == delimiter: 1132 text += self._peek 1133 else: 1134 text += self._char + self._peek 1135 1136 if self._current + 1 < self.size: 1137 self._advance(2) 1138 else: 1139 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}") 1140 else: 1141 if self._chars(delim_size) == delimiter: 1142 if delim_size > 1: 1143 self._advance(delim_size - 1) 1144 break 1145 1146 if self._end: 1147 raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}") 1148 1149 current = self._current - 1 1150 self._advance(alnum=True) 1151 text += self.sql[current : self._current - 1] 1152 1153 return text
def
reset(self) -> None:
770 def reset(self) -> None: 771 self.sql = "" 772 self.size = 0 773 self.tokens: t.List[Token] = [] 774 self._start = 0 775 self._current = 0 776 self._line = 1 777 self._col = 0 778 self._comments: t.List[str] = [] 779 780 self._char = "" 781 self._end = False 782 self._peek = "" 783 self._prev_token_line = -1
785 def tokenize(self, sql: str) -> t.List[Token]: 786 """Returns a list of tokens corresponding to the SQL string `sql`.""" 787 self.reset() 788 self.sql = sql 789 self.size = len(sql) 790 791 try: 792 self._scan() 793 except Exception as e: 794 start = max(self._current - 50, 0) 795 end = min(self._current + 50, self.size - 1) 796 context = self.sql[start:end] 797 raise ValueError(f"Error tokenizing '{context}'") from e 798 799 return self.tokens
Returns a list of tokens corresponding to the SQL string sql
.