sqlglot.tokens
from __future__ import annotations

import typing as t
from enum import auto

from sqlglot.helper import AutoName
from sqlglot.trie import in_trie, new_trie


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DCOLON = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE = auto()
    CARET = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    LT_AT = auto()
    AT_GT = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION_PARAMETER = auto()
    NATIONAL = auto()
    DAMP = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    DATABASE = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()

    # types
    BIT = auto()
    BOOLEAN = auto()
    TINYINT = auto()
    UTINYINT = auto()
    SMALLINT = auto()
    USMALLINT = auto()
    INT = auto()
    UINT = auto()
    BIGINT = auto()
    UBIGINT = auto()
    FLOAT = auto()
    DOUBLE = auto()
    DECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    DATETIME = auto()
    DATE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    UNIQUEIDENTIFIER = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()
    INET = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALWAYS = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    AT_TIME_ZONE = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    BOTH = auto()
    BUCKET = auto()
    BY_DEFAULT = auto()
    CACHE = auto()
    CASCADE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    COMPOUND = auto()
    CONSTRAINT = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_ROW = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DISTINCT = auto()
    DISTINCT_FROM = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOLLOWING = auto()
    FOR = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GLOB = auto()
    GLOBAL = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IF = auto()
    IGNORE_NULLS = auto()
    ILIKE = auto()
    ILIKE_ANY = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    JOIN_MARKER = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LAZY = auto()
    LEADING = auto()
    LEFT = auto()
    LIKE = auto()
    LIKE_ANY = auto()
    LIMIT = auto()
    LOAD_DATA = auto()
    LOCAL = auto()
    MAP = auto()
    MATCH_RECOGNIZE = auto()
    MATERIALIZED = auto()
    MERGE = auto()
    MOD = auto()
    NATURAL = auto()
    NEXT = auto()
    NO_ACTION = auto()
    NOTNULL = auto()
    NULL = auto()
    NULLS_FIRST = auto()
    NULLS_LAST = auto()
    OFFSET = auto()
    ON = auto()
    ONLY = auto()
    OPTIONS = auto()
    ORDER_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OUT_OF = auto()
    OVER = auto()
    OVERLAPS = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    PRECEDING = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REPLACE = auto()
    RESPECT_NULLS = auto()
    RETURNING = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SEED = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORTKEY = auto()
    SORT_BY = auto()
    STRUCT = auto()
    TABLE_SAMPLE = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRAILING = auto()
    TRUE = auto()
    UNBOUNDED = auto()
    UNCACHE = auto()
    UNION = auto()
    UNLOGGED = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    WITH_TIME_ZONE = auto()
    WITH_LOCAL_TIME_ZONE = auto()
    WITHIN_GROUP = auto()
    WITHOUT_TIME_ZONE = auto()
    UNIQUE = auto()


class Token:
    __slots__ = ("token_type", "text", "line", "col", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        comments: t.List[str] = [],
    ) -> None:
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = max(col - len(text), 1)
        self.comments = comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"


class _Tokenizer(type):
    def __new__(cls, clsname, bases, attrs):  # type: ignore
        klass = super().__new__(cls, clsname, bases, attrs)

        klass._QUOTES = {
            f"{prefix}{s}": e
            for s, e in cls._delimeter_list_to_dict(klass.QUOTES).items()
            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
        }
        klass._BIT_STRINGS = cls._delimeter_list_to_dict(klass.BIT_STRINGS)
        klass._HEX_STRINGS = cls._delimeter_list_to_dict(klass.HEX_STRINGS)
        klass._BYTE_STRINGS = cls._delimeter_list_to_dict(klass.BYTE_STRINGS)
        klass._IDENTIFIERS = cls._delimeter_list_to_dict(klass.IDENTIFIERS)
        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
        klass._COMMENTS = dict(
            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
            for comment in klass.COMMENTS
        )

        klass.KEYWORD_TRIE = new_trie(
            key.upper()
            for key in {
                **klass.KEYWORDS,
                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
            }
            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
        )

        return klass

    @staticmethod
    def _delimeter_list_to_dict(list: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in list)


class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        # used for breaking a var like x'y' but nothing else
        # the token type doesn't matter
        "'": TokenType.QUOTE,
        "`": TokenType.IDENTIFIER,
        '"': TokenType.IDENTIFIER,
        "#": TokenType.HASH,
    }

    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []

    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []

    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []

    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']

    STRING_ESCAPES = ["'"]

    _STRING_ESCAPES: t.Set[str] = set()

    IDENTIFIER_ESCAPES = ['"']

    _IDENTIFIER_ESCAPES: t.Set[str] = set()

    KEYWORDS = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        "{{+": TokenType.BLOCK_START,
        "{{-": TokenType.BLOCK_START,
        "+}}": TokenType.BLOCK_END,
        "-}}": TokenType.BLOCK_END,
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "BOTH": TokenType.BOTH,
        "BUCKET": TokenType.BUCKET,
        "BY DEFAULT": TokenType.BY_DEFAULT,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CASCADE": TokenType.CASCADE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "COMPOUND": TokenType.COMPOUND,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT ROW": TokenType.CURRENT_ROW,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTINCT FROM": TokenType.DISTINCT_FROM,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOLLOWING": TokenType.FOLLOWING,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "IF": TokenType.IF,
        "ILIKE": TokenType.ILIKE,
        "IGNORE NULLS": TokenType.IGNORE_NULLS,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "LATERAL": TokenType.LATERAL,
        "LAZY": TokenType.LAZY,
        "LEADING": TokenType.LEADING,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD DATA": TokenType.LOAD_DATA,
        "LOCAL": TokenType.LOCAL,
        "MATERIALIZED": TokenType.MATERIALIZED,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NO ACTION": TokenType.NO_ACTION,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "NULLS FIRST": TokenType.NULLS_FIRST,
        "NULLS LAST": TokenType.NULLS_LAST,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "ONLY": TokenType.ONLY,
        "OPTIONS": TokenType.OPTIONS,
        "OR": TokenType.OR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OUT OF": TokenType.OUT_OF,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRECEDING": TokenType.PRECEDING,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "REPLACE": TokenType.REPLACE,
        "RESPECT NULLS": TokenType.RESPECT_NULLS,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SEED": TokenType.SEED,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SET": TokenType.SET,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORTKEY": TokenType.SORTKEY,
        "SORT BY": TokenType.SORT_BY,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "TRAILING": TokenType.TRAILING,
        "UNBOUNDED": TokenType.UNBOUNDED,
        "UNION": TokenType.UNION,
        "UNLOGGED": TokenType.UNLOGGED,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
        "WITHIN GROUP": TokenType.WITHIN_GROUP,
        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.BIGINT,
        "DECIMAL": TokenType.DECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ALTER AGGREGATE": TokenType.COMMAND,
        "ALTER DEFAULT": TokenType.COMMAND,
        "ALTER DOMAIN": TokenType.COMMAND,
        "ALTER ROLE": TokenType.COMMAND,
        "ALTER RULE": TokenType.COMMAND,
        "ALTER SEQUENCE": TokenType.COMMAND,
        "ALTER TYPE": TokenType.COMMAND,
        "ALTER USER": TokenType.COMMAND,
        "ALTER VIEW": TokenType.COMMAND,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
    }

    WHITE_SPACE: t.Dict[str, TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
        "\r\n": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SET,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # handle numeric literals like in Hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}
    ENCODE: t.Optional[str] = None

    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
    KEYWORD_TRIE = None  # autofilled

    IDENTIFIER_CAN_START_WITH_DIGIT = False

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_prev_token_comments",
        "_prev_token_type",
    )

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 1
        self._comments: t.List[str] = []

        self._char = None
        self._end = None
        self._peek = None
        self._prev_token_line = -1
        self._prev_token_comments: t.List[str] = []
        self._prev_token_type = None

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        self.reset()
        self.sql = sql
        self.size = len(sql)
        self._scan()
        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            self._start = self._current
            self._advance()

            if self._char is None:
                break

            if self._char not in self.WHITE_SPACE:
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char  # type: ignore
        start = self._current - 1
        end = start + size
        if end <= self.size:
            return self.sql[start:end]
        return ""

    def _line_break(self, char: t.Optional[str]) -> bool:
        return self.WHITE_SPACE.get(char) == TokenType.BREAK  # type: ignore

    def _advance(self, i: int = 1) -> None:
        if self._line_break(self._char):
            self._set_new_line()

        self._col += i
        self._current += i
        self._end = self._current >= self.size  # type: ignore
        self._char = self.sql[self._current - 1]  # type: ignore
        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore

    def _set_new_line(self) -> None:
        self._col = 1
        self._line += 1

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self._prev_token_comments = self._comments
        self._prev_token_type = token_type  # type: ignore
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if token_type in self.COMMANDS and (
            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())  # type: ignore

            if result == 0:
                break
            if result == 2:
                word = chars
            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                chars = " "

        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        self._add(self.KEYWORDS[word.upper()])

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:  # type: ignore
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]  # type: ignore

        if comment_end:
            comment_end_size = len(comment_end)

            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance()

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
            self._advance(comment_end_size - 1)
        else:
            while not self._end and not self._line_break(self._peek):
                self._advance()
            self._comments.append(self._text[comment_start_size:])  # type: ignore

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()  # type: ignore
            if peek == "B":
                return self._scan_bits()
            elif peek == "X":
                return self._scan_hex()

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():  # type: ignore
                self._advance()
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:  # type: ignore
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():  # type: ignore
                number_text = self._text
                literal = []

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
                    literal.append(self._peek.upper())  # type: ignore
                    self._advance()

                literal = "".join(literal)  # type: ignore
                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)  # type: ignore
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)  # type: ignore
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True

    # X'1234', b'0110', E'\\\\\' etc.
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:  # type: ignore
            delimiters = self._HEX_STRINGS  # type: ignore
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:  # type: ignore
            delimiters = self._BIT_STRINGS  # type: ignore
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:  # type: ignore
            delimiters = self._BYTE_STRINGS  # type: ignore
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters.get(string_start)
        text = self._extract_string(string_end)

        if base is None:
            self._add(token_type, text)
        else:
            try:
                self._add(token_type, f"{int(text, base)}")
            except ValueError:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        text = ""
        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES

        while True:
            if self._end:
                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")

            self._advance()
            if self._char == identifier_end:
                if identifier_end_is_escape and self._peek == identifier_end:
                    text += identifier_end  # type: ignore
                    self._advance()
                    continue

                break

            text += self._char  # type: ignore

        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break
        self._add(
            TokenType.VAR
            if self._prev_token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str) -> str:
        text = ""
        delim_size = len(delimiter)

        while True:
            if self._char in self._STRING_ESCAPES and (
                self._peek == delimiter or self._peek in self._STRING_ESCAPES
            ):
                if self._peek == delimiter:
                    text += self._peek  # type: ignore
                else:
                    text += self._char + self._peek  # type: ignore

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
                text += self._char  # type: ignore
                self._advance()

        return text
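A minimal usage sketch (assuming sqlglot is installed; the token stream below follows from KEYWORDS, COMMANDS, and the comment-attachment logic in _add and _scan_comment):

from sqlglot.tokens import Tokenizer, TokenType

tokens = Tokenizer().tokenize("SELECT x FROM y -- fetch x")
assert [tok.token_type for tok in tokens] == [
    TokenType.SELECT,
    TokenType.VAR,
    TokenType.FROM,
    TokenType.VAR,
]
# the trailing single-line comment is attached to the preceding token
assert tokens[-1].comments == [" fetch x"]

# keywords listed in COMMANDS capture the rest of the statement as one STRING
tokens = Tokenizer().tokenize("SET x = 1")
assert [tok.token_type for tok in tokens] == [TokenType.SET, TokenType.STRING]
assert tokens[-1].text == "x = 1"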
class
TokenType(AutoName):
An enumeration of the token types that the tokenizer can emit.
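Because TokenType derives from AutoName, each member's value equals its own name, which keeps token types readable when printed or serialized; a small sketch:

from sqlglot.tokens import TokenType

assert TokenType.SELECT.name == "SELECT"
assert TokenType.SELECT.value == "SELECT"
assert TokenType["NUMBER"] is TokenType.NUMBER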
class
Token:
Token( token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, comments: List[str] = [])
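One detail worth noting: the col argument is expected to point just past the token's text (this is what Tokenizer._add passes), and the constructor stores the column at which the token starts; a small sketch:

from sqlglot.tokens import Token, TokenType

# col=5 points just past "42"; the stored col is the start column:
# max(5 - len("42"), 1) == 3
tok = Token(TokenType.NUMBER, "42", line=1, col=5)
assert tok.col == 3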
@classmethod
def number(cls, number: int) -> Token:
Returns a NUMBER token with `number` as its text.
@classmethod
def string(cls, string: str) -> Token:
Returns a STRING token with `string` as its text.
@classmethod
def identifier(cls, identifier: str) -> Token:
Returns an IDENTIFIER token with `identifier` as its text.
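These classmethods (along with Token.var, defined alongside them in the source) are thin convenience constructors; a minimal sketch:

from sqlglot.tokens import Token, TokenType

assert Token.number(42).text == "42"
assert Token.string("foo").token_type is TokenType.STRING
assert Token.identifier("db").token_type is TokenType.IDENTIFIER
assert Token.var("x").token_type is TokenType.VAR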
class
Tokenizer:
398class Tokenizer(metaclass=_Tokenizer): 399 SINGLE_TOKENS = { 400 "(": TokenType.L_PAREN, 401 ")": TokenType.R_PAREN, 402 "[": TokenType.L_BRACKET, 403 "]": TokenType.R_BRACKET, 404 "{": TokenType.L_BRACE, 405 "}": TokenType.R_BRACE, 406 "&": TokenType.AMP, 407 "^": TokenType.CARET, 408 ":": TokenType.COLON, 409 ",": TokenType.COMMA, 410 ".": TokenType.DOT, 411 "-": TokenType.DASH, 412 "=": TokenType.EQ, 413 ">": TokenType.GT, 414 "<": TokenType.LT, 415 "%": TokenType.MOD, 416 "!": TokenType.NOT, 417 "|": TokenType.PIPE, 418 "+": TokenType.PLUS, 419 ";": TokenType.SEMICOLON, 420 "/": TokenType.SLASH, 421 "\\": TokenType.BACKSLASH, 422 "*": TokenType.STAR, 423 "~": TokenType.TILDA, 424 "?": TokenType.PLACEHOLDER, 425 "@": TokenType.PARAMETER, 426 # used for breaking a var like x'y' but nothing else 427 # the token type doesn't matter 428 "'": TokenType.QUOTE, 429 "`": TokenType.IDENTIFIER, 430 '"': TokenType.IDENTIFIER, 431 "#": TokenType.HASH, 432 } 433 434 QUOTES: t.List[t.Tuple[str, str] | str] = ["'"] 435 436 BIT_STRINGS: t.List[str | t.Tuple[str, str]] = [] 437 438 HEX_STRINGS: t.List[str | t.Tuple[str, str]] = [] 439 440 BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = [] 441 442 IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"'] 443 444 STRING_ESCAPES = ["'"] 445 446 _STRING_ESCAPES: t.Set[str] = set() 447 448 IDENTIFIER_ESCAPES = ['"'] 449 450 _IDENTIFIER_ESCAPES: t.Set[str] = set() 451 452 KEYWORDS = { 453 **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")}, 454 **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")}, 455 "{{+": TokenType.BLOCK_START, 456 "{{-": TokenType.BLOCK_START, 457 "+}}": TokenType.BLOCK_END, 458 "-}}": TokenType.BLOCK_END, 459 "/*+": TokenType.HINT, 460 "==": TokenType.EQ, 461 "::": TokenType.DCOLON, 462 "||": TokenType.DPIPE, 463 ">=": TokenType.GTE, 464 "<=": TokenType.LTE, 465 "<>": TokenType.NEQ, 466 "!=": TokenType.NEQ, 467 "<=>": TokenType.NULLSAFE_EQ, 468 "->": TokenType.ARROW, 469 "->>": TokenType.DARROW, 470 "=>": TokenType.FARROW, 471 "#>": TokenType.HASH_ARROW, 472 "#>>": TokenType.DHASH_ARROW, 473 "<->": TokenType.LR_ARROW, 474 "&&": TokenType.DAMP, 475 "ALL": TokenType.ALL, 476 "ALWAYS": TokenType.ALWAYS, 477 "AND": TokenType.AND, 478 "ANTI": TokenType.ANTI, 479 "ANY": TokenType.ANY, 480 "ASC": TokenType.ASC, 481 "AS": TokenType.ALIAS, 482 "AT TIME ZONE": TokenType.AT_TIME_ZONE, 483 "AUTOINCREMENT": TokenType.AUTO_INCREMENT, 484 "AUTO_INCREMENT": TokenType.AUTO_INCREMENT, 485 "BEGIN": TokenType.BEGIN, 486 "BETWEEN": TokenType.BETWEEN, 487 "BOTH": TokenType.BOTH, 488 "BUCKET": TokenType.BUCKET, 489 "BY DEFAULT": TokenType.BY_DEFAULT, 490 "CACHE": TokenType.CACHE, 491 "UNCACHE": TokenType.UNCACHE, 492 "CASE": TokenType.CASE, 493 "CASCADE": TokenType.CASCADE, 494 "CHARACTER SET": TokenType.CHARACTER_SET, 495 "CLUSTER BY": TokenType.CLUSTER_BY, 496 "COLLATE": TokenType.COLLATE, 497 "COLUMN": TokenType.COLUMN, 498 "COMMIT": TokenType.COMMIT, 499 "COMPOUND": TokenType.COMPOUND, 500 "CONSTRAINT": TokenType.CONSTRAINT, 501 "CREATE": TokenType.CREATE, 502 "CROSS": TokenType.CROSS, 503 "CUBE": TokenType.CUBE, 504 "CURRENT_DATE": TokenType.CURRENT_DATE, 505 "CURRENT ROW": TokenType.CURRENT_ROW, 506 "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP, 507 "DATABASE": TokenType.DATABASE, 508 "DEFAULT": TokenType.DEFAULT, 509 "DELETE": TokenType.DELETE, 510 "DESC": TokenType.DESC, 511 "DESCRIBE": TokenType.DESCRIBE, 512 "DISTINCT": TokenType.DISTINCT, 513 "DISTINCT FROM": TokenType.DISTINCT_FROM, 514 "DISTRIBUTE BY": 
TokenType.DISTRIBUTE_BY, 515 "DIV": TokenType.DIV, 516 "DROP": TokenType.DROP, 517 "ELSE": TokenType.ELSE, 518 "END": TokenType.END, 519 "ESCAPE": TokenType.ESCAPE, 520 "EXCEPT": TokenType.EXCEPT, 521 "EXECUTE": TokenType.EXECUTE, 522 "EXISTS": TokenType.EXISTS, 523 "FALSE": TokenType.FALSE, 524 "FETCH": TokenType.FETCH, 525 "FILTER": TokenType.FILTER, 526 "FIRST": TokenType.FIRST, 527 "FULL": TokenType.FULL, 528 "FUNCTION": TokenType.FUNCTION, 529 "FOLLOWING": TokenType.FOLLOWING, 530 "FOR": TokenType.FOR, 531 "FOREIGN KEY": TokenType.FOREIGN_KEY, 532 "FORMAT": TokenType.FORMAT, 533 "FROM": TokenType.FROM, 534 "GLOB": TokenType.GLOB, 535 "GROUP BY": TokenType.GROUP_BY, 536 "GROUPING SETS": TokenType.GROUPING_SETS, 537 "HAVING": TokenType.HAVING, 538 "IF": TokenType.IF, 539 "ILIKE": TokenType.ILIKE, 540 "IGNORE NULLS": TokenType.IGNORE_NULLS, 541 "IN": TokenType.IN, 542 "INDEX": TokenType.INDEX, 543 "INET": TokenType.INET, 544 "INNER": TokenType.INNER, 545 "INSERT": TokenType.INSERT, 546 "INTERVAL": TokenType.INTERVAL, 547 "INTERSECT": TokenType.INTERSECT, 548 "INTO": TokenType.INTO, 549 "IS": TokenType.IS, 550 "ISNULL": TokenType.ISNULL, 551 "JOIN": TokenType.JOIN, 552 "LATERAL": TokenType.LATERAL, 553 "LAZY": TokenType.LAZY, 554 "LEADING": TokenType.LEADING, 555 "LEFT": TokenType.LEFT, 556 "LIKE": TokenType.LIKE, 557 "LIMIT": TokenType.LIMIT, 558 "LOAD DATA": TokenType.LOAD_DATA, 559 "LOCAL": TokenType.LOCAL, 560 "MATERIALIZED": TokenType.MATERIALIZED, 561 "MERGE": TokenType.MERGE, 562 "NATURAL": TokenType.NATURAL, 563 "NEXT": TokenType.NEXT, 564 "NO ACTION": TokenType.NO_ACTION, 565 "NOT": TokenType.NOT, 566 "NOTNULL": TokenType.NOTNULL, 567 "NULL": TokenType.NULL, 568 "NULLS FIRST": TokenType.NULLS_FIRST, 569 "NULLS LAST": TokenType.NULLS_LAST, 570 "OBJECT": TokenType.OBJECT, 571 "OFFSET": TokenType.OFFSET, 572 "ON": TokenType.ON, 573 "ONLY": TokenType.ONLY, 574 "OPTIONS": TokenType.OPTIONS, 575 "OR": TokenType.OR, 576 "ORDER BY": TokenType.ORDER_BY, 577 "ORDINALITY": TokenType.ORDINALITY, 578 "OUTER": TokenType.OUTER, 579 "OUT OF": TokenType.OUT_OF, 580 "OVER": TokenType.OVER, 581 "OVERLAPS": TokenType.OVERLAPS, 582 "OVERWRITE": TokenType.OVERWRITE, 583 "PARTITION": TokenType.PARTITION, 584 "PARTITION BY": TokenType.PARTITION_BY, 585 "PARTITIONED BY": TokenType.PARTITION_BY, 586 "PARTITIONED_BY": TokenType.PARTITION_BY, 587 "PERCENT": TokenType.PERCENT, 588 "PIVOT": TokenType.PIVOT, 589 "PRECEDING": TokenType.PRECEDING, 590 "PRIMARY KEY": TokenType.PRIMARY_KEY, 591 "PROCEDURE": TokenType.PROCEDURE, 592 "QUALIFY": TokenType.QUALIFY, 593 "RANGE": TokenType.RANGE, 594 "RECURSIVE": TokenType.RECURSIVE, 595 "REGEXP": TokenType.RLIKE, 596 "REPLACE": TokenType.REPLACE, 597 "RESPECT NULLS": TokenType.RESPECT_NULLS, 598 "REFERENCES": TokenType.REFERENCES, 599 "RIGHT": TokenType.RIGHT, 600 "RLIKE": TokenType.RLIKE, 601 "ROLLBACK": TokenType.ROLLBACK, 602 "ROLLUP": TokenType.ROLLUP, 603 "ROW": TokenType.ROW, 604 "ROWS": TokenType.ROWS, 605 "SCHEMA": TokenType.SCHEMA, 606 "SEED": TokenType.SEED, 607 "SELECT": TokenType.SELECT, 608 "SEMI": TokenType.SEMI, 609 "SET": TokenType.SET, 610 "SHOW": TokenType.SHOW, 611 "SIMILAR TO": TokenType.SIMILAR_TO, 612 "SOME": TokenType.SOME, 613 "SORTKEY": TokenType.SORTKEY, 614 "SORT BY": TokenType.SORT_BY, 615 "TABLE": TokenType.TABLE, 616 "TABLESAMPLE": TokenType.TABLE_SAMPLE, 617 "TEMP": TokenType.TEMPORARY, 618 "TEMPORARY": TokenType.TEMPORARY, 619 "THEN": TokenType.THEN, 620 "TRUE": TokenType.TRUE, 621 "TRAILING": TokenType.TRAILING, 622 "UNBOUNDED": 
        "UNION": TokenType.UNION,
        "UNLOGGED": TokenType.UNLOGGED,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
        "WITHIN GROUP": TokenType.WITHIN_GROUP,
        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.BIGINT,
        "DECIMAL": TokenType.DECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ALTER AGGREGATE": TokenType.COMMAND,
        "ALTER DEFAULT": TokenType.COMMAND,
        "ALTER DOMAIN": TokenType.COMMAND,
        "ALTER ROLE": TokenType.COMMAND,
        "ALTER RULE": TokenType.COMMAND,
        "ALTER SEQUENCE": TokenType.COMMAND,
        "ALTER TYPE": TokenType.COMMAND,
        "ALTER USER": TokenType.COMMAND,
        "ALTER VIEW": TokenType.COMMAND,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
    }

    WHITE_SPACE: t.Dict[str, TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
        "\r\n": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SET,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
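
    # Illustrative note (not in the original source): any token type in COMMANDS
    # makes the rest of the statement opaque to the tokenizer. For example, since
    # TokenType.SET is in COMMANDS, tokenizing "SET a = 1" yields just two tokens,
    # SET followed by STRING "a = 1", via the command handling in _add below.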
    # handle numeric literals like 3L in Hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}
    ENCODE: t.Optional[str] = None

    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
    KEYWORD_TRIE = None  # autofilled

    IDENTIFIER_CAN_START_WITH_DIGIT = False

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_prev_token_comments",
        "_prev_token_type",
    )

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 1
        self._comments: t.List[str] = []

        self._char = None
        self._end = None
        self._peek = None
        self._prev_token_line = -1
        self._prev_token_comments: t.List[str] = []
        self._prev_token_type = None

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        self.reset()
        self.sql = sql
        self.size = len(sql)
        self._scan()
        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            self._start = self._current
            self._advance()

            if self._char is None:
                break

            if self._char not in self.WHITE_SPACE:
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char  # type: ignore
        start = self._current - 1
        end = start + size
        if end <= self.size:
            return self.sql[start:end]
        return ""

    def _line_break(self, char: t.Optional[str]) -> bool:
        return self.WHITE_SPACE.get(char) == TokenType.BREAK  # type: ignore

    def _advance(self, i: int = 1) -> None:
        if self._line_break(self._char):
            self._set_new_line()

        self._col += i
        self._current += i
        self._end = self._current >= self.size  # type: ignore
        self._char = self.sql[self._current - 1]  # type: ignore
        self._peek = self.sql[self._current] if self._current < self.size else ""  # type: ignore

    def _set_new_line(self) -> None:
        self._col = 1
        self._line += 1

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line
        self._prev_token_comments = self._comments
        self._prev_token_type = token_type  # type: ignore
        self.tokens.append(
            Token(
                token_type,
                self._text if text is None else text,
                self._line,
                self._col,
                self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if token_type in self.COMMANDS and (
            len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)
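
    # Illustrative sketch (not in the original source): _scan_keywords below walks
    # KEYWORD_TRIE one character at a time via in_trie, which returns 0 (miss),
    # 1 (prefix of a keyword) or 2 (complete keyword). This is what lets
    # multi-word keywords match greedily as a single token, e.g.
    #
    #     trie = new_trie(["ORDER", "ORDER BY"])
    #     in_trie(trie, "ORDER")     # (2, node): a keyword, but keep scanning
    #     in_trie(trie, "ORDER BY")  # (2, node): the longer match wins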

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self.KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = 1
            else:
                result, trie = in_trie(trie, char.upper())  # type: ignore

            if result == 0:
                break
            if result == 2:
                word = chars
            size += 1
            end = self._current - 1 + size

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char in self.WHITE_SPACE

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                chars = " "

        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word

        if not word:
            if self._char in self.SINGLE_TOKENS:
                self._add(self.SINGLE_TOKENS[self._char])  # type: ignore
                return
            self._scan_var()
            return

        if self._scan_string(word):
            return
        if self._scan_formatted_string(word):
            return
        if self._scan_comment(word):
            return

        self._advance(size - 1)
        self._add(self.KEYWORDS[word.upper()])

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:  # type: ignore
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]  # type: ignore

        if comment_end:
            comment_end_size = len(comment_end)

            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance()

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])  # type: ignore
            self._advance(comment_end_size - 1)
        else:
            while not self._end and not self._line_break(self._peek):
                self._advance()
            self._comments.append(self._text[comment_start_size:])  # type: ignore

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True
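
    # Illustrative sketch (not in the original source): NUMERIC_LITERALS maps a
    # numeric suffix to a type keyword. With NUMERIC_LITERALS = {"L": "BIGINT"},
    # as a Hive-style dialect might define, _scan_number below turns "3L" into
    # three tokens, NUMBER "3", DCOLON "::", BIGINT "L", i.e. an implicit cast.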

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()  # type: ignore
            if peek == "B":
                return self._scan_bits()
            elif peek == "X":
                return self._scan_hex()

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():  # type: ignore
                self._advance()
            elif self._peek == "." and not decimal:
                decimal = True
                self._advance()
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:  # type: ignore
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():  # type: ignore
                number_text = self._text
                literal = []

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:  # type: ignore
                    literal.append(self._peek.upper())  # type: ignore
                    self._advance()

                literal = "".join(literal)  # type: ignore
                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))  # type: ignore

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)  # type: ignore
                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._add(TokenType.NUMBER, number_text)
                return self._advance(-len(literal))
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # store the binary literal (e.g. 0b0110) as its integer value
            self._add(TokenType.BIT_STRING, f"{int(value, 2)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # store the hex literal (e.g. 0x1F) as its integer value
            self._add(TokenType.HEX_STRING, f"{int(value, 16)}")
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break

        return self._text

    def _scan_string(self, quote: str) -> bool:
        quote_end = self._QUOTES.get(quote)  # type: ignore
        if quote_end is None:
            return False

        self._advance(len(quote))
        text = self._extract_string(quote_end)
        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text  # type: ignore
        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
        return True
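
    # Illustrative sketch (not in the original source): in a dialect that sets
    # HEX_STRINGS = [("x'", "'")], the _Tokenizer metaclass fills _HEX_STRINGS so
    # that _scan_formatted_string below tokenizes x'1F' as a HEX_STRING token
    # with text "31", the decimal value of the hex digits.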

    # X'1234', b'0110', E'\\\\\' etc.
    def _scan_formatted_string(self, string_start: str) -> bool:
        if string_start in self._HEX_STRINGS:  # type: ignore
            delimiters = self._HEX_STRINGS  # type: ignore
            token_type = TokenType.HEX_STRING
            base = 16
        elif string_start in self._BIT_STRINGS:  # type: ignore
            delimiters = self._BIT_STRINGS  # type: ignore
            token_type = TokenType.BIT_STRING
            base = 2
        elif string_start in self._BYTE_STRINGS:  # type: ignore
            delimiters = self._BYTE_STRINGS  # type: ignore
            token_type = TokenType.BYTE_STRING
            base = None
        else:
            return False

        self._advance(len(string_start))
        string_end = delimiters.get(string_start)
        text = self._extract_string(string_end)

        if base is None:
            self._add(token_type, text)
        else:
            try:
                self._add(token_type, f"{int(text, base)}")
            except ValueError:
                raise RuntimeError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        text = ""
        identifier_end_is_escape = identifier_end in self._IDENTIFIER_ESCAPES

        while True:
            if self._end:
                raise RuntimeError(f"Missing {identifier_end} from {self._line}:{self._start}")

            self._advance()
            if self._char == identifier_end:
                if identifier_end_is_escape and self._peek == identifier_end:
                    text += identifier_end  # type: ignore
                    self._advance()
                    continue

                break

            text += self._char  # type: ignore

        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()  # type: ignore
            if char and char not in self.SINGLE_TOKENS:
                self._advance()
            else:
                break
        self._add(
            TokenType.VAR
            if self._prev_token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str) -> str:
        text = ""
        delim_size = len(delimiter)

        while True:
            if self._char in self._STRING_ESCAPES and (
                self._peek == delimiter or self._peek in self._STRING_ESCAPES
            ):
                if self._peek == delimiter:
                    text += self._peek  # type: ignore
                else:
                    text += self._char + self._peek  # type: ignore

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
                text += self._char  # type: ignore
                self._advance()

        return text
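
# A minimal subclassing sketch (illustrative, not part of this module): dialect
# tokenizers override the class-level settings above, and the _Tokenizer
# metaclass recomputes the derived lookups (_QUOTES, _COMMENTS, KEYWORD_TRIE, ...).
#
#     class MyTokenizer(Tokenizer):
#         IDENTIFIERS = ["`"]          # MySQL-style quoted identifiers
#         HEX_STRINGS = [("x'", "'")]  # x'1F' -> HEX_STRING "31"
#         KEYWORDS = {
#             **Tokenizer.KEYWORDS,
#             "INT64": TokenType.BIGINT,  # map an extra type keyword
#         }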
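
# A minimal usage sketch (illustrative, not part of this module):
#
#     tokens = Tokenizer().tokenize("SELECT 1 /* one */")
#     [tok.token_type for tok in tokens]  # [TokenType.SELECT, TokenType.NUMBER]
#     tokens[1].comments                  # [" one "]: trailing comments attach
#                                         # to the preceding token (_scan_comment)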