
Merging upstream version 23.7.0.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Author: Daniel Baumann, 2025-02-13 21:30:28 +01:00
Parent: ebba7c6a18
Commit: d26905e4af
Signed by: daniel (GPG key ID: FBB4F0E80A80222F)
187 changed files with 86502 additions and 71397 deletions

sqlglot/__init__.py

@@ -45,7 +45,7 @@ from sqlglot.expressions import (
 from sqlglot.generator import Generator as Generator
 from sqlglot.parser import Parser as Parser
 from sqlglot.schema import MappingSchema as MappingSchema, Schema as Schema
-from sqlglot.tokens import Tokenizer as Tokenizer, TokenType as TokenType
+from sqlglot.tokens import Token as Token, Tokenizer as Tokenizer, TokenType as TokenType
 
 if t.TYPE_CHECKING:
     from sqlglot._typing import E
@@ -69,6 +69,21 @@ schema = MappingSchema()
 """The default schema used by SQLGlot (e.g. in the optimizer)."""
 
 
+def tokenize(sql: str, read: DialectType = None, dialect: DialectType = None) -> t.List[Token]:
+    """
+    Tokenizes the given SQL string.
+
+    Args:
+        sql: the SQL code string to tokenize.
+        read: the SQL dialect to apply during tokenizing (eg. "spark", "hive", "presto", "mysql").
+        dialect: the SQL dialect (alias for read).
+
+    Returns:
+        The resulting list of tokens.
+    """
+    return Dialect.get_or_raise(read or dialect).tokenize(sql)
+
+
 def parse(
     sql: str, read: DialectType = None, dialect: DialectType = None, **opts
 ) -> t.List[t.Optional[Expression]]:
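
For reference, the newly exported top-level tokenize() mirrors parse(): it resolves the requested dialect via Dialect.get_or_raise and delegates to that dialect's tokenizer. A minimal usage sketch, assuming sqlglot 23.7.0 is installed (the sample queries are illustrative, not from the diff):

    import sqlglot

    # Tokenize with the default dialect; returns a list of Token objects,
    # each carrying its token type and source text.
    tokens = sqlglot.tokenize("SELECT a FROM t WHERE b = 1")
    for token in tokens:
        print(token.token_type, token.text)

    # A specific dialect can be selected with `read` (or its alias `dialect`),
    # e.g. MySQL-style backtick identifiers.
    mysql_tokens = sqlglot.tokenize("SELECT `a` FROM t", read="mysql")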