Merging upstream version 11.3.6.

Signed-off-by: Daniel Baumann <daniel@debian.org>

parent f5f7767f8d
commit e33f287a97

46 changed files with 23416 additions and 22639 deletions
sqlglot/dialects/bigquery.py
@@ -214,11 +214,7 @@ class BigQuery(Dialect):
             ),
         }
 
-        INTEGER_DIVISION = False
-
     class Generator(generator.Generator):
-        INTEGER_DIVISION = False
-
         TRANSFORMS = {
             **generator.Generator.TRANSFORMS,  # type: ignore
             **transforms.REMOVE_PRECISION_PARAMETERIZED_TYPES,  # type: ignore
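Note: REMOVE_PRECISION_PARAMETERIZED_TYPES strips length/precision arguments from data types, which BigQuery does not accept in casts. A minimal sketch against this release (sample SQL and output are illustrative, not verified here):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT CAST(x AS VARCHAR(10))", write="bigquery")
    ['SELECT CAST(x AS STRING)']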
sqlglot/dialects/clickhouse.py
@@ -56,8 +56,6 @@ class ClickHouse(Dialect):
 
         TABLE_ALIAS_TOKENS = {*parser.Parser.TABLE_ALIAS_TOKENS} - {TokenType.ANY}  # type: ignore
 
-        INTEGER_DIVISION = False
-
         def _parse_in(
             self, this: t.Optional[exp.Expression], is_global: bool = False
         ) -> exp.Expression:
@@ -96,7 +94,6 @@ class ClickHouse(Dialect):
 
     class Generator(generator.Generator):
         STRUCT_DELIMITER = ("(", ")")
-        INTEGER_DIVISION = False
 
         TYPE_MAPPING = {
             **generator.Generator.TYPE_MAPPING,  # type: ignore
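Note: the is_global parameter on _parse_in lets the parser capture ClickHouse's distributed GLOBAL IN form on the expression. A minimal sketch, assuming the flag is stored under the "is_global" arg (illustrative, not verified):

    >>> import sqlglot
    >>> e = sqlglot.parse_one("SELECT * FROM t WHERE x GLOBAL IN (1, 2)", read="clickhouse")
    >>> e.find(sqlglot.exp.In).args.get("is_global")
    True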
sqlglot/dialects/databricks.py
@@ -21,6 +21,7 @@ class Databricks(Spark):
             **Spark.Generator.TRANSFORMS,  # type: ignore
             exp.DateAdd: generate_date_delta_with_unit_sql,
             exp.DateDiff: generate_date_delta_with_unit_sql,
+            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
         }
 
         PARAMETER_TOKEN = "$"
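Note: the new exp.ToChar entry routes TO_CHAR through function_fallback_sql, which re-emits the function under its canonical name instead of treating it as unsupported. Sketch (illustrative output):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT TO_CHAR(x, 'fmt')", read="databricks", write="databricks")
    ["SELECT TO_CHAR(x, 'fmt')"]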
sqlglot/dialects/drill.py
@@ -9,7 +9,6 @@ from sqlglot.dialects.dialect import (
     create_with_partitions_sql,
     datestrtodate_sql,
     format_time_lambda,
-    no_pivot_sql,
     no_trycast_sql,
     rename_func,
     str_position_sql,
@@ -136,16 +135,17 @@ class Drill(Dialect):
             exp.ILike: lambda self, e: f" {self.sql(e, 'this')} `ILIKE` {self.sql(e, 'expression')}",
             exp.Levenshtein: rename_func("LEVENSHTEIN_DISTANCE"),
             exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
-            exp.Pivot: no_pivot_sql,
             exp.RegexpLike: rename_func("REGEXP_MATCHES"),
             exp.StrPosition: str_position_sql,
             exp.StrToDate: _str_to_date,
+            exp.Pow: rename_func("POW"),
             exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.TimeStrToDate: lambda self, e: f"CAST({self.sql(e, 'this')} AS DATE)",
             exp.TimeStrToTime: timestrtotime_sql,
             exp.TimeStrToUnix: rename_func("UNIX_TIMESTAMP"),
             exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.TimeToUnix: rename_func("UNIX_TIMESTAMP"),
+            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
             exp.TryCast: no_trycast_sql,
             exp.TsOrDsAdd: lambda self, e: f"DATE_ADD(CAST({self.sql(e, 'this')} AS DATE), {self.sql(exp.Interval(this=e.expression, unit=exp.Var(this='DAY')))})",
             exp.TsOrDsToDate: ts_or_ds_to_date_sql("drill"),
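Note: exp.Pow: rename_func("POW") makes the Drill generator spell the power function POW. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT POWER(x, 2)", write="drill")
    ['SELECT POW(x, 2)']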
sqlglot/dialects/duckdb.py
@@ -81,8 +81,21 @@ class DuckDB(Dialect):
             **tokens.Tokenizer.KEYWORDS,
             ":=": TokenType.EQ,
             "ATTACH": TokenType.COMMAND,
-            "CHARACTER VARYING": TokenType.VARCHAR,
+            "BINARY": TokenType.VARBINARY,
+            "BPCHAR": TokenType.TEXT,
+            "BITSTRING": TokenType.BIT,
+            "CHAR": TokenType.TEXT,
+            "CHARACTER VARYING": TokenType.TEXT,
+            "EXCLUDE": TokenType.EXCEPT,
+            "INT1": TokenType.TINYINT,
+            "LOGICAL": TokenType.BOOLEAN,
+            "NUMERIC": TokenType.DOUBLE,
+            "SIGNED": TokenType.INT,
             "STRING": TokenType.VARCHAR,
+            "UBIGINT": TokenType.UBIGINT,
+            "UINTEGER": TokenType.UINT,
+            "USMALLINT": TokenType.USMALLINT,
+            "UTINYINT": TokenType.UTINYINT,
         }
 
     class Parser(parser.Parser):
@@ -115,6 +128,14 @@ class DuckDB(Dialect):
             "UNNEST": exp.Explode.from_arg_list,
         }
 
+        TYPE_TOKENS = {
+            *parser.Parser.TYPE_TOKENS,
+            TokenType.UBIGINT,
+            TokenType.UINT,
+            TokenType.USMALLINT,
+            TokenType.UTINYINT,
+        }
+
     class Generator(generator.Generator):
         STRUCT_DELIMITER = ("(", ")")
 
@@ -169,8 +190,14 @@ class DuckDB(Dialect):
 
         TYPE_MAPPING = {
             **generator.Generator.TYPE_MAPPING,  # type: ignore
-            exp.DataType.Type.VARCHAR: "TEXT",
+            exp.DataType.Type.BINARY: "BLOB",
+            exp.DataType.Type.CHAR: "TEXT",
+            exp.DataType.Type.FLOAT: "REAL",
+            exp.DataType.Type.NCHAR: "TEXT",
             exp.DataType.Type.NVARCHAR: "TEXT",
+            exp.DataType.Type.UINT: "UINTEGER",
+            exp.DataType.Type.VARBINARY: "BLOB",
+            exp.DataType.Type.VARCHAR: "TEXT",
         }
 
         STAR_MAPPING = {
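Note: with the new keywords, TYPE_TOKENS, and TYPE_MAPPING entries, DuckDB's unsigned integer types should round-trip instead of failing to tokenize. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT CAST(x AS UINTEGER)", read="duckdb", write="duckdb")
    ['SELECT CAST(x AS UINTEGER)']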
sqlglot/dialects/hive.py
@@ -257,11 +257,7 @@ class Hive(Dialect):
             ),
         }
 
-        INTEGER_DIVISION = False
-
     class Generator(generator.Generator):
-        INTEGER_DIVISION = False
-
         TYPE_MAPPING = {
             **generator.Generator.TYPE_MAPPING,  # type: ignore
             exp.DataType.Type.TEXT: "STRING",
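Note: the context lines above show Hive's TYPE_MAPPING rendering the generic TEXT type as STRING. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT CAST(x AS TEXT)", write="hive")
    ['SELECT CAST(x AS STRING)']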
sqlglot/dialects/mysql.py
@@ -301,8 +301,6 @@ class MySQL(Dialect):
             "READ ONLY",
         }
 
-        INTEGER_DIVISION = False
-
         def _parse_show_mysql(self, this, target=False, full=None, global_=None):
             if target:
                 if isinstance(target, str):
@@ -435,7 +433,6 @@ class MySQL(Dialect):
     class Generator(generator.Generator):
         LOCKING_READS_SUPPORTED = True
         NULL_ORDERING_SUPPORTED = False
-        INTEGER_DIVISION = False
 
         TRANSFORMS = {
             **generator.Generator.TRANSFORMS,  # type: ignore
@@ -446,6 +443,7 @@ class MySQL(Dialect):
             exp.TableSample: no_tablesample_sql,
             exp.TryCast: no_trycast_sql,
             exp.DateAdd: _date_add_sql("ADD"),
+            exp.DateDiff: lambda self, e: f"DATEDIFF({self.format_args(e.this, e.expression)})",
             exp.DateSub: _date_add_sql("SUB"),
             exp.DateTrunc: _date_trunc_sql,
             exp.DayOfWeek: rename_func("DAYOFWEEK"),
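Note: the new exp.DateDiff transform emits MySQL's two-argument DATEDIFF (day granularity), dropping any unit argument. Sketch, assuming T-SQL input (illustrative, not verified):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT DATEDIFF(day, d2, d1)", read="tsql", write="mysql")
    ['SELECT DATEDIFF(d1, d2)']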
sqlglot/dialects/oracle.py
@@ -4,7 +4,7 @@ import typing as t
 
 from sqlglot import exp, generator, parser, tokens, transforms
 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
-from sqlglot.helper import csv
+from sqlglot.helper import csv, seq_get
 from sqlglot.tokens import TokenType
 
 PASSING_TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {
@@ -75,6 +75,7 @@ class Oracle(Dialect):
         FUNCTIONS = {
             **parser.Parser.FUNCTIONS,  # type: ignore
             "DECODE": exp.Matches.from_arg_list,
+            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
         }
 
         FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
@@ -82,8 +83,6 @@ class Oracle(Dialect):
             "XMLTABLE": _parse_xml_table,
         }
 
-        INTEGER_DIVISION = False
-
         def _parse_column(self) -> t.Optional[exp.Expression]:
             column = super()._parse_column()
             if column:
@@ -92,7 +91,6 @@ class Oracle(Dialect):
 
     class Generator(generator.Generator):
         LOCKING_READS_SUPPORTED = True
-        INTEGER_DIVISION = False
 
         TYPE_MAPPING = {
             **generator.Generator.TYPE_MAPPING,  # type: ignore
@@ -122,6 +120,7 @@ class Oracle(Dialect):
             exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
             exp.Substring: rename_func("SUBSTR"),
+            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
         }
 
         def query_modifiers(self, expression: exp.Expression, *sqls: str) -> str:
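Note: the new "SQUARE" entry parses into exp.Pow with a literal exponent of 2, so on output it normalizes to the default POWER spelling. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT SQUARE(x)", read="oracle", write="oracle")
    ['SELECT POWER(x, 2)']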
sqlglot/dialects/postgres.py
@@ -15,6 +15,7 @@ from sqlglot.dialects.dialect import (
     trim_sql,
 )
 from sqlglot.helper import seq_get
+from sqlglot.parser import binary_range_parser
 from sqlglot.tokens import TokenType
 from sqlglot.transforms import delegate, preprocess
 
@@ -219,6 +220,8 @@ class Postgres(Dialect):
             "~~*": TokenType.ILIKE,
             "~*": TokenType.IRLIKE,
             "~": TokenType.RLIKE,
+            "@>": TokenType.AT_GT,
+            "<@": TokenType.LT_AT,
             "BEGIN": TokenType.COMMAND,
             "BEGIN TRANSACTION": TokenType.BEGIN,
             "BIGSERIAL": TokenType.BIGSERIAL,
@@ -260,7 +263,17 @@ class Postgres(Dialect):
             TokenType.HASH: exp.BitwiseXor,
         }
 
-        FACTOR = {**parser.Parser.FACTOR, TokenType.CARET: exp.Pow}
+        FACTOR = {
+            **parser.Parser.FACTOR,
+            TokenType.CARET: exp.Pow,
+        }
+
+        RANGE_PARSERS = {
+            **parser.Parser.RANGE_PARSERS,  # type: ignore
+            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
+            TokenType.AT_GT: binary_range_parser(exp.ArrayContains),
+            TokenType.LT_AT: binary_range_parser(exp.ArrayContained),
+        }
 
     class Generator(generator.Generator):
         LOCKING_READS_SUPPORTED = True
@@ -299,6 +312,9 @@ class Postgres(Dialect):
             exp.DateDiff: _date_diff_sql,
             exp.LogicalOr: rename_func("BOOL_OR"),
             exp.Min: min_or_least,
+            exp.ArrayOverlaps: lambda self, e: self.binary(e, "&&"),
+            exp.ArrayContains: lambda self, e: self.binary(e, "@>"),
+            exp.ArrayContained: lambda self, e: self.binary(e, "<@"),
             exp.RegexpLike: lambda self, e: self.binary(e, "~"),
             exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
             exp.StrPosition: str_position_sql,
@@ -307,6 +323,7 @@ class Postgres(Dialect):
             exp.TimeStrToTime: lambda self, e: f"CAST({self.sql(e, 'this')} AS TIMESTAMP)",
             exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
             exp.TableSample: no_tablesample_sql,
+            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
             exp.Trim: trim_sql,
             exp.TryCast: no_trycast_sql,
             exp.UnixToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')})",
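Note: binary_range_parser wires the @>, <@, and && tokens to ArrayContains/ArrayContained/ArrayOverlaps expressions, and the matching TRANSFORMS entries emit them back as operators, so Postgres array operators should round-trip. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT ARRAY[1, 2] @> ARRAY[1]", read="postgres", write="postgres")
    ['SELECT ARRAY[1, 2] @> ARRAY[1]']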
sqlglot/dialects/redshift.py
@@ -29,6 +29,8 @@ class Redshift(Postgres):
             "NVL": exp.Coalesce.from_arg_list,
         }
 
+        CONVERT_TYPE_FIRST = True
+
         def _parse_types(self, check_func: bool = False) -> t.Optional[exp.Expression]:
             this = super()._parse_types(check_func=check_func)
 
@@ -83,6 +85,9 @@ class Redshift(Postgres):
             exp.Matches: rename_func("DECODE"),
         }
 
+        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
+        TRANSFORMS.pop(exp.Pow)
+
         def values_sql(self, expression: exp.Values) -> str:
             """
             Converts `VALUES...` expression into a series of unions.
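Note: popping exp.Pow removes the caret transform inherited from Postgres, so Pow falls back to the default function spelling, as the new comment says. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT x ^ 2", read="postgres", write="redshift")
    ['SELECT POWER(x, 2)']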
sqlglot/dialects/snowflake.py
@@ -16,6 +16,7 @@ from sqlglot.dialects.dialect import (
 )
 from sqlglot.expressions import Literal
 from sqlglot.helper import flatten, seq_get
+from sqlglot.parser import binary_range_parser
 from sqlglot.tokens import TokenType
 
 
@@ -111,7 +112,7 @@ def _parse_date_part(self):
 def _div0_to_if(args):
     cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
     true = exp.Literal.number(0)
-    false = exp.FloatDiv(this=seq_get(args, 0), expression=seq_get(args, 1))
+    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
     return exp.If(this=cond, true=true, false=false)
 
 
@@ -173,26 +174,33 @@ class Snowflake(Dialect):
         FUNCTIONS = {
             **parser.Parser.FUNCTIONS,
             "ARRAYAGG": exp.ArrayAgg.from_arg_list,
+            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
+            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
             "DATEADD": lambda args: exp.DateAdd(
                 this=seq_get(args, 2),
                 expression=seq_get(args, 1),
                 unit=seq_get(args, 0),
             ),
             "DATEDIFF": lambda args: exp.DateDiff(
                 this=seq_get(args, 2),
                 expression=seq_get(args, 1),
                 unit=seq_get(args, 0),
             ),
+            "DATE_TRUNC": lambda args: exp.DateTrunc(
+                unit=exp.Literal.string(seq_get(args, 0).name),  # type: ignore
+                this=seq_get(args, 1),
+            ),
+            "DECODE": exp.Matches.from_arg_list,
             "DIV0": _div0_to_if,
             "IFF": exp.If.from_arg_list,
-            "TO_ARRAY": exp.Array.from_arg_list,
-            "TO_TIMESTAMP": _snowflake_to_timestamp,
-            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
-            "RLIKE": exp.RegexpLike.from_arg_list,
-            "DECODE": exp.Matches.from_arg_list,
-            "OBJECT_CONSTRUCT": parser.parse_var_map,
-            "ZEROIFNULL": _zeroifnull_to_if,
             "NULLIFZERO": _nullifzero_to_if,
+            "OBJECT_CONSTRUCT": parser.parse_var_map,
+            "RLIKE": exp.RegexpLike.from_arg_list,
+            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
+            "TO_ARRAY": exp.Array.from_arg_list,
+            "TO_VARCHAR": exp.ToChar.from_arg_list,
+            "TO_TIMESTAMP": _snowflake_to_timestamp,
+            "ZEROIFNULL": _zeroifnull_to_if,
         }
 
         FUNCTION_PARSERS = {
@@ -218,12 +226,8 @@ class Snowflake(Dialect):
 
         RANGE_PARSERS = {
             **parser.Parser.RANGE_PARSERS,  # type: ignore
-            TokenType.LIKE_ANY: lambda self, this: self._parse_escape(
-                self.expression(exp.LikeAny, this=this, expression=self._parse_bitwise())
-            ),
-            TokenType.ILIKE_ANY: lambda self, this: self._parse_escape(
-                self.expression(exp.ILikeAny, this=this, expression=self._parse_bitwise())
-            ),
+            TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny),
+            TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny),
         }
 
         ALTER_PARSERS = {
@@ -232,8 +236,6 @@ class Snowflake(Dialect):
             "SET": lambda self: self._parse_alter_table_set_tag(),
         }
 
-        INTEGER_DIVISION = False
-
         def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression:
             self._match_text_seq("TAG")
             parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction)
@@ -266,7 +268,6 @@ class Snowflake(Dialect):
 
     class Generator(generator.Generator):
         PARAMETER_TOKEN = "$"
-        INTEGER_DIVISION = False
         MATCHED_BY_SOURCE = False
 
         TRANSFORMS = {
@@ -289,6 +290,7 @@ class Snowflake(Dialect):
             exp.TimeStrToTime: timestrtotime_sql,
             exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
             exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
+            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
             exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
             exp.UnixToTime: _unix_to_time_sql,
             exp.DayOfWeek: rename_func("DAYOFWEEK"),
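Note: _div0_to_if rewrites DIV0(a, b) into an IF over a plain exp.Div; exp.FloatDiv goes away along with the INTEGER_DIVISION flags removed throughout this commit. Sketch, assuming the default dialect renders exp.If as a CASE expression (illustrative, not verified):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT DIV0(a, b)", read="snowflake")
    ['SELECT CASE WHEN b = 0 THEN 0 ELSE a / b END']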
sqlglot/dialects/sqlite.py
@@ -92,6 +92,33 @@ class SQLite(Dialect):
             exp.GroupConcat: _group_concat_sql,
         }
 
+        def datediff_sql(self, expression: exp.DateDiff) -> str:
+            unit = expression.args.get("unit")
+            unit = unit.name.upper() if unit else "DAY"
+
+            sql = f"(JULIANDAY({self.sql(expression, 'this')}) - JULIANDAY({self.sql(expression, 'expression')}))"
+
+            if unit == "MONTH":
+                sql = f"{sql} / 30.0"
+            elif unit == "YEAR":
+                sql = f"{sql} / 365.0"
+            elif unit == "HOUR":
+                sql = f"{sql} * 24.0"
+            elif unit == "MINUTE":
+                sql = f"{sql} * 1440.0"
+            elif unit == "SECOND":
+                sql = f"{sql} * 86400.0"
+            elif unit == "MILLISECOND":
+                sql = f"{sql} * 86400000.0"
+            elif unit == "MICROSECOND":
+                sql = f"{sql} * 86400000000.0"
+            elif unit == "NANOSECOND":
+                sql = f"{sql} * 8640000000000.0"
+            else:
+                self.unsupported("DATEDIFF unsupported for '{unit}'.")
+
+            return f"CAST({sql} AS INTEGER)"
+
         def fetch_sql(self, expression):
             return self.limit_sql(exp.Limit(expression=expression.args.get("count")))
 
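Note: the new datediff_sql builds date differences from JULIANDAY deltas, scaling by the requested unit before casting to INTEGER. Sketch, assuming T-SQL input where DATEDIFF(unit, start, end) maps start/end to the expression/this args (illustrative, not verified):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT DATEDIFF(day, d2, d1)", read="tsql", write="sqlite")
    ['SELECT CAST((JULIANDAY(d1) - JULIANDAY(d2)) AS INTEGER)']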
sqlglot/dialects/teradata.py
@@ -129,6 +129,7 @@ class Teradata(Dialect):
         TRANSFORMS = {
             **generator.Generator.TRANSFORMS,
             exp.Min: min_or_least,
+            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
         }
 
         def partitionedbyproperty_sql(self, expression: exp.PartitionedByProperty) -> str:
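Note: as in the other dialects in this commit, exp.ToChar falls back to the plain function form rather than erroring. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT TO_CHAR(d, 'YYYY-MM-DD')", read="teradata", write="teradata")
    ["SELECT TO_CHAR(d, 'YYYY-MM-DD')"]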
sqlglot/dialects/tsql.py
@@ -248,7 +248,6 @@ class TSQL(Dialect):
 
         KEYWORDS = {
             **tokens.Tokenizer.KEYWORDS,
-            "BIT": TokenType.BOOLEAN,
             "DATETIME2": TokenType.DATETIME,
             "DATETIMEOFFSET": TokenType.TIMESTAMPTZ,
             "DECLARE": TokenType.COMMAND,
@@ -283,19 +282,20 @@ class TSQL(Dialect):
                 substr=seq_get(args, 0),
                 position=seq_get(args, 2),
             ),
-            "ISNULL": exp.Coalesce.from_arg_list,
             "DATEADD": parse_date_delta(exp.DateAdd, unit_mapping=DATE_DELTA_INTERVAL),
             "DATEDIFF": parse_date_delta(exp.DateDiff, unit_mapping=DATE_DELTA_INTERVAL),
             "DATENAME": _format_time_lambda(exp.TimeToStr, full_format_mapping=True),
             "DATEPART": _format_time_lambda(exp.TimeToStr),
+            "EOMONTH": _parse_eomonth,
+            "FORMAT": _parse_format,
             "GETDATE": exp.CurrentTimestamp.from_arg_list,
-            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
             "IIF": exp.If.from_arg_list,
+            "ISNULL": exp.Coalesce.from_arg_list,
+            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
             "LEN": exp.Length.from_arg_list,
             "REPLICATE": exp.Repeat.from_arg_list,
-            "JSON_VALUE": exp.JSONExtractScalar.from_arg_list,
-            "FORMAT": _parse_format,
-            "EOMONTH": _parse_eomonth,
+            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
+            "SYSDATETIME": exp.CurrentTimestamp.from_arg_list,
         }
 
         VAR_LENGTH_DATATYPES = {
@@ -421,7 +421,6 @@ class TSQL(Dialect):
 
         TYPE_MAPPING = {
             **generator.Generator.TYPE_MAPPING,  # type: ignore
-            exp.DataType.Type.BOOLEAN: "BIT",
             exp.DataType.Type.INT: "INTEGER",
             exp.DataType.Type.DECIMAL: "NUMERIC",
             exp.DataType.Type.DATETIME: "DATETIME2",
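Note: the re-sorted FUNCTIONS map also gains "SQUARE", parsed as exp.Pow with a literal exponent of 2, mirroring the Oracle and Snowflake additions above. Sketch (illustrative):

    >>> import sqlglot
    >>> sqlglot.transpile("SELECT SQUARE(c)", read="tsql", write="tsql")
    ['SELECT POWER(c, 2)']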