sqlglot.dialects.bigquery
from __future__ import annotations

import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    datestrtodate_sql,
    format_time_lambda,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    parse_date_delta_with_interval,
    rename_func,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
)
from sqlglot.helper import seq_get, split_num_words
from sqlglot.tokens import TokenType


def _date_add_sql(
    data_type: str, kind: str
) -> t.Callable[[generator.Generator, exp.Expression], str]:
    def func(self, expression):
        this = self.sql(expression, "this")
        unit = expression.args.get("unit")
        unit = exp.var(unit.name.upper() if unit else "DAY")
        interval = exp.Interval(this=expression.expression, unit=unit)
        return f"{data_type}_{kind}({this}, {self.sql(interval)})"

    return func


def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
    if not isinstance(expression.unnest().parent, exp.From):
        return self.values_sql(expression)

    alias = expression.args.get("alias")

    structs = [
        exp.Struct(
            expressions=[
                exp.alias_(value, column_name)
                for value, column_name in zip(
                    t.expressions,
                    alias.columns
                    if alias and alias.columns
                    else (f"_c{i}" for i in range(len(t.expressions))),
                )
            ]
        )
        for t in expression.find_all(exp.Tuple)
    ]

    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))


def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{this.this} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
    kind = expression.args["kind"]
    returns = expression.find(exp.ReturnsProperty)
    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
        expression = expression.copy()
        expression.set("kind", "TABLE FUNCTION")
        if isinstance(
            expression.expression,
            (
                exp.Subquery,
                exp.Literal,
            ),
        ):
            expression.set("expression", expression.expression.this)

        return self.create_sql(expression)

    return self.create_sql(expression)


def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
    """Remove references to unnest table aliases since bigquery doesn't allow them.

    These are added by the optimizer's qualify_column step.
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias:
                for select in expression.selects:
                    for column in select.find_all(exp.Column):
                        if column.table == unnest.alias:
                            column.set("table", None)

    return expression


class BigQuery(Dialect):
    UNNEST_COLUMN_ONLY = True

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": lambda args: format_time_lambda(exp.StrToTime, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.AtTimeZone: lambda self, e: self.func(
                "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone"))
            ),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.Select: transforms.preprocess(
                [_unqualify_unnest, transforms.eliminate_distinct_on]
            ),
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TryCast: lambda self, e: f"SAFE_CAST({self.sql(e, 'this')} AS {self.sql(e, 'to')})",
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.VariancePop: rename_func("VAR_POP"),
            exp.Values: _derived_table_values_to_unnest,
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Create: _create_sql,
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.StabilityProperty: lambda self, e: "DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        RESERVED_KEYWORDS = {*generator.Generator.RESERVED_KEYWORDS, "hash"}

        def array_sql(self, expression: exp.Array) -> str:
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("OPTIONS"))
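A minimal usage sketch (not part of the module source): the dialect is registered under the name "bigquery", so it is normally exercised through sqlglot's top-level helpers rather than instantiated directly. The SQL strings below are illustrative, and the commented outputs are indicative rather than guaranteed across versions.

    import sqlglot

    # Generic SQL -> BigQuery: the exp.TryCast transform renders SAFE_CAST,
    # and TYPE_MAPPING rewrites INT to INT64.
    print(sqlglot.transpile("SELECT TRY_CAST(x AS INT)", write="bigquery")[0])
    # e.g. SELECT SAFE_CAST(x AS INT64)

    # BigQuery -> generic SQL: backtick identifiers are handled by the
    # tokenizer, and REGEXP_CONTAINS is parsed into exp.RegexpLike.
    print(sqlglot.transpile("SELECT REGEXP_CONTAINS(`col`, 'a+')", read="bigquery")[0])
    # e.g. SELECT REGEXP_LIKE("col", 'a+')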
class BigQuery(sqlglot.dialects.dialect.Dialect)
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer)
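A small sketch driving the tokenizer directly; it is normally invoked for you by the dialect's parse and generate entry points, and the query string is made up for illustration.

    from sqlglot.dialects.bigquery import BigQuery

    # Backticks delimit identifiers and '#' starts a line comment in this
    # dialect, per the QUOTES/IDENTIFIERS/COMMENTS settings above.
    tokens = BigQuery.Tokenizer().tokenize("SELECT `col` FROM t  # trailing comment")
    print([(token.token_type, token.text) for token in tokens])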
class BigQuery.Parser(sqlglot.parser.Parser)
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
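A short sketch of one dialect-specific parsing behavior; parse_one is sqlglot's public entry point, and the table name below is hypothetical.

    import sqlglot
    from sqlglot import exp

    # Unquoted dashes are legal in BigQuery project names, so
    # _parse_table_part folds `my-project` back into a single identifier.
    sql = "SELECT * FROM my-project.dataset.tbl"
    table = sqlglot.parse_one(sql, read="bigquery").find(exp.Table)
    print(table.catalog, table.db, table.name)  # e.g. my-project dataset tbl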
class BigQuery.Generator(sqlglot.generator.Generator)
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether or not to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): never quote, except where the dialect makes quoting mandatory; True or 'always': always quote; 'safe': only quote identifiers that are case insensitive.
- normalize: Whether or not to normalize identifiers to lowercase. Default: False.
- pad: Determines the pad size in a formatted string. Default: 2.
- indent: Determines the indentation size in a formatted string. Default: 2.
- normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): convert names to uppercase; "lower": convert names to lowercase; False: disable function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
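A short sketch of generating BigQuery SQL from a parsed tree; the input query is illustrative.

    import sqlglot

    # EXPLICIT_UNION forces the DISTINCT/ALL qualifier on set operations, and
    # except_op warns on EXCEPT ALL, which BigQuery does not support.
    expression = sqlglot.parse_one("SELECT x FROM t EXCEPT SELECT y FROM u")
    print(expression.sql(dialect="bigquery"))
    # e.g. SELECT x FROM t EXCEPT DISTINCT SELECT y FROM u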