sqlglot.dialects.redshift
```python
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        expr = expr_type(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
        if expr_type is exp.TsOrDsAdd:
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True

    TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
    TIME_MAPPING = {
        **Postgres.TIME_MAPPING,
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "SPLIT_TO_ARRAY": lambda args: exp.StringToArray(
                this=seq_get(args, 0), expression=seq_get(args, 1) or exp.Literal.string(",")
            ),
            "STRTOL": exp.FromBase.from_arg_list,
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, transaction=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            self._retreat(index)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
            "MINUS": TokenType.EXCEPT,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"
        # Redshift doesn't have `WITH` as part of its with_properties, so we remove it
        WITH_PROPERTIES_PREFIX = " "

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self,
            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.ParseJSON: rename_func("JSON_PARSE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self,
            e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (Postgres)
        TRANSFORMS.pop(exp.Pow)

        # Redshift supports ANY_VALUE(..)
        TRANSFORMS.pop(exp.AnyValue)

        # Redshift supports LAST_DAY(..)
        TRANSFORMS.pop(exp.LastDay)

        RESERVED_KEYWORDS = {
            "aes128",
            "aes256",
            "all",
            "allowoverwrite",
            "analyse",
            "analyze",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "authorization",
            "az64",
            "backup",
            "between",
            "binary",
            "blanksasnull",
            "both",
            "bytedict",
            "bzip2",
            "case",
            "cast",
            "check",
            "collate",
            "column",
            "constraint",
            "create",
            "credentials",
            "cross",
            "current_date",
            "current_time",
            "current_timestamp",
            "current_user",
            "current_user_id",
            "default",
            "deferrable",
            "deflate",
            "defrag",
            "delta",
            "delta32k",
            "desc",
            "disable",
            "distinct",
            "do",
            "else",
            "emptyasnull",
            "enable",
            "encode",
            "encrypt ",
            "encryption",
            "end",
            "except",
            "explicit",
            "false",
            "for",
            "foreign",
            "freeze",
            "from",
            "full",
            "globaldict256",
            "globaldict64k",
            "grant",
            "group",
            "gzip",
            "having",
            "identity",
            "ignore",
            "ilike",
            "in",
            "initially",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "isnull",
            "join",
            "leading",
            "left",
            "like",
            "limit",
            "localtime",
            "localtimestamp",
            "lun",
            "luns",
            "lzo",
            "lzop",
            "minus",
            "mostly16",
            "mostly32",
            "mostly8",
            "natural",
            "new",
            "not",
            "notnull",
            "null",
            "nulls",
            "off",
            "offline",
            "offset",
            "oid",
            "old",
            "on",
            "only",
            "open",
            "or",
            "order",
            "outer",
            "overlaps",
            "parallel",
            "partition",
            "percent",
            "permissions",
            "pivot",
            "placing",
            "primary",
            "raw",
            "readratio",
            "recover",
            "references",
            "rejectlog",
            "resort",
            "respect",
            "restore",
            "right",
            "select",
            "session_user",
            "similar",
            "snapshot",
            "some",
            "sysdate",
            "system",
            "table",
            "tag",
            "tdes",
            "text255",
            "text32k",
            "then",
            "timestamp",
            "to",
            "top",
            "trailing",
            "true",
            "truncatecolumns",
            "type",
            "union",
            "unique",
            "unnest",
            "unpivot",
            "user",
            "using",
            "verbose",
            "wallet",
            "when",
            "where",
            "with",
            "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            args = expression.expressions
            num_args = len(args)

            if num_args > 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            arg = self.sql(seq_get(args, 0))
            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, whereas people
            generally mean a VARCHAR of maximum length, which is `VARCHAR(MAX)` in Redshift.
            Therefore, if we get a `TEXT` data type without precision, we convert it to
            `VARCHAR(MAX)`; if it does have precision, we just convert `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            exprs = self.expressions(expression, flat=True)
            exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
            location = self.sql(expression, "location")
            location = f" LOCATION {location}" if location else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" FILE FORMAT {file_format}" if file_format else ""

            return f"SET{exprs}{location}{file_format}"
```
class Redshift(Postgres):
NORMALIZATION_STRATEGY =
<NormalizationStrategy.CASE_INSENSITIVE: 'CASE_INSENSITIVE'>
Specifies the strategy according to which identifiers should be normalized.
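As a hedged illustration (not taken from the module itself), the optimizer helper `sqlglot.optimizer.normalize_identifiers` applies this strategy; under CASE_INSENSITIVE even quoted identifiers can be lowercased. The names below are invented and the exact output may differ between sqlglot versions.

```python
import sqlglot
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

expr = sqlglot.parse_one('SELECT "Loaded_At" FROM Events', read="redshift")

# Identifier case is not significant in Redshift, so normalization can safely
# lowercase both the quoted and the unquoted name.
print(normalize_identifiers(expr, dialect="redshift").sql(dialect="redshift"))
# e.g. SELECT "loaded_at" FROM events
```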
TIME_MAPPING: Dict[str, str] =
{'AM': '%p', 'PM': '%p', 'D': '%u', 'DD': '%d', 'DDD': '%j', 'FMDD': '%-d', 'FMDDD': '%-j', 'FMHH12': '%-I', 'FMHH24': '%-H', 'FMMI': '%-M', 'FMMM': '%-m', 'FMSS': '%-S', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'MM': '%m', 'OF': '%z', 'SS': '%S', 'TMDay': '%A', 'TMDy': '%a', 'TMMon': '%b', 'TMMonth': '%B', 'TZ': '%Z', 'US': '%f', 'WW': '%U', 'YY': '%y', 'YYYY': '%Y', 'MON': '%b', 'HH': '%H'}
Associates this dialect's time formats with their equivalent Python strftime formats.
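For instance (a sketch with an invented column name; the rendered output depends on the target dialect and sqlglot version), this mapping is what lets a Redshift `TO_CHAR` format string be rewritten into strftime tokens:

```python
import sqlglot

# 'MON' -> %b and 'HH' -> %H come from the Redshift-specific TIME_MAPPING entries;
# the remaining tokens are inherited from Postgres.
print(
    sqlglot.transpile(
        "SELECT TO_CHAR(created_at, 'YYYY-MON-DD HH:MI:SS')",
        read="redshift",
        write="duckdb",
    )[0]
)
# e.g. SELECT STRFTIME(created_at, '%Y-%b-%d %H:%M:%S')
```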
UNESCAPED_SEQUENCES: Dict[str, str] =
{'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}
Mapping of an escaped sequence (e.g. `\\n`) to its unescaped version (a literal newline).
tokenizer_class =
<class 'Redshift.Tokenizer'>
parser_class =
<class 'Redshift.Parser'>
generator_class =
<class 'Redshift.Generator'>
TIME_TRIE: Dict =
{'A': {'M': {0: True}}, 'P': {'M': {0: True}}, 'D': {0: True, 'D': {0: True, 'D': {0: True}}}, 'F': {'M': {'D': {'D': {0: True, 'D': {0: True}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'S': {'S': {0: True}}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}, 0: True}}, 'M': {'I': {0: True}, 'M': {0: True}, 'O': {'N': {0: True}}}, 'O': {'F': {0: True}}, 'S': {'S': {0: True}}, 'T': {'M': {'D': {'a': {'y': {0: True}}, 'y': {0: True}}, 'M': {'o': {'n': {0: True, 't': {'h': {0: True}}}}}}, 'Z': {0: True}}, 'U': {'S': {0: True}}, 'W': {'W': {0: True}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}}
FORMAT_TRIE: Dict =
{'A': {'M': {0: True}}, 'P': {'M': {0: True}}, 'D': {0: True, 'D': {0: True, 'D': {0: True}}}, 'F': {'M': {'D': {'D': {0: True, 'D': {0: True}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'M': {'I': {0: True}, 'M': {0: True}}, 'S': {'S': {0: True}}}}, 'H': {'H': {'1': {'2': {0: True}}, '2': {'4': {0: True}}, 0: True}}, 'M': {'I': {0: True}, 'M': {0: True}, 'O': {'N': {0: True}}}, 'O': {'F': {0: True}}, 'S': {'S': {0: True}}, 'T': {'M': {'D': {'a': {'y': {0: True}}, 'y': {0: True}}, 'M': {'o': {'n': {0: True, 't': {'h': {0: True}}}}}}, 'Z': {0: True}}, 'U': {'S': {0: True}}, 'W': {'W': {0: True}}, 'Y': {'Y': {0: True, 'Y': {'Y': {0: True}}}}}
INVERSE_TIME_MAPPING: Dict[str, str] =
{'%p': 'PM', '%u': 'D', '%d': 'DD', '%j': 'DDD', '%-d': 'FMDD', '%-j': 'FMDDD', '%-I': 'FMHH12', '%-H': 'FMHH24', '%-M': 'FMMI', '%-m': 'FMMM', '%-S': 'FMSS', '%I': 'HH12', '%H': 'HH', '%M': 'MI', '%m': 'MM', '%z': 'OF', '%S': 'SS', '%A': 'TMDay', '%a': 'TMDy', '%b': 'MON', '%B': 'TMMonth', '%Z': 'TZ', '%f': 'US', '%U': 'WW', '%y': 'YY', '%Y': 'YYYY'}
INVERSE_TIME_TRIE: Dict =
{'%': {'p': {0: True}, 'u': {0: True}, 'd': {0: True}, 'j': {0: True}, '-': {'d': {0: True}, 'j': {0: True}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'm': {0: True}, 'S': {0: True}}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'm': {0: True}, 'z': {0: True}, 'S': {0: True}, 'A': {0: True}, 'a': {0: True}, 'b': {0: True}, 'B': {0: True}, 'Z': {0: True}, 'f': {0: True}, 'U': {0: True}, 'y': {0: True}, 'Y': {0: True}}}
ESCAPED_SEQUENCES: Dict[str, str] =
{'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
Inherited Members
- sqlglot.dialects.dialect.Dialect
- Dialect
- WEEK_OFFSET
- UNNEST_COLUMN_ONLY
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- SUPPORTS_SEMI_ANTI_JOIN
- NORMALIZE_FUNCTIONS
- LOG_BASE_FIRST
- SAFE_DIVISION
- DATE_FORMAT
- DATEINT_FORMAT
- FORMAT_MAPPING
- PSEUDOCOLUMNS
- PREFER_CTE_ALIAS_COLUMN
- get_or_raise
- format_time
- normalize_identifier
- case_sensitive
- can_identify
- quote_identifier
- to_json_path
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class Parser(Postgres.Parser):
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
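A short, hedged sketch of the parser side (identifiers invented; formatting of the round-tripped SQL may vary by version): Redshift-only spellings are parsed into generic AST nodes so they can be re-rendered for any dialect.

```python
import sqlglot
from sqlglot import exp

node = sqlglot.parse_one("SELECT DATEADD(month, 3, created_at), GETDATE()", read="redshift")

# DATEADD's (unit, amount, expression) argument order is rearranged by
# _build_date_delta into a TsOrDsAdd node; GETDATE becomes CurrentTimestamp.
print(node.find(exp.TsOrDsAdd) is not None)          # True
print(node.find(exp.CurrentTimestamp) is not None)   # True

# Rendering back through the Redshift generator restores the original spellings.
print(node.sql(dialect="redshift"))
```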
FUNCTIONS =
{'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <function Redshift.Parser.<lambda>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <function _build_date_delta.<locals>._builder>, 'DATEDIFF': <function _build_date_delta.<locals>._builder>, 'DATE_DIFF': <function _build_date_delta.<locals>._builder>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function build_timestamp_trunc>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <function _build_generate_series>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 
'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMPFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <function build_formatted_time.<locals>._builder>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToNumber'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_json_extract_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'TO_HEX': <function build_hex>, 'JSON_EXTRACT_PATH': <function build_json_extract_path.<locals>._builder>, 'MAKE_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'MAKE_TIMESTAMP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampFromParts'>>, 'NOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'TO_TIMESTAMP': <function _build_to_timestamp>, 'UNNEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'DATEADD': <function _build_date_delta.<locals>._builder>, 'GETDATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'SPLIT_TO_ARRAY': <function Redshift.Parser.<lambda>>, 'STRTOL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>}
NO_PAREN_FUNCTION_PARSERS =
{'ANY': <function Parser.<lambda>>, 'CASE': <function Parser.<lambda>>, 'IF': <function Parser.<lambda>>, 'NEXT': <function Parser.<lambda>>, 'APPROXIMATE': <function Redshift.Parser.<lambda>>, 'SYSDATE': <function Redshift.Parser.<lambda>>}
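The two Redshift-specific no-paren entries are easiest to see via a round trip (a sketch; exact output may differ between versions):

```python
import sqlglot

# SYSDATE parses into CurrentTimestamp(transaction=True) and is regenerated as
# SYSDATE for Redshift, while other dialects emit their own current-timestamp form.
print(sqlglot.transpile("SELECT SYSDATE", read="redshift", write="redshift")[0])  # SELECT SYSDATE
print(sqlglot.transpile("SELECT SYSDATE", read="redshift", write="postgres")[0])  # e.g. SELECT CURRENT_TIMESTAMP

# APPROXIMATE COUNT(DISTINCT ...) is parsed into exp.ApproxDistinct and round-trips.
print(sqlglot.transpile("SELECT APPROXIMATE COUNT(DISTINCT user_id) FROM events", read="redshift")[0])
```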
SET_TRIE: Dict =
{'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
- sqlglot.parser.Parser
- Parser
- NO_PAREN_FUNCTIONS
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ID_VAR_TOKENS
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- TYPE_CONVERTER
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- NULL_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_UNION
- UNION_MODIFIERS
- NO_PAREN_IF_COMMANDS
- COLON_IS_JSON_EXTRACT
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Tokenizer(Postgres.Tokenizer):
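A hedged sketch of what the Redshift tokenizer overrides enable (MINUS mapped to EXCEPT, `#` no longer a single token); table names are invented and output may vary by version:

```python
import sqlglot

# MINUS is tokenized as EXCEPT, so the set operation transpiles to dialects that
# only understand EXCEPT.
print(sqlglot.transpile("SELECT 1 MINUS SELECT 2", read="redshift", write="postgres")[0])
# e.g. SELECT 1 EXCEPT SELECT 2

# Removing "#" from SINGLE_TOKENS lets #-prefixed temp table names tokenize as a
# single identifier instead of breaking at the "#".
print(sqlglot.parse_one("SELECT * FROM #stage_tmp", read="redshift").sql(dialect="redshift"))
# e.g. SELECT * FROM #stage_tmp
```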
KEYWORDS =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': 
<TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 
'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.DOUBLE: 'DOUBLE'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 
'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, '~': <TokenType.RLIKE: 'RLIKE'>, '@@': <TokenType.DAT: 'DAT'>, '@>': <TokenType.AT_GT: 'AT_GT'>, '<@': <TokenType.LT_AT: 'LT_AT'>, '|/': <TokenType.PIPE_SLASH: 'PIPE_SLASH'>, '||/': <TokenType.DPIPE_SLASH: 'DPIPE_SLASH'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BIGSERIAL': <TokenType.BIGSERIAL: 'BIGSERIAL'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CONSTRAINT TRIGGER': <TokenType.COMMAND: 'COMMAND'>, 'CSTRING': <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'DO': <TokenType.COMMAND: 'COMMAND'>, 'EXEC': <TokenType.COMMAND: 'COMMAND'>, 'HSTORE': <TokenType.HSTORE: 'HSTORE'>, 'MONEY': <TokenType.MONEY: 'MONEY'>, 'NAME': <TokenType.NAME: 'NAME'>, 'OID': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'ONLY': <TokenType.ONLY: 'ONLY'>, 'OPERATOR': <TokenType.OPERATOR: 'OPERATOR'>, 'REFRESH': <TokenType.COMMAND: 'COMMAND'>, 'REINDEX': <TokenType.COMMAND: 'COMMAND'>, 'RESET': <TokenType.COMMAND: 'COMMAND'>, 'REVOKE': <TokenType.COMMAND: 'COMMAND'>, 'SERIAL': <TokenType.SERIAL: 'SERIAL'>, 'SMALLSERIAL': <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, 'REGCLASS': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGCOLLATION': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGCONFIG': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGDICTIONARY': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGNAMESPACE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGOPER': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGOPERATOR': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGPROC': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGPROCEDURE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGROLE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'REGTYPE': <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, 'HLLSKETCH': <TokenType.HLLSKETCH: 'HLLSKETCH'>, 'SUPER': <TokenType.SUPER: 'SUPER'>, 'TOP': <TokenType.TOP: 'TOP'>, 'UNLOAD': <TokenType.COMMAND: 'COMMAND'>, 'VARBYTE': <TokenType.VARBINARY: 'VARBINARY'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>}
SINGLE_TOKENS =
{'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.HEREDOC_STRING: 'HEREDOC_STRING'>}
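As a rough illustration of the tokenizer overrides above (MINUS tokenized as EXCEPT, and '#' removed from SINGLE_TOKENS so it can begin a temporary-table identifier), the following sketch assumes a recent sqlglot release; the expected outputs in the comments are illustrative, not verified here:

import sqlglot

# MINUS is tokenized as EXCEPT, so it can be rewritten for dialects that
# only know EXCEPT.
print(sqlglot.transpile("SELECT 1 MINUS SELECT 2", read="redshift", write="postgres")[0])
# expected: SELECT 1 EXCEPT SELECT 2

# '#' is no longer a single-character token, so it can prefix a temp table name.
print(sqlglot.parse_one("SELECT * FROM #staging_orders", read="redshift").sql(dialect="redshift"))
# expected: SELECT * FROM #staging_orders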
class Generator(Postgres.Generator):
    LOCKING_READS_SUPPORTED = False
    QUERY_HINTS = False
    VALUES_AS_TABLE = False
    TZ_TO_WITH_TIME_ZONE = True
    NVL2_SUPPORTED = True
    LAST_DAY_SUPPORTS_DATE_PART = False
    CAN_IMPLEMENT_ARRAY_ANY = False
    MULTI_ARG_DISTINCT = True
    COPY_PARAMS_ARE_WRAPPED = False
    HEX_FUNC = "TO_HEX"
    # Redshift doesn't have `WITH` as part of their with_properties so we remove it
    WITH_PROPERTIES_PREFIX = " "

    TYPE_MAPPING = {
        **Postgres.Generator.TYPE_MAPPING,
        exp.DataType.Type.BINARY: "VARBYTE",
        exp.DataType.Type.INT: "INTEGER",
        exp.DataType.Type.TIMETZ: "TIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.VARBINARY: "VARBYTE",
        exp.DataType.Type.ROWVERSION: "VARBYTE",
    }

    TRANSFORMS = {
        **Postgres.Generator.TRANSFORMS,
        exp.Concat: concat_to_dpipe_sql,
        exp.ConcatWs: concat_ws_to_dpipe_sql,
        exp.ApproxDistinct: lambda self, e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
        exp.CurrentTimestamp: lambda self, e: (
            "SYSDATE" if e.args.get("transaction") else "GETDATE()"
        ),
        exp.DateAdd: date_delta_sql("DATEADD"),
        exp.DateDiff: date_delta_sql("DATEDIFF"),
        exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
        exp.DistStyleProperty: lambda self, e: self.naked_property(e),
        exp.FromBase: rename_func("STRTOL"),
        exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
        exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
        exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
        exp.GroupConcat: rename_func("LISTAGG"),
        exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
        exp.ParseJSON: rename_func("JSON_PARSE"),
        exp.Select: transforms.preprocess(
            [
                transforms.eliminate_distinct_on,
                transforms.eliminate_semi_and_anti_joins,
                transforms.unqualify_unnest,
            ]
        ),
        exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        exp.StartsWith: lambda self, e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
        exp.StringToArray: rename_func("SPLIT_TO_ARRAY"),
        exp.TableSample: no_tablesample_sql,
        exp.TsOrDsAdd: date_delta_sql("DATEADD"),
        exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
        exp.UnixToTime: lambda self, e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
    }

    # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
    TRANSFORMS.pop(exp.Pivot)

    # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
    TRANSFORMS.pop(exp.Pow)

    # Redshift supports ANY_VALUE(..)
    TRANSFORMS.pop(exp.AnyValue)

    # Redshift supports LAST_DAY(..)
    TRANSFORMS.pop(exp.LastDay)

    RESERVED_KEYWORDS = {
        "aes128", "aes256", "all", "allowoverwrite", "analyse", "analyze", "and", "any", "array", "as",
        "asc", "authorization", "az64", "backup", "between", "binary", "blanksasnull", "both", "bytedict", "bzip2",
        "case", "cast", "check", "collate", "column", "constraint", "create", "credentials", "cross", "current_date",
        "current_time", "current_timestamp", "current_user", "current_user_id", "default", "deferrable", "deflate", "defrag", "delta", "delta32k",
        "desc", "disable", "distinct", "do", "else", "emptyasnull", "enable", "encode", "encrypt ", "encryption",
        "end", "except", "explicit", "false", "for", "foreign", "freeze", "from", "full", "globaldict256",
        "globaldict64k", "grant", "group", "gzip", "having", "identity", "ignore", "ilike", "in", "initially",
        "inner", "intersect", "interval", "into", "is", "isnull", "join", "leading", "left", "like",
        "limit", "localtime", "localtimestamp", "lun", "luns", "lzo", "lzop", "minus", "mostly16", "mostly32",
        "mostly8", "natural", "new", "not", "notnull", "null", "nulls", "off", "offline", "offset",
        "oid", "old", "on", "only", "open", "or", "order", "outer", "overlaps", "parallel",
        "partition", "percent", "permissions", "pivot", "placing", "primary", "raw", "readratio", "recover", "references",
        "rejectlog", "resort", "respect", "restore", "right", "select", "session_user", "similar", "snapshot", "some",
        "sysdate", "system", "table", "tag", "tdes", "text255", "text32k", "then", "timestamp", "to",
        "top", "trailing", "true", "truncatecolumns", "type", "union", "unique", "unnest", "unpivot", "user",
        "using", "verbose", "wallet", "when", "where", "with", "without",
    }

    def unnest_sql(self, expression: exp.Unnest) -> str:
        args = expression.expressions
        num_args = len(args)

        if num_args > 1:
            self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
            return ""

        arg = self.sql(seq_get(args, 0))
        alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
        return f"{arg} AS {alias}" if alias else arg

    def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
        if expression.is_type(exp.DataType.Type.JSON):
            # Redshift doesn't support a JSON type, so casting to it is treated as a noop
            return self.sql(expression, "this")

        return super().cast_sql(expression, safe_prefix=safe_prefix)

    def datatype_sql(self, expression: exp.DataType) -> str:
        """
        Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
        VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
        without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
        `TEXT` to `VARCHAR`.
        """
        if expression.is_type("text"):
            expression.set("this", exp.DataType.Type.VARCHAR)
            precision = expression.args.get("expressions")

            if not precision:
                expression.append("expressions", exp.var("MAX"))

        return super().datatype_sql(expression)

    def alterset_sql(self, expression: exp.AlterSet) -> str:
        exprs = self.expressions(expression, flat=True)
        exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
        location = self.sql(expression, "location")
        location = f" LOCATION {location}" if location else ""
        file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
        file_format = f" FILE FORMAT {file_format}" if file_format else ""

        return f"SET{exprs}{location}{file_format}"
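A short, non-authoritative sketch of a few of these transforms in action; the source dialects, sample queries and expected outputs in the comments are illustrative assumptions:

import sqlglot

# exp.CurrentTimestamp is rendered as GETDATE() unless it was parsed from SYSDATE.
print(sqlglot.transpile("SELECT CURRENT_TIMESTAMP", read="postgres", write="redshift")[0])
# expected: SELECT GETDATE()

# exp.ApproxDistinct uses Redshift's APPROXIMATE COUNT(DISTINCT ...) spelling.
print(sqlglot.transpile("SELECT APPROX_COUNT_DISTINCT(user_id) FROM t", read="duckdb", write="redshift")[0])
# expected: SELECT APPROXIMATE COUNT(DISTINCT user_id) FROM t

# exp.StartsWith is rewritten to a LIKE with a concatenated '%' suffix.
print(sqlglot.transpile("SELECT STARTS_WITH(name, 'ab') FROM t", read="duckdb", write="redshift")[0])
# expected: SELECT name LIKE 'ab' || '%' FROM t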
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
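These options are normally passed through sqlglot.transpile (or Expression.sql) rather than by constructing a Generator directly; a minimal sketch with an arbitrary sample query:

import sqlglot

sql = "select a as user_name, b from schema1.tbl where b > 1"

# Generator options are forwarded as keyword arguments by sqlglot.transpile
# (and by Expression.sql), so there is rarely a need to build a Generator by hand.
print(
    sqlglot.transpile(
        sql,
        read="redshift",
        write="redshift",
        pretty=True,    # multi-line, indented output
        identify=True,  # quote every identifier
    )[0]
)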
TYPE_MAPPING =
{<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBYTE', <Type.TINYINT: 'TINYINT'>: 'SMALLINT', <Type.FLOAT: 'FLOAT'>: 'REAL', <Type.DOUBLE: 'DOUBLE'>: 'DOUBLE PRECISION', <Type.BINARY: 'BINARY'>: 'VARBYTE', <Type.VARBINARY: 'VARBINARY'>: 'VARBYTE', <Type.DATETIME: 'DATETIME'>: 'TIMESTAMP', <Type.INT: 'INT'>: 'INTEGER', <Type.TIMETZ: 'TIMETZ'>: 'TIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP'}
TRANSFORMS =
{<class 'sqlglot.expressions.JSONPathKey'>: <function json_path_key_only_name>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function json_extract_segments.<locals>._json_extract_segments>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function json_extract_segments.<locals>._json_extract_segments>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Array'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayFilter'>: <function filter_array_using_unnest>, <class 'sqlglot.expressions.ArraySize'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.ColumnDef'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CurrentDate'>: <function no_paren_current_date_sql>, <class 'sqlglot.expressions.CurrentTimestamp'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.CurrentUser'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DataType'>: <function _datatype_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONBExtract'>: <function Postgres.Generator.<lambda>>, 
<class 'sqlglot.expressions.JSONBExtractScalar'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.JSONBContains'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.ParseJSON'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MapFromEntries'>: <function no_map_from_entries_sql>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.Merge'>: <function merge_without_target_sql>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.RegexpLike'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpILike'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrPosition'>: <function str_position_sql>, <class 'sqlglot.expressions.StrToDate'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.StructExtract'>: <function struct_extract_sql>, <class 'sqlglot.expressions.Substring'>: <function _substring_sql>, <class 'sqlglot.expressions.TimeFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function trim_sql>, <class 'sqlglot.expressions.TryCast'>: <function no_trycast_sql>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.UnixToTime'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Postgres.Generator.<lambda>>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Variance'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function bool_xor_sql>, <class 'sqlglot.expressions.Concat'>: <function concat_to_dpipe_sql>, <class 'sqlglot.expressions.ConcatWs'>: <function concat_ws_to_dpipe_sql>, <class 'sqlglot.expressions.ApproxDistinct'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.DistKeyProperty'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.DistStyleProperty'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.FromBase'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GeneratedAsIdentityColumnConstraint'>: <function generatedasidentitycolumnconstraint_sql>, <class 'sqlglot.expressions.Hex'>: <function 
Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.SortKeyProperty'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function Redshift.Generator.<lambda>>, <class 'sqlglot.expressions.StringToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TableSample'>: <function no_tablesample_sql>}
RESERVED_KEYWORDS =
{'end', 'placing', 'inner', 'parallel', 'some', 'identity', 'delta', 'truncatecolumns', 'percent', 'analyze', 'order', 'true', 'column', 'without', 'as', 'create', 'then', 'permissions', 'union', 'where', 'any', 'check', 'on', 'case', 'when', 'from', 'between', 'is', 'join', 'delta32k', 'both', 'intersect', 'offline', 'enable', 'initially', 'binary', 'session_user', 'table', 'into', 'tag', 'explicit', 'sysdate', 'authorization', 'desc', 'respect', 'unpivot', 'mostly16', 'in', 'constraint', 'current_date', 'default', 'globaldict64k', 'mostly8', 'references', 'rejectlog', 'aes128', 'oid', 'recover', 'using', 'text32k', 'backup', 'deflate', 'text255', 'with', 'emptyasnull', 'lzop', 'current_user', 'foreign', 'limit', 'only', 'full', 'off', 'encrypt ', 'new', 'and', 'deferrable', 'pivot', 'restore', 'gzip', 'else', 'encode', 'luns', 'user', 'verbose', 'having', 'collate', 'minus', 'unique', 'credentials', 'defrag', 'cross', 'system', 'allowoverwrite', 'select', 'az64', 'top', 'type', 'open', 'notnull', 'aes256', 'outer', 'overlaps', 'bytedict', 'grant', 'ilike', 'nulls', 'asc', 'to', 'current_timestamp', 'current_user_id', 'group', 'leading', 'like', 'raw', 'disable', 'globaldict256', 'natural', 'resort', 'old', 'all', 'offset', 'do', 'similar', 'current_time', 'wallet', 'cast', 'lun', 'null', 'tdes', 'encryption', 'not', 'or', 'timestamp', 'localtime', 'except', 'trailing', 'mostly32', 'bzip2', 'primary', 'freeze', 'blanksasnull', 'interval', 'lzo', 'distinct', 'partition', 'for', 'ignore', 'unnest', 'analyse', 'array', 'readratio', 'snapshot', 'localtimestamp', 'false', 'isnull', 'left', 'right'}
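Identifiers that collide with these reserved keywords are quoted when SQL is generated for Redshift. A hedged sketch, assuming the base generator's reserved-keyword quoting and an invented table with columns named permissions and tag:

import sqlglot

# Column names that collide with reserved keywords (e.g. "permissions", "tag")
# should come out quoted in Redshift output even without identify=True.
print(sqlglot.transpile("SELECT permissions, tag FROM t", read="duckdb", write="redshift")[0])
# expected: SELECT "permissions", "tag" FROM t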
def unnest_sql(self, expression: exp.Unnest) -> str:
    args = expression.expressions
    num_args = len(args)

    if num_args > 1:
        self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
        return ""

    arg = self.sql(seq_get(args, 0))
    alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
    return f"{arg} AS {alias}" if alias else arg
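A rough sketch of what unnest_sql produces when transpiling an explicit UNNEST from another dialect; the Trino input and the expected Redshift output in the comment are assumptions and may differ slightly across sqlglot versions:

import sqlglot

# An explicit UNNEST from Trino becomes Redshift's implicit form in the FROM clause.
print(
    sqlglot.transpile(
        "SELECT c FROM t CROSS JOIN UNNEST(t.arr) AS _t(c)",
        read="trino",
        write="redshift",
    )[0]
)
# expected (approximately): SELECT c FROM t, t.arr AS c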
def cast_sql(self, expression: sqlglot.expressions.Cast, safe_prefix: Optional[str] = None) -> str:
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    if expression.is_type(exp.DataType.Type.JSON):
        # Redshift doesn't support a JSON type, so casting to it is treated as a noop
        return self.sql(expression, "this")

    return super().cast_sql(expression, safe_prefix=safe_prefix)
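For example (a minimal sketch; the column and table names are made up), casting to JSON disappears on the Redshift side:

import sqlglot

# The JSON cast is dropped and only the inner expression survives.
print(sqlglot.transpile("SELECT CAST(payload AS JSON) FROM t", read="postgres", write="redshift")[0])
# expected: SELECT payload FROM t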
def datatype_sql(self, expression: exp.DataType) -> str:
    """
    Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
    VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
    without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
    `TEXT` to `VARCHAR`.
    """
    if expression.is_type("text"):
        expression.set("this", exp.DataType.Type.VARCHAR)
        precision = expression.args.get("expressions")

        if not precision:
            expression.append("expressions", exp.var("MAX"))

    return super().datatype_sql(expression)
Redshift converts the TEXT data type to VARCHAR(255) by default when people more generally mean VARCHAR of max length, which is VARCHAR(max) in Redshift. Therefore, if we get a TEXT data type without precision we convert it to VARCHAR(max); if it does have precision, we just convert TEXT to VARCHAR.
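A small sketch of this behavior, assuming a Postgres-flavored input with an invented table definition:

import sqlglot

# TEXT without a precision becomes VARCHAR(MAX) in the Redshift output.
print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="postgres", write="redshift")[0])
# expected: CREATE TABLE t (c VARCHAR(MAX))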
def alterset_sql(self, expression: exp.AlterSet) -> str:
    exprs = self.expressions(expression, flat=True)
    exprs = f" TABLE PROPERTIES ({exprs})" if exprs else ""
    location = self.sql(expression, "location")
    location = f" LOCATION {location}" if location else ""
    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    file_format = f" FILE FORMAT {file_format}" if file_format else ""

    return f"SET{exprs}{location}{file_format}"
AFTER_HAVING_MODIFIER_TRANSFORMS =
{'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- EXPLICIT_UNION
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_FETCH
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- OUTER_UNION_MODIFIERS
- COPY_PARAMS_EQ_REQUIRED
- STAR_EXCEPT
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- NAMED_PLACEHOLDER_TOKEN
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- transformcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- with_properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- set_operations
- union_sql
- union_op
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- currenttimestamp_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterdiststyle_sql
- altersortkey_sql
- renametable_sql
- renamecolumn_sql
- altertable_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- slice_sql
- sub_sql
- trycast_sql
- try_sql
- log_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- forin_sql
- refresh_sql
- operator_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- generateseries_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- datadeletionproperty_sql
- maskingpolicycolumnconstraint_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- COPY_HAS_INTO_KEYWORD
- SUPPORTED_JSON_PATH_PARTS
- PROPERTIES_LOCATION
- schemacommentproperty_sql
- commentcolumnconstraint_sql
- bracket_sql
- matchagainst_sql