sqlglot.dialects.redshift
"""Redshift dialect for sqlglot, derived from the Postgres dialect."""

from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    """Return a builder for DATEADD/DATEDIFF-style functions.

    Redshift's argument order is (unit, expression, this), i.e. the date part
    comes first, so the positional arguments are mapped accordingly.
    """

    def _builder(args: t.List) -> E:
        expr = expr_type(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
        if expr_type is exp.TsOrDsAdd:
            # DATEADD always produces a TIMESTAMP in Redshift
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True

    TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
    TIME_MAPPING = {
        **Postgres.TIME_MAPPING,
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "STRTOL": exp.FromBase.from_arg_list,
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, transaction=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
                # Fix: this flag was previously accepted but silently dropped
                parse_partition=parse_partition,
            )

            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            """Parse CONVERT(type, expr) — note the type comes first in Redshift."""
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            """Parse `APPROXIMATE COUNT(DISTINCT x)` into exp.ApproxDistinct.

            If the tokens following APPROXIMATE are not a COUNT(DISTINCT ...) call,
            the parser position is rewound so they can be parsed another way.
            """
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            self._retreat(index)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self,
            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.ParseJSON: rename_func("JSON_PARSE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self,
            e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
        TRANSFORMS.pop(exp.Pow)

        # Redshift supports ANY_VALUE(..)
        TRANSFORMS.pop(exp.AnyValue)

        # Redshift supports LAST_DAY(..)
        TRANSFORMS.pop(exp.LastDay)

        RESERVED_KEYWORDS = {
            "aes128", "aes256", "all", "allowoverwrite", "analyse", "analyze",
            "and", "any", "array", "as", "asc", "authorization", "az64",
            "backup", "between", "binary", "blanksasnull", "both", "bytedict",
            "bzip2", "case", "cast", "check", "collate", "column", "constraint",
            "create", "credentials", "cross", "current_date", "current_time",
            "current_timestamp", "current_user", "current_user_id", "default",
            "deferrable", "deflate", "defrag", "delta", "delta32k", "desc",
            "disable", "distinct", "do", "else", "emptyasnull", "enable",
            # Fix: was "encrypt " (trailing space), which could never match a
            # normalized identifier, so ENCRYPT was never treated as reserved
            "encode", "encrypt", "encryption", "end", "except", "explicit",
            "false", "for", "foreign", "freeze", "from", "full",
            "globaldict256", "globaldict64k", "grant", "group", "gzip",
            "having", "identity", "ignore", "ilike", "in", "initially",
            "inner", "intersect", "interval", "into", "is", "isnull", "join",
            "leading", "left", "like", "limit", "localtime", "localtimestamp",
            "lun", "luns", "lzo", "lzop", "minus", "mostly16", "mostly32",
            "mostly8", "natural", "new", "not", "notnull", "null", "nulls",
            "off", "offline", "offset", "oid", "old", "on", "only", "open",
            "or", "order", "outer", "overlaps", "parallel", "partition",
            "percent", "permissions", "pivot", "placing", "primary", "raw",
            "readratio", "recover", "references", "rejectlog", "resort",
            "respect", "restore", "right", "select", "session_user",
            "similar", "snapshot", "some", "sysdate", "system", "table",
            "tag", "tdes", "text255", "text32k", "then", "timestamp", "to",
            "top", "trailing", "true", "truncatecolumns", "type", "union",
            "unique", "unnest", "unpivot", "user", "using", "verbose",
            "wallet", "when", "where", "with", "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render a single-argument UNNEST as `<arg> AS <alias>`.

            Redshift has no UNNEST function; SUPER arrays are unnested by
            referencing them directly in FROM, optionally with a column alias.
            """
            args = expression.expressions
            num_args = len(args)

            if num_args > 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            arg = self.sql(seq_get(args, 0))
            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)
class Redshift(Postgres):
    # Identifier normalization is case-insensitive, per
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False
    HEX_LOWERCASE = True

    TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
    TIME_MAPPING = {
        **Postgres.TIME_MAPPING,
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "STRTOL": exp.FromBase.from_arg_list,
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, transaction=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            # A leading UNPIVOT means a SUPER object is being unpivoted,
            # e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            if unpivot:
                return self.expression(exp.Pivot, this=table, unpivot=True)
            return table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            # CONVERT(type, expr): the target type precedes the value
            target_type = self._parse_types()
            self._match(TokenType.COMMA)
            value = self._parse_bitwise()
            return self.expression(exp.TryCast, this=value, to=target_type, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            # Try `APPROXIMATE COUNT(DISTINCT x)`; rewind on any other shape
            start = self._index - 1
            parsed = self._parse_function()

            if isinstance(parsed, exp.Count) and isinstance(parsed.this, exp.Distinct):
                return self.expression(
                    exp.ApproxDistinct, this=seq_get(parsed.this.expressions, 0)
                )

            self._retreat(start)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # `#` may prefix a (temp) table identifier, so it must not be a single token
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False
        HEX_FUNC = "TO_HEX"

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self,
            e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.ParseJSON: rename_func("JSON_PARSE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                ]
            ),
            exp.SortKeyProperty: lambda self,
            e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self,
            e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self,
            e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
        }

        # Unlike Postgres, Redshift natively supports PIVOT, POW/POWER,
        # ANY_VALUE(..) and LAST_DAY(..), so drop those fallback transforms
        TRANSFORMS.pop(exp.Pivot)
        TRANSFORMS.pop(exp.Pow)
        TRANSFORMS.pop(exp.AnyValue)
        TRANSFORMS.pop(exp.LastDay)

        RESERVED_KEYWORDS = {
            "aes128", "aes256", "all", "allowoverwrite", "analyse", "analyze",
            "and", "any", "array", "as", "asc", "authorization", "az64",
            "backup", "between", "binary", "blanksasnull", "both", "bytedict",
            "bzip2", "case", "cast", "check", "collate", "column", "constraint",
            "create", "credentials", "cross", "current_date", "current_time",
            "current_timestamp", "current_user", "current_user_id", "default",
            "deferrable", "deflate", "defrag", "delta", "delta32k", "desc",
            "disable", "distinct", "do", "else", "emptyasnull", "enable",
            "encode", "encrypt ", "encryption", "end", "except", "explicit",
            "false", "for", "foreign", "freeze", "from", "full",
            "globaldict256", "globaldict64k", "grant", "group", "gzip",
            "having", "identity", "ignore", "ilike", "in", "initially",
            "inner", "intersect", "interval", "into", "is", "isnull", "join",
            "leading", "left", "like", "limit", "localtime", "localtimestamp",
            "lun", "luns", "lzo", "lzop", "minus", "mostly16", "mostly32",
            "mostly8", "natural", "new", "not", "notnull", "null", "nulls",
            "off", "offline", "offset", "oid", "old", "on", "only", "open",
            "or", "order", "outer", "overlaps", "parallel", "partition",
            "percent", "permissions", "pivot", "placing", "primary", "raw",
            "readratio", "recover", "references", "rejectlog", "resort",
            "respect", "restore", "right", "select", "session_user",
            "similar", "snapshot", "some", "sysdate", "system", "table",
            "tag", "tdes", "text255", "text32k", "then", "timestamp", "to",
            "top", "trailing", "true", "truncatecolumns", "type", "union",
            "unique", "unnest", "unpivot", "user", "using", "verbose",
            "wallet", "when", "where", "with", "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            # Redshift has no UNNEST function; emit the array expression itself,
            # aliased when a column alias was given
            exprs = expression.expressions
            count = len(exprs)

            if count > 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {count}")
                return ""

            rendered = self.sql(seq_get(exprs, 0))
            column_alias = self.expressions(
                expression.args.get("alias"), key="columns", flat=True
            )
            if column_alias:
                return f"{rendered} AS {column_alias}"
            return rendered

        def with_properties(self, properties: exp.Properties) -> str:
            """Emit table properties without the `WITH` prefix, which Redshift omits."""
            return self.properties(properties, prefix=" ", suffix="")

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            # There is no JSON type in Redshift, so a cast to JSON is a no-op
            if expression.is_type(exp.DataType.Type.JSON):
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render TEXT as VARCHAR, defaulting to VARCHAR(MAX) when no precision is given.

            Redshift would otherwise interpret bare TEXT as VARCHAR(255), which is
            rarely what's intended; an explicit precision is preserved as-is.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                if not expression.args.get("expressions"):
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)
Specifies the strategy according to which identifiers should be normalized.
Associates this dialect's time formats with their equivalent Python strftime
formats.
Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (e.g. a newline character).
Inherited Members
- sqlglot.dialects.dialect.Dialect
- Dialect
- WEEK_OFFSET
- UNNEST_COLUMN_ONLY
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- SUPPORTS_SEMI_ANTI_JOIN
- NORMALIZE_FUNCTIONS
- LOG_BASE_FIRST
- SAFE_DIVISION
- DATE_FORMAT
- DATEINT_FORMAT
- FORMAT_MAPPING
- PSEUDOCOLUMNS
- PREFER_CTE_ALIAS_COLUMN
- get_or_raise
- format_time
- normalize_identifier
- case_sensitive
- can_identify
- quote_identifier
- to_json_path
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class Parser(Postgres.Parser):
    FUNCTIONS = {
        **Postgres.Parser.FUNCTIONS,
        "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            unit=exp.var("month"),
            return_type=exp.DataType.build("TIMESTAMP"),
        ),
        "DATEADD": _build_date_delta(exp.TsOrDsAdd),
        "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
        "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
        "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
        "GETDATE": exp.CurrentTimestamp.from_arg_list,
        "LISTAGG": exp.GroupConcat.from_arg_list,
        "STRTOL": exp.FromBase.from_arg_list,
    }

    NO_PAREN_FUNCTION_PARSERS = {
        **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
        "APPROXIMATE": lambda self: self._parse_approximate_count(),
        "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, transaction=True),
    }

    SUPPORTS_IMPLICIT_UNNEST = True

    def _parse_table(
        self,
        schema: bool = False,
        joins: bool = False,
        alias_tokens: t.Optional[t.Collection[TokenType]] = None,
        parse_bracket: bool = False,
        is_db_reference: bool = False,
        parse_partition: bool = False,
    ) -> t.Optional[exp.Expression]:
        # A leading UNPIVOT unpivots a SUPER object,
        # e.g. `UNPIVOT foo.obj[0] AS val AT attr`
        unpivot = self._match(TokenType.UNPIVOT)
        table = super()._parse_table(
            schema=schema,
            joins=joins,
            alias_tokens=alias_tokens,
            parse_bracket=parse_bracket,
            is_db_reference=is_db_reference,
        )

        if unpivot:
            return self.expression(exp.Pivot, this=table, unpivot=True)
        return table

    def _parse_convert(
        self, strict: bool, safe: t.Optional[bool] = None
    ) -> t.Optional[exp.Expression]:
        # CONVERT(type, expr): the target type precedes the value
        target_type = self._parse_types()
        self._match(TokenType.COMMA)
        value = self._parse_bitwise()
        return self.expression(exp.TryCast, this=value, to=target_type, safe=safe)

    def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
        # Try `APPROXIMATE COUNT(DISTINCT x)`; rewind on any other shape
        start = self._index - 1
        parsed = self._parse_function()

        if isinstance(parsed, exp.Count) and isinstance(parsed.this, exp.Distinct):
            return self.expression(
                exp.ApproxDistinct, this=seq_get(parsed.this.expressions, 0)
            )

        self._retreat(start)
        return None
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
Inherited Members
- sqlglot.parser.Parser
- Parser
- NO_PAREN_FUNCTIONS
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ID_VAR_TOKENS
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- TYPE_CONVERTER
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- NULL_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
- TABLESAMPLE_CSV
- DEFAULT_SAMPLING_METHOD
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_UNION
- UNION_MODIFIERS
- NO_PAREN_IF_COMMANDS
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Tokenizer(Postgres.Tokenizer):
    # Redshift has no bit/hex string literals, and escapes quotes with both \ and ''
    BIT_STRINGS = []
    HEX_STRINGS = []
    STRING_ESCAPES = ["\\", "'"]

    KEYWORDS = {
        **Postgres.Tokenizer.KEYWORDS,
        "HLLSKETCH": TokenType.HLLSKETCH,
        "SUPER": TokenType.SUPER,
        "TOP": TokenType.TOP,
        "UNLOAD": TokenType.COMMAND,
        "VARBYTE": TokenType.VARBINARY,
    }
    KEYWORDS.pop("VALUES")

    # `#` may prefix a table identifier, so it must not tokenize on its own
    SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
    SINGLE_TOKENS.pop("#")
Inherited Members
class Generator(Postgres.Generator):
    LOCKING_READS_SUPPORTED = False
    QUERY_HINTS = False
    VALUES_AS_TABLE = False
    TZ_TO_WITH_TIME_ZONE = True
    NVL2_SUPPORTED = True
    LAST_DAY_SUPPORTS_DATE_PART = False
    CAN_IMPLEMENT_ARRAY_ANY = False
    MULTI_ARG_DISTINCT = True
    COPY_PARAMS_ARE_WRAPPED = False
    HEX_FUNC = "TO_HEX"

    TYPE_MAPPING = {
        **Postgres.Generator.TYPE_MAPPING,
        exp.DataType.Type.BINARY: "VARBYTE",
        exp.DataType.Type.INT: "INTEGER",
        exp.DataType.Type.TIMETZ: "TIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.VARBINARY: "VARBYTE",
        exp.DataType.Type.ROWVERSION: "VARBYTE",
    }

    TRANSFORMS = {
        **Postgres.Generator.TRANSFORMS,
        exp.Concat: concat_to_dpipe_sql,
        exp.ConcatWs: concat_ws_to_dpipe_sql,
        exp.ApproxDistinct: lambda self,
        e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
        exp.CurrentTimestamp: lambda self, e: (
            "SYSDATE" if e.args.get("transaction") else "GETDATE()"
        ),
        exp.DateAdd: date_delta_sql("DATEADD"),
        exp.DateDiff: date_delta_sql("DATEDIFF"),
        exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
        exp.DistStyleProperty: lambda self, e: self.naked_property(e),
        exp.FromBase: rename_func("STRTOL"),
        exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
        exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
        exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
        exp.GroupConcat: rename_func("LISTAGG"),
        exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
        exp.ParseJSON: rename_func("JSON_PARSE"),
        exp.Select: transforms.preprocess(
            [
                transforms.eliminate_distinct_on,
                transforms.eliminate_semi_and_anti_joins,
                transforms.unqualify_unnest,
            ]
        ),
        exp.SortKeyProperty: lambda self,
        e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        exp.StartsWith: lambda self,
        e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
        exp.TableSample: no_tablesample_sql,
        exp.TsOrDsAdd: date_delta_sql("DATEADD"),
        exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
        exp.UnixToTime: lambda self,
        e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
    }

    # Unlike Postgres, Redshift natively supports PIVOT, POW/POWER,
    # ANY_VALUE(..) and LAST_DAY(..), so drop those fallback transforms
    TRANSFORMS.pop(exp.Pivot)
    TRANSFORMS.pop(exp.Pow)
    TRANSFORMS.pop(exp.AnyValue)
    TRANSFORMS.pop(exp.LastDay)

    RESERVED_KEYWORDS = {
        "aes128", "aes256", "all", "allowoverwrite", "analyse", "analyze",
        "and", "any", "array", "as", "asc", "authorization", "az64",
        "backup", "between", "binary", "blanksasnull", "both", "bytedict",
        "bzip2", "case", "cast", "check", "collate", "column", "constraint",
        "create", "credentials", "cross", "current_date", "current_time",
        "current_timestamp", "current_user", "current_user_id", "default",
        "deferrable", "deflate", "defrag", "delta", "delta32k", "desc",
        "disable", "distinct", "do", "else", "emptyasnull", "enable",
        "encode", "encrypt ", "encryption", "end", "except", "explicit",
        "false", "for", "foreign", "freeze", "from", "full",
        "globaldict256", "globaldict64k", "grant", "group", "gzip",
        "having", "identity", "ignore", "ilike", "in", "initially",
        "inner", "intersect", "interval", "into", "is", "isnull", "join",
        "leading", "left", "like", "limit", "localtime", "localtimestamp",
        "lun", "luns", "lzo", "lzop", "minus", "mostly16", "mostly32",
        "mostly8", "natural", "new", "not", "notnull", "null", "nulls",
        "off", "offline", "offset", "oid", "old", "on", "only", "open",
        "or", "order", "outer", "overlaps", "parallel", "partition",
        "percent", "permissions", "pivot", "placing", "primary", "raw",
        "readratio", "recover", "references", "rejectlog", "resort",
        "respect", "restore", "right", "select", "session_user",
        "similar", "snapshot", "some", "sysdate", "system", "table",
        "tag", "tdes", "text255", "text32k", "then", "timestamp", "to",
        "top", "trailing", "true", "truncatecolumns", "type", "union",
        "unique", "unnest", "unpivot", "user", "using", "verbose",
        "wallet", "when", "where", "with", "without",
    }

    def unnest_sql(self, expression: exp.Unnest) -> str:
        # Redshift has no UNNEST function; emit the array expression itself,
        # aliased when a column alias was given
        exprs = expression.expressions
        count = len(exprs)

        if count > 1:
            self.unsupported(f"Unsupported number of arguments in UNNEST: {count}")
            return ""

        rendered = self.sql(seq_get(exprs, 0))
        column_alias = self.expressions(
            expression.args.get("alias"), key="columns", flat=True
        )
        if column_alias:
            return f"{rendered} AS {column_alias}"
        return rendered

    def with_properties(self, properties: exp.Properties) -> str:
        """Emit table properties without the `WITH` prefix, which Redshift omits."""
        return self.properties(properties, prefix=" ", suffix="")

    def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
        # There is no JSON type in Redshift, so a cast to JSON is a no-op
        if expression.is_type(exp.DataType.Type.JSON):
            return self.sql(expression, "this")

        return super().cast_sql(expression, safe_prefix=safe_prefix)

    def datatype_sql(self, expression: exp.DataType) -> str:
        """Render TEXT as VARCHAR, defaulting to VARCHAR(MAX) when no precision is given.

        Redshift would otherwise interpret bare TEXT as VARCHAR(255), which is
        rarely what's intended; an explicit precision is preserved as-is.
        """
        if expression.is_type("text"):
            expression.set("this", exp.DataType.Type.VARCHAR)
            if not expression.args.get("expressions"):
                expression.append("expressions", exp.var("MAX"))

        return super().datatype_sql(expression)
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
367 def unnest_sql(self, expression: exp.Unnest) -> str: 368 args = expression.expressions 369 num_args = len(args) 370 371 if num_args > 1: 372 self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}") 373 return "" 374 375 arg = self.sql(seq_get(args, 0)) 376 alias = self.expressions(expression.args.get("alias"), key="columns", flat=True) 377 return f"{arg} AS {alias}" if alias else arg
379 def with_properties(self, properties: exp.Properties) -> str: 380 """Redshift doesn't have `WITH` as part of their with_properties so we remove it""" 381 return self.properties(properties, prefix=" ", suffix="")
Redshift doesn't have `WITH` as part of their with_properties, so we remove it.
383 def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str: 384 if expression.is_type(exp.DataType.Type.JSON): 385 # Redshift doesn't support a JSON type, so casting to it is treated as a noop 386 return self.sql(expression, "this") 387 388 return super().cast_sql(expression, safe_prefix=safe_prefix)
390 def datatype_sql(self, expression: exp.DataType) -> str: 391 """ 392 Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean 393 VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type 394 without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert 395 `TEXT` to `VARCHAR`. 396 """ 397 if expression.is_type("text"): 398 expression.set("this", exp.DataType.Type.VARCHAR) 399 precision = expression.args.get("expressions") 400 401 if not precision: 402 expression.append("expressions", exp.var("MAX")) 403 404 return super().datatype_sql(expression)
Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
VARCHAR of max length, which is `VARCHAR(max)` in Redshift. Therefore, if we get a `TEXT` data type
without precision we convert it to `VARCHAR(max)`, and if it does have precision then we just convert
`TEXT` to `VARCHAR`.
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- EXPLICIT_UNION
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_FETCH
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- OUTER_UNION_MODIFIERS
- COPY_PARAMS_EQ_REQUIRED
- STAR_EXCEPT
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- NAMED_PLACEHOLDER_TOKEN
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- transformcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- hex_sql
- lowerhex_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- set_operations
- union_sql
- union_op
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- currenttimestamp_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterdiststyle_sql
- altersortkey_sql
- renametable_sql
- renamecolumn_sql
- altertable_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- slice_sql
- sub_sql
- trycast_sql
- try_sql
- log_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- forin_sql
- refresh_sql
- operator_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- generateseries_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- COPY_HAS_INTO_KEYWORD
- SUPPORTED_JSON_PATH_PARTS
- PROPERTIES_LOCATION
- schemacommentproperty_sql
- commentcolumnconstraint_sql
- bracket_sql
- matchagainst_sql