sqlglot.dialects.redshift
```python
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType


def _json_sql(self: Postgres.Generator, expression: exp.JSONExtract | exp.JSONExtractScalar) -> str:
    return f'{self.sql(expression, "this")}."{expression.expression.name}"'


class Redshift(Postgres):
    time_format = "'YYYY-MM-DD HH:MI:SS'"
    time_mapping = {
        **Postgres.time_mapping,
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "DATEDIFF": lambda args: exp.DateDiff(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=seq_get(args, 0),
            ),
            "NVL": exp.Coalesce.from_arg_list,
        }

        CONVERT_TYPE_FIRST = True

        def _parse_types(
            self, check_func: bool = False, schema: bool = False
        ) -> t.Optional[exp.Expression]:
            this = super()._parse_types(check_func=check_func, schema=schema)

            if (
                isinstance(this, exp.DataType)
                and this.is_type("varchar")
                and this.expressions
                and this.expressions[0].this == exp.column("MAX")
            ):
                this.set("expressions", [exp.Var(this="MAX")])

            return this

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "SYSDATE": TokenType.CURRENT_TIMESTAMP,
            "TIME": TokenType.TIMESTAMP,
            "TIMETZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        RENAME_TABLE_WITH_DB = False

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
        }

        PROPERTIES_LOCATION = {
            **Postgres.Generator.PROPERTIES_LOCATION,
            exp.LikeProperty: exp.Properties.Location.POST_WITH,
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.CurrentTimestamp: lambda self, e: "SYSDATE",
            exp.DateAdd: lambda self, e: self.func(
                "DATEADD", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DateDiff: lambda self, e: self.func(
                "DATEDIFF", exp.var(e.text("unit") or "day"), e.expression, e.this
            ),
            exp.DistKeyProperty: lambda self, e: f"DISTKEY({e.name})",
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.JSONExtract: _json_sql,
            exp.JSONExtractScalar: _json_sql,
            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (postgres)
        TRANSFORMS.pop(exp.Pow)

        RESERVED_KEYWORDS = {*Postgres.Generator.RESERVED_KEYWORDS, "snapshot", "type"}

        def values_sql(self, expression: exp.Values) -> str:
            """
            Converts `VALUES...` expression into a series of unions.

            Note: If you have a lot of unions then this will result in a large number of recursive statements to
            evaluate the expression. You may need to increase `sys.setrecursionlimit` to run and it can also be
            very slow.
            """

            # The VALUES clause is still valid in an `INSERT INTO ..` statement, for example
            if not expression.find_ancestor(exp.From, exp.Join):
                return super().values_sql(expression)

            column_names = expression.alias and expression.args["alias"].columns

            selects = []
            rows = [tuple_exp.expressions for tuple_exp in expression.expressions]

            for i, row in enumerate(rows):
                if i == 0 and column_names:
                    row = [
                        exp.alias_(value, column_name)
                        for value, column_name in zip(row, column_names)
                    ]

                selects.append(exp.Select(expressions=row))

            subquery_expression: exp.Select | exp.Union = selects[0]
            if len(selects) > 1:
                for select in selects[1:]:
                    subquery_expression = exp.union(subquery_expression, select, distinct=False)

            return self.subquery_sql(subquery_expression.subquery(expression.alias))

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of their with_properties so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean
            VARCHAR of max length which is `VARCHAR(max)` in Redshift. Therefore if we get a `TEXT` data type
            without precision we convert it to `VARCHAR(max)` and if it does have precision then we just convert
            `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression = expression.copy()
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.Var(this="MAX"))

            return super().datatype_sql(expression)
```
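The module-level `_json_sql` helper renders both `exp.JSONExtract` and `exp.JSONExtractScalar` as Redshift's dotted access on SUPER/JSON values instead of Postgres's arrow operators. A minimal sketch of the effect, assuming Postgres `->` parses into `exp.JSONExtract` (the printed output is indicative, not verbatim):

```python
import sqlglot

# A Postgres JSON arrow becomes dotted attribute access in Redshift output
sql = sqlglot.transpile(
    "SELECT payload -> 'user' FROM events",
    read="postgres",
    write="redshift",
)[0]
print(sql)  # expected along the lines of: SELECT payload."user" FROM events
```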
class Redshift(Postgres)
class Redshift.Parser(Postgres.Parser)
Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.IMMEDIATE.
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0.
- alias_post_tablesample: if the table alias comes after tablesample. Default: False.
- max_errors: maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3.
- null_ordering: indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small".
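Redshift's `DATEADD(unit, value, date)` puts the unit first, so the `FUNCTIONS` lambdas above reorder the arguments into sqlglot's canonical `exp.DateAdd(this=date, expression=value, unit=unit)` shape, and `NVL` is normalized to `COALESCE`. A small sketch (printed outputs are indicative, not verbatim):

```python
import sqlglot
from sqlglot import exp

# The unit-first Redshift arguments land in the canonical AST slots
ast = sqlglot.parse_one("SELECT DATEADD(month, 18, '2008-02-28')", read="redshift")
date_add = ast.find(exp.DateAdd)
print(date_add.text("unit"))  # month
print(date_add.this.sql())    # '2008-02-28'

# NVL parses to exp.Coalesce, so other dialects render it natively
print(sqlglot.transpile("SELECT NVL(a, b, c)", read="redshift", write="duckdb")[0])
# expected: SELECT COALESCE(a, b, c)
```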
class Redshift.Tokenizer(Postgres.Tokenizer)
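Dropping `#` from `SINGLE_TOKENS` lets a leading `#` (Redshift's temporary-table prefix) stay attached to the identifier rather than tokenizing on its own, and keywords such as `SYSDATE` tokenize as `CURRENT_TIMESTAMP`. A quick check (outputs are indicative):

```python
import sqlglot
from sqlglot import exp

# '#temp' is kept as one table identifier rather than splitting on '#'
ast = sqlglot.parse_one("SELECT * FROM #temp", read="redshift")
print(ast.find(exp.Table).name)  # #temp

# SYSDATE tokenizes as CURRENT_TIMESTAMP, then renders back as SYSDATE
print(sqlglot.parse_one("SELECT SYSDATE", read="redshift").sql(dialect="redshift"))
# expected: SELECT SYSDATE
```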
class Redshift.Generator(Postgres.Generator)
Generator interprets the given syntax tree and produces a SQL string as an output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a Python time format and the value represents the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- raw_start (str): specifies which starting character to use to delimit raw literals. Default: None.
- raw_end (str): specifies which ending character to use to delimit raw literals. Default: None.
- identify (bool | str): 'always': always quote; 'safe': quote identifiers if they don't contain an uppercase character; True defaults to 'always'.
- normalize (bool): if set to True all identifiers will be lowercased.
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if True, unnest table aliases are considered only as column aliases.
- normalize_functions (str): normalize function names: "upper", "lower", or None. Default: "upper".
- alias_post_tablesample (bool): if the table alias comes after tablesample. Default: False.
- identifiers_can_start_with_digit (bool): if an unquoted identifier can start with a digit. Default: False.
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): if the comma is leading or trailing in select statements. Default: False.
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
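On the way back out, the `TRANSFORMS` above undo the parser's reordering: `exp.DateAdd` and `exp.DateDiff` are emitted unit-first again (defaulting to `day`), and `exp.CurrentTimestamp` becomes `SYSDATE`. A hedged round-trip sketch (outputs are indicative):

```python
import sqlglot

# Unit-first argument order is preserved on a Redshift round trip
print(sqlglot.transpile(
    "SELECT DATEADD(month, 18, created_at)", read="redshift", write="redshift"
)[0])
# expected: SELECT DATEADD(month, 18, created_at)

# CURRENT_TIMESTAMP from another dialect renders as SYSDATE
print(sqlglot.transpile("SELECT CURRENT_TIMESTAMP", read="postgres", write="redshift")[0])
# expected: SELECT SYSDATE
```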
def values_sql(self, expression: exp.Values) -> str
Converts a `VALUES...` expression into a series of unions.

Note: If you have a lot of unions then this will result in a large number of recursive statements to evaluate the expression. You may need to increase `sys.setrecursionlimit` to run, and it can also be very slow.
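In other words, a `VALUES` list used as a relation is rewritten into a `UNION ALL` chain, with the alias's column names attached to the first `SELECT`. A sketch of the expected rewrite (exact formatting may differ):

```python
import sqlglot

# VALUES in a FROM clause is rewritten for Redshift output
sql = "SELECT a, b FROM (VALUES (1, 2), (3, 4)) AS t(a, b)"
print(sqlglot.transpile(sql, read="postgres", write="redshift")[0])
# expected roughly:
# SELECT a, b FROM (SELECT 1 AS a, 2 AS b UNION ALL SELECT 3, 4) AS t
```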
def with_properties(self, properties: exp.Properties) -> str
Redshift doesn't have `WITH` as part of its properties clause, so we remove it.
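In practice this means table properties such as `DISTSTYLE`, `DISTKEY`, and `SORTKEY` are appended bare after the column list instead of inside a Postgres-style `WITH (...)` clause. An indicative sketch:

```python
import sqlglot

ddl = "CREATE TABLE t (id INT) DISTSTYLE KEY DISTKEY(id) SORTKEY(id)"
print(sqlglot.transpile(ddl, read="redshift", write="redshift")[0])
# expected roughly (note INT -> INTEGER from TYPE_MAPPING, and no WITH):
# CREATE TABLE t (id INTEGER) DISTSTYLE KEY DISTKEY(id) SORTKEY(id)
```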
def datatype_sql(self, expression: exp.DataType) -> str
Redshift converts the `TEXT` data type to `VARCHAR(255)` by default when people more generally mean VARCHAR of max length, which is `VARCHAR(MAX)` in Redshift. Therefore, if we get a `TEXT` data type without precision we convert it to `VARCHAR(MAX)`, and if it does have precision then we just convert `TEXT` to `VARCHAR`.
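So an unparameterized `TEXT` becomes `VARCHAR(MAX)`, a `TEXT(n)` keeps its length as a plain `VARCHAR(n)`, and the parser's `_parse_types` hook keeps `VARCHAR(MAX)` stable on a round trip. Indicative examples:

```python
import sqlglot

# TEXT without precision widens to VARCHAR(MAX)
print(sqlglot.transpile("CREATE TABLE t (c TEXT)", read="postgres", write="redshift")[0])
# expected: CREATE TABLE t (c VARCHAR(MAX))

# VARCHAR(MAX) survives a Redshift round trip unchanged
print(sqlglot.transpile("CREATE TABLE t (c VARCHAR(MAX))", read="redshift", write="redshift")[0])
# expected: CREATE TABLE t (c VARCHAR(MAX))
```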
Inherited Members
- sqlglot.generator.Generator
- Generator
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- rawstring_sql
- datatypesize_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- after_having_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- concat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- attimezone_sql
- add_sql
- and_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- trycast_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql