Merging upstream version 26.3.8.
Signed-off-by: Daniel Baumann <daniel@debian.org>
This commit is contained in:
parent
34733e7b48
commit
c16ed2270a
89 changed files with 59179 additions and 57645 deletions
16
.github/workflows/python-publish.yml
vendored
16
.github/workflows/python-publish.yml
vendored
|
@ -66,9 +66,9 @@ jobs:
|
||||||
manylinux: auto
|
manylinux: auto
|
||||||
working-directory: ./sqlglotrs
|
working-directory: ./sqlglotrs
|
||||||
- name: Upload wheels
|
- name: Upload wheels
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: wheels
|
name: wheels-${{ matrix.os }}-${{ matrix.target }}
|
||||||
path: sqlglotrs/dist
|
path: sqlglotrs/dist
|
||||||
|
|
||||||
sdist-rs:
|
sdist-rs:
|
||||||
|
@ -84,9 +84,9 @@ jobs:
|
||||||
args: --out dist
|
args: --out dist
|
||||||
working-directory: ./sqlglotrs
|
working-directory: ./sqlglotrs
|
||||||
- name: Upload sdist
|
- name: Upload sdist
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: wheels
|
name: wheels-sdist
|
||||||
path: sqlglotrs/dist
|
path: sqlglotrs/dist
|
||||||
|
|
||||||
deploy-rs:
|
deploy-rs:
|
||||||
|
@ -94,16 +94,18 @@ jobs:
|
||||||
if: needs.should-deploy-rs.outputs.deploy == 'true'
|
if: needs.should-deploy-rs.outputs.deploy == 'true'
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: wheels
|
pattern: wheels-*
|
||||||
|
path: sqlglotrs/dist
|
||||||
- name: Publish to PyPI
|
- name: Publish to PyPI
|
||||||
uses: PyO3/maturin-action@v1
|
uses: PyO3/maturin-action@v1
|
||||||
env:
|
env:
|
||||||
MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
|
MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
|
||||||
with:
|
with:
|
||||||
command: upload
|
command: upload
|
||||||
args: --non-interactive --skip-existing *
|
args: --non-interactive --skip-existing dist/wheels-*/*
|
||||||
|
working-directory: ./sqlglotrs
|
||||||
|
|
||||||
deploy:
|
deploy:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|
122
CHANGELOG.md
122
CHANGELOG.md
|
@ -1,6 +1,119 @@
|
||||||
Changelog
|
Changelog
|
||||||
=========
|
=========
|
||||||
|
|
||||||
|
## [v26.3.7] - 2025-01-24
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`14ad1a0`](https://github.com/tobymao/sqlglot/commit/14ad1a04e86fea5ea88f99948e4cc283692e72a2) - bump sqlglotrs to 0.3.13 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.6] - 2025-01-24
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`085fef6`](https://github.com/tobymao/sqlglot/commit/085fef6971a4ebd43b5c7013c6bbcb0d00dfdc30) - bump sqlglotrs to 0.3.12 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.5] - 2025-01-24
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`acb7217`](https://github.com/tobymao/sqlglot/commit/acb7217d89e12de549663b67af4687a08512993f) - bump sqlglotrs to 0.3.11 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.4] - 2025-01-24
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`bb7548d`](https://github.com/tobymao/sqlglot/commit/bb7548d1e9f371d3ce931fcbd86c65c895f159d1) - bump sqlglotrs to 0.3.10 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.3] - 2025-01-23
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`3a188ef`](https://github.com/tobymao/sqlglot/commit/3a188ef0d42a6313625b25003c27195156e7e753) - fix sqlglotrs deployment job *(PR [#4657](https://github.com/tobymao/sqlglot/pull/4657) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`7e55533`](https://github.com/tobymao/sqlglot/commit/7e55533d9bb06783803f275415640217c89085d0) - bump sqlglotrs to 0.3.9 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.2] - 2025-01-23
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`28f56cb`](https://github.com/tobymao/sqlglot/commit/28f56cb7d9805ce898e7bf6bb884cccb1bd32c52) - fix sqlglotrs deployment job *(PR [#4656](https://github.com/tobymao/sqlglot/pull/4656) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`846b141`](https://github.com/tobymao/sqlglot/commit/846b1414183e3d193b4aacc82f3861378adb9ec9) - bump sqlglotrs to 0.3.8 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.1] - 2025-01-23
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`ff9ea0c`](https://github.com/tobymao/sqlglot/commit/ff9ea0c4554ef0fa46b3460d01374d4a3f9c36ff) - change upload-artifact to v4 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`61c4784`](https://github.com/tobymao/sqlglot/commit/61c4784033940e34e91732e2464e4baba77e6b7c) - bump sqlglotrs to 0.3.7 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.3.0] - 2025-01-23
|
||||||
|
### :boom: BREAKING CHANGES
|
||||||
|
- due to [`8b465d4`](https://github.com/tobymao/sqlglot/commit/8b465d498e0aa9feee53306f631e258443ee3060) - expand single VALUES clause in CTE into a SELECT * *(PR [#4617](https://github.com/tobymao/sqlglot/pull/4617) by [@georgesittas](https://github.com/georgesittas))*:
|
||||||
|
|
||||||
|
expand single VALUES clause in CTE into a SELECT * (#4617)
|
||||||
|
|
||||||
|
- due to [`59d886d`](https://github.com/tobymao/sqlglot/commit/59d886d6abfc00726b785a4d468f6b2e0f9d3b1a) - treat LEVEL column in CONNECT BY queries as an identifier *(PR [#4627](https://github.com/tobymao/sqlglot/pull/4627) by [@georgesittas](https://github.com/georgesittas))*:
|
||||||
|
|
||||||
|
treat LEVEL column in CONNECT BY queries as an identifier (#4627)
|
||||||
|
|
||||||
|
- due to [`9db09ff`](https://github.com/tobymao/sqlglot/commit/9db09ff91931802c675a219951f28afee1d4019d) - support more compact SAFE_DIVIDE transpilation [#4634](https://github.com/tobymao/sqlglot/pull/4634) *(PR [#4641](https://github.com/tobymao/sqlglot/pull/4641) by [@geooo109](https://github.com/geooo109))*:
|
||||||
|
|
||||||
|
support more compact SAFE_DIVIDE transpilation #4634 (#4641)
|
||||||
|
|
||||||
|
- due to [`94af80b`](https://github.com/tobymao/sqlglot/commit/94af80b8bc3c44aa9770d6503f4e07ad4e37e314) - Do not remove parens on bracketed expressions *(PR [#4645](https://github.com/tobymao/sqlglot/pull/4645) by [@VaggelisD](https://github.com/VaggelisD))*:
|
||||||
|
|
||||||
|
Do not remove parens on bracketed expressions (#4645)
|
||||||
|
|
||||||
|
- due to [`35923e9`](https://github.com/tobymao/sqlglot/commit/35923e959ff934093a7b82c58f13c5a89a768f5e) - POSITION and all their variants for all dialects *(PR [#4606](https://github.com/tobymao/sqlglot/pull/4606) by [@pruzko](https://github.com/pruzko))*:
|
||||||
|
|
||||||
|
POSITION and all their variants for all dialects (#4606)
|
||||||
|
|
||||||
|
|
||||||
|
### :sparkles: New Features
|
||||||
|
- [`e47a7c9`](https://github.com/tobymao/sqlglot/commit/e47a7c943b0beef37e30cd7c71ea98c27b82c11b) - Fix Oracle Integer Type Mapping *(PR [#4616](https://github.com/tobymao/sqlglot/pull/4616) by [@pruzko](https://github.com/pruzko))*
|
||||||
|
- [`d8ade83`](https://github.com/tobymao/sqlglot/commit/d8ade830bbca4d2893a7e406868a0bd3a654057e) - **clickhouse**: Dynamic data type *(PR [#4624](https://github.com/tobymao/sqlglot/pull/4624) by [@pkit](https://github.com/pkit))*
|
||||||
|
- [`f7628ad`](https://github.com/tobymao/sqlglot/commit/f7628adf12e03a09ec89fe883d5b710a0f7e0151) - **optimizer**: Fix qualify for SEMI/ANTI joins *(PR [#4622](https://github.com/tobymao/sqlglot/pull/4622) by [@VaggelisD](https://github.com/VaggelisD))*
|
||||||
|
- :arrow_lower_right: *addresses issue [#3557](https://github.com/TobikoData/sqlmesh/issues/3557) opened by [@Bilbottom](https://github.com/Bilbottom)*
|
||||||
|
- [`a20b663`](https://github.com/tobymao/sqlglot/commit/a20b663964a9845d3eb3c43def5880a531dab4a4) - improve rs tokenizer performance *(PR [#4638](https://github.com/tobymao/sqlglot/pull/4638) by [@benfdking](https://github.com/benfdking))*
|
||||||
|
- [`ffa0df7`](https://github.com/tobymao/sqlglot/commit/ffa0df72e36c6a08f1fc707d9c83e98eccc214c1) - **parser**: Support Oracle/Postgres XMLNAMESPACES in XMLTABLE *(PR [#4643](https://github.com/tobymao/sqlglot/pull/4643) by [@rbreejen](https://github.com/rbreejen))*
|
||||||
|
- :arrow_lower_right: *addresses issue [#4642](https://github.com/tobymao/sqlglot/issues/4642) opened by [@rbreejen](https://github.com/rbreejen)*
|
||||||
|
- [`35923e9`](https://github.com/tobymao/sqlglot/commit/35923e959ff934093a7b82c58f13c5a89a768f5e) - POSITION and all their variants for all dialects *(PR [#4606](https://github.com/tobymao/sqlglot/pull/4606) by [@pruzko](https://github.com/pruzko))*
|
||||||
|
|
||||||
|
### :bug: Bug Fixes
|
||||||
|
- [`14474ee`](https://github.com/tobymao/sqlglot/commit/14474ee689025cc67b1f9a07e51d2f414ec5ab49) - **tsql**: support TSQL PRIMARY KEY constraint with DESC, ASC *(PR [#4618](https://github.com/tobymao/sqlglot/pull/4618) by [@geooo109](https://github.com/geooo109))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4610](https://github.com/tobymao/sqlglot/issues/4610) opened by [@cchambers-rdi](https://github.com/cchambers-rdi)*
|
||||||
|
- [`8b465d4`](https://github.com/tobymao/sqlglot/commit/8b465d498e0aa9feee53306f631e258443ee3060) - **parser**: expand single VALUES clause in CTE into a SELECT * *(PR [#4617](https://github.com/tobymao/sqlglot/pull/4617) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#3556](https://github.com/TobikoData/sqlmesh/issues/3556) opened by [@Bilbottom](https://github.com/Bilbottom)*
|
||||||
|
- [`647d986`](https://github.com/tobymao/sqlglot/commit/647d98650a3d6ba6aa7d57560555832548dd89aa) - **snowflake**: get rid of incorrect time mappings *(PR [#4629](https://github.com/tobymao/sqlglot/pull/4629) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`9cbd5ef`](https://github.com/tobymao/sqlglot/commit/9cbd5ef798d1f34d4eebe501cead8295564fc15c) - **trino**: generate ArrayUniqueAgg as ARRAY_AGG(DISTINCT ...) *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`59d886d`](https://github.com/tobymao/sqlglot/commit/59d886d6abfc00726b785a4d468f6b2e0f9d3b1a) - **optimizer**: treat LEVEL column in CONNECT BY queries as an identifier *(PR [#4627](https://github.com/tobymao/sqlglot/pull/4627) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4620](https://github.com/tobymao/sqlglot/issues/4620) opened by [@snovik75](https://github.com/snovik75)*
|
||||||
|
- [`6107661`](https://github.com/tobymao/sqlglot/commit/6107661424622651447da09fb9d7e456ff453bff) - **snowflake**: Allow parsing of TO_TIME *(PR [#4631](https://github.com/tobymao/sqlglot/pull/4631) by [@VaggelisD](https://github.com/VaggelisD))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4625](https://github.com/tobymao/sqlglot/issues/4625) opened by [@aletheavilla](https://github.com/aletheavilla)*
|
||||||
|
- [`9fdfd4d`](https://github.com/tobymao/sqlglot/commit/9fdfd4d6824702f019223536ba4013a966170ff6) - **trino**: support QUOTES option for JSON_QUERY [#4623](https://github.com/tobymao/sqlglot/pull/4623) *(PR [#4628](https://github.com/tobymao/sqlglot/pull/4628) by [@geooo109](https://github.com/geooo109))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4623](https://github.com/tobymao/sqlglot/issues/4623) opened by [@betodealmeida](https://github.com/betodealmeida)*
|
||||||
|
- [`43eb0d9`](https://github.com/tobymao/sqlglot/commit/43eb0d9360f3154039e9eb71ee8818b6590d220a) - **tsql**: create schema ast access fixup fixes [#4632](https://github.com/tobymao/sqlglot/pull/4632) *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`59f6525`](https://github.com/tobymao/sqlglot/commit/59f652572037940f136508ee60b8e0a137ce18f0) - **duckdb**: Transpile exp.RegexpILike *(PR [#4640](https://github.com/tobymao/sqlglot/pull/4640) by [@VaggelisD](https://github.com/VaggelisD))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4639](https://github.com/tobymao/sqlglot/issues/4639) opened by [@dor-bernstein](https://github.com/dor-bernstein)*
|
||||||
|
- [`9db09ff`](https://github.com/tobymao/sqlglot/commit/9db09ff91931802c675a219951f28afee1d4019d) - **bigquery**: support more compact SAFE_DIVIDE transpilation [#4634](https://github.com/tobymao/sqlglot/pull/4634) *(PR [#4641](https://github.com/tobymao/sqlglot/pull/4641) by [@geooo109](https://github.com/geooo109))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4634](https://github.com/tobymao/sqlglot/issues/4634) opened by [@bbernst](https://github.com/bbernst)*
|
||||||
|
- [`94af80b`](https://github.com/tobymao/sqlglot/commit/94af80b8bc3c44aa9770d6503f4e07ad4e37e314) - **optimizer**: Do not remove parens on bracketed expressions *(PR [#4645](https://github.com/tobymao/sqlglot/pull/4645) by [@VaggelisD](https://github.com/VaggelisD))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#3672](https://github.com/TobikoData/sqlmesh/issues/3672) opened by [@simon-pactum](https://github.com/simon-pactum)*
|
||||||
|
- [`761e835`](https://github.com/tobymao/sqlglot/commit/761e835e39fa819ef478b8086bfd814dbecc7927) - qualify using *(PR [#4646](https://github.com/tobymao/sqlglot/pull/4646) by [@tobymao](https://github.com/tobymao))*
|
||||||
|
- [`8b0b8ac`](https://github.com/tobymao/sqlglot/commit/8b0b8ac4ccbaf54d5fa948d9900ca53ccca9115b) - **sqlite**: allow 2-arg version of UNHEX closes [#4648](https://github.com/tobymao/sqlglot/pull/4648) *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`2f12bd9`](https://github.com/tobymao/sqlglot/commit/2f12bd94d8583ddf9af808dda4df1690179ee592) - **athena**: Generate PartitionedByProperty correctly on CTAS for an Iceberg table *(PR [#4654](https://github.com/tobymao/sqlglot/pull/4654) by [@erindru](https://github.com/erindru))*
|
||||||
|
- [`1ea0dc2`](https://github.com/tobymao/sqlglot/commit/1ea0dc296ca2e47d466ddce162ad64945c532586) - **postgres**: Support WITHIN GROUP ( order_by_clause ) FILTER for Postgres *(PR [#4652](https://github.com/tobymao/sqlglot/pull/4652) by [@gl3nnleblanc](https://github.com/gl3nnleblanc))*
|
||||||
|
- :arrow_lower_right: *fixes issue [#4651](https://github.com/tobymao/sqlglot/issues/4651) opened by [@gl3nnleblanc](https://github.com/gl3nnleblanc)*
|
||||||
|
|
||||||
|
### :recycle: Refactors
|
||||||
|
- [`284a936`](https://github.com/tobymao/sqlglot/commit/284a9360c5d43301da34d8d5199f101423ade289) - simplify WITHIN GROUP ... FILTER support *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`73512f9`](https://github.com/tobymao/sqlglot/commit/73512f9dde03b632b5f9eff0331713f9b44996d7) - set default properly for use_rs_tokenizer *(PR [#4619](https://github.com/tobymao/sqlglot/pull/4619) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
- [`9ba1db3`](https://github.com/tobymao/sqlglot/commit/9ba1db3436d2afba5821b853cb3c573aada370e7) - add bench command *(PR [#4621](https://github.com/tobymao/sqlglot/pull/4621) by [@benfdking](https://github.com/benfdking))*
|
||||||
|
- [`0aa1516`](https://github.com/tobymao/sqlglot/commit/0aa1516cd8bf5f7d77e6d743f30f1526ccf15633) - move to string new *(PR [#4637](https://github.com/tobymao/sqlglot/pull/4637) by [@benfdking](https://github.com/benfdking))*
|
||||||
|
- [`2355a91`](https://github.com/tobymao/sqlglot/commit/2355a914752f3add75457849ae8f8ec00754f888) - clean up unnecessary mut *(PR [#4636](https://github.com/tobymao/sqlglot/pull/4636) by [@benfdking](https://github.com/benfdking))*
|
||||||
|
- [`0b68af5`](https://github.com/tobymao/sqlglot/commit/0b68af545bc82317ee16903d525e7b47f273d92d) - bump sqlglotrs to 0.3.6 *(commit by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
|
## [v26.2.1] - 2025-01-15
|
||||||
|
### :wrench: Chores
|
||||||
|
- [`b447322`](https://github.com/tobymao/sqlglot/commit/b4473220f0f50a9ce2463b3a98a77bf2fdd897af) - parser accepts ctes without as keyword again, except for clickhouse *(PR [#4612](https://github.com/tobymao/sqlglot/pull/4612) by [@georgesittas](https://github.com/georgesittas))*
|
||||||
|
|
||||||
|
|
||||||
## [v26.2.0] - 2025-01-14
|
## [v26.2.0] - 2025-01-14
|
||||||
### :boom: BREAKING CHANGES
|
### :boom: BREAKING CHANGES
|
||||||
- due to [`f3fcc10`](https://github.com/tobymao/sqlglot/commit/f3fcc1013dfcfdaa388ba3426ed82c4fe0eefab1) - allow limit, offset to be used as both modifiers and aliases *(PR [#4589](https://github.com/tobymao/sqlglot/pull/4589) by [@georgesittas](https://github.com/georgesittas))*:
|
- due to [`f3fcc10`](https://github.com/tobymao/sqlglot/commit/f3fcc1013dfcfdaa388ba3426ed82c4fe0eefab1) - allow limit, offset to be used as both modifiers and aliases *(PR [#4589](https://github.com/tobymao/sqlglot/pull/4589) by [@georgesittas](https://github.com/georgesittas))*:
|
||||||
|
@ -5643,3 +5756,12 @@ Changelog
|
||||||
[v26.1.2]: https://github.com/tobymao/sqlglot/compare/v26.1.1...v26.1.2
|
[v26.1.2]: https://github.com/tobymao/sqlglot/compare/v26.1.1...v26.1.2
|
||||||
[v26.1.3]: https://github.com/tobymao/sqlglot/compare/v26.1.2...v26.1.3
|
[v26.1.3]: https://github.com/tobymao/sqlglot/compare/v26.1.2...v26.1.3
|
||||||
[v26.2.0]: https://github.com/tobymao/sqlglot/compare/v26.1.3...v26.2.0
|
[v26.2.0]: https://github.com/tobymao/sqlglot/compare/v26.1.3...v26.2.0
|
||||||
|
[v26.2.1]: https://github.com/tobymao/sqlglot/compare/v26.2.0...v26.2.1
|
||||||
|
[v26.3.0]: https://github.com/tobymao/sqlglot/compare/v26.2.1...v26.3.0
|
||||||
|
[v26.3.1]: https://github.com/tobymao/sqlglot/compare/v26.3.0...v26.3.1
|
||||||
|
[v26.3.2]: https://github.com/tobymao/sqlglot/compare/v26.3.1...v26.3.2
|
||||||
|
[v26.3.3]: https://github.com/tobymao/sqlglot/compare/v26.3.2...v26.3.3
|
||||||
|
[v26.3.4]: https://github.com/tobymao/sqlglot/compare/v26.3.3...v26.3.4
|
||||||
|
[v26.3.5]: https://github.com/tobymao/sqlglot/compare/v26.3.4...v26.3.5
|
||||||
|
[v26.3.6]: https://github.com/tobymao/sqlglot/compare/v26.3.5...v26.3.6
|
||||||
|
[v26.3.7]: https://github.com/tobymao/sqlglot/compare/v26.3.6...v26.3.7
|
||||||
|
|
3
Makefile
3
Makefile
|
@ -3,6 +3,9 @@
|
||||||
install:
|
install:
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|
||||||
|
bench: install-dev-rs-release
|
||||||
|
python benchmarks/bench.py
|
||||||
|
|
||||||
install-dev-rs-release:
|
install-dev-rs-release:
|
||||||
cd sqlglotrs/ && python -m maturin develop -r
|
cd sqlglotrs/ && python -m maturin develop -r
|
||||||
|
|
||||||
|
|
File diff suppressed because one or more lines are too long
|
@ -76,8 +76,8 @@
|
||||||
</span><span id="L-12"><a href="#L-12"><span class="linenos">12</span></a><span class="n">__version_tuple__</span><span class="p">:</span> <span class="n">VERSION_TUPLE</span>
|
</span><span id="L-12"><a href="#L-12"><span class="linenos">12</span></a><span class="n">__version_tuple__</span><span class="p">:</span> <span class="n">VERSION_TUPLE</span>
|
||||||
</span><span id="L-13"><a href="#L-13"><span class="linenos">13</span></a><span class="n">version_tuple</span><span class="p">:</span> <span class="n">VERSION_TUPLE</span>
|
</span><span id="L-13"><a href="#L-13"><span class="linenos">13</span></a><span class="n">version_tuple</span><span class="p">:</span> <span class="n">VERSION_TUPLE</span>
|
||||||
</span><span id="L-14"><a href="#L-14"><span class="linenos">14</span></a>
|
</span><span id="L-14"><a href="#L-14"><span class="linenos">14</span></a>
|
||||||
</span><span id="L-15"><a href="#L-15"><span class="linenos">15</span></a><span class="n">__version__</span> <span class="o">=</span> <span class="n">version</span> <span class="o">=</span> <span class="s1">'26.2.0'</span>
|
</span><span id="L-15"><a href="#L-15"><span class="linenos">15</span></a><span class="n">__version__</span> <span class="o">=</span> <span class="n">version</span> <span class="o">=</span> <span class="s1">'26.3.7'</span>
|
||||||
</span><span id="L-16"><a href="#L-16"><span class="linenos">16</span></a><span class="n">__version_tuple__</span> <span class="o">=</span> <span class="n">version_tuple</span> <span class="o">=</span> <span class="p">(</span><span class="mi">26</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">0</span><span class="p">)</span>
|
</span><span id="L-16"><a href="#L-16"><span class="linenos">16</span></a><span class="n">__version_tuple__</span> <span class="o">=</span> <span class="n">version_tuple</span> <span class="o">=</span> <span class="p">(</span><span class="mi">26</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">7</span><span class="p">)</span>
|
||||||
</span></pre></div>
|
</span></pre></div>
|
||||||
|
|
||||||
|
|
||||||
|
@ -97,7 +97,7 @@
|
||||||
<section id="version">
|
<section id="version">
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">version</span><span class="annotation">: str</span> =
|
<span class="name">version</span><span class="annotation">: str</span> =
|
||||||
<span class="default_value">'26.2.0'</span>
|
<span class="default_value">'26.3.7'</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
@ -109,7 +109,7 @@
|
||||||
<section id="version_tuple">
|
<section id="version_tuple">
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">version_tuple</span><span class="annotation">: object</span> =
|
<span class="name">version_tuple</span><span class="annotation">: object</span> =
|
||||||
<span class="default_value">(26, 2, 0)</span>
|
<span class="default_value">(26, 3, 7)</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -1047,6 +1047,7 @@ Default: True</li>
|
||||||
<dd id="Druid.Generator.intdiv_sql" class="function"><a href="../generator.html#Generator.intdiv_sql">intdiv_sql</a></dd>
|
<dd id="Druid.Generator.intdiv_sql" class="function"><a href="../generator.html#Generator.intdiv_sql">intdiv_sql</a></dd>
|
||||||
<dd id="Druid.Generator.dpipe_sql" class="function"><a href="../generator.html#Generator.dpipe_sql">dpipe_sql</a></dd>
|
<dd id="Druid.Generator.dpipe_sql" class="function"><a href="../generator.html#Generator.dpipe_sql">dpipe_sql</a></dd>
|
||||||
<dd id="Druid.Generator.div_sql" class="function"><a href="../generator.html#Generator.div_sql">div_sql</a></dd>
|
<dd id="Druid.Generator.div_sql" class="function"><a href="../generator.html#Generator.div_sql">div_sql</a></dd>
|
||||||
|
<dd id="Druid.Generator.safedivide_sql" class="function"><a href="../generator.html#Generator.safedivide_sql">safedivide_sql</a></dd>
|
||||||
<dd id="Druid.Generator.overlaps_sql" class="function"><a href="../generator.html#Generator.overlaps_sql">overlaps_sql</a></dd>
|
<dd id="Druid.Generator.overlaps_sql" class="function"><a href="../generator.html#Generator.overlaps_sql">overlaps_sql</a></dd>
|
||||||
<dd id="Druid.Generator.distance_sql" class="function"><a href="../generator.html#Generator.distance_sql">distance_sql</a></dd>
|
<dd id="Druid.Generator.distance_sql" class="function"><a href="../generator.html#Generator.distance_sql">distance_sql</a></dd>
|
||||||
<dd id="Druid.Generator.dot_sql" class="function"><a href="../generator.html#Generator.dot_sql">dot_sql</a></dd>
|
<dd id="Druid.Generator.dot_sql" class="function"><a href="../generator.html#Generator.dot_sql">dot_sql</a></dd>
|
||||||
|
@ -1150,6 +1151,7 @@ Default: True</li>
|
||||||
<dd id="Druid.Generator.conditionalinsert_sql" class="function"><a href="../generator.html#Generator.conditionalinsert_sql">conditionalinsert_sql</a></dd>
|
<dd id="Druid.Generator.conditionalinsert_sql" class="function"><a href="../generator.html#Generator.conditionalinsert_sql">conditionalinsert_sql</a></dd>
|
||||||
<dd id="Druid.Generator.multitableinserts_sql" class="function"><a href="../generator.html#Generator.multitableinserts_sql">multitableinserts_sql</a></dd>
|
<dd id="Druid.Generator.multitableinserts_sql" class="function"><a href="../generator.html#Generator.multitableinserts_sql">multitableinserts_sql</a></dd>
|
||||||
<dd id="Druid.Generator.oncondition_sql" class="function"><a href="../generator.html#Generator.oncondition_sql">oncondition_sql</a></dd>
|
<dd id="Druid.Generator.oncondition_sql" class="function"><a href="../generator.html#Generator.oncondition_sql">oncondition_sql</a></dd>
|
||||||
|
<dd id="Druid.Generator.jsonextractquote_sql" class="function"><a href="../generator.html#Generator.jsonextractquote_sql">jsonextractquote_sql</a></dd>
|
||||||
<dd id="Druid.Generator.jsonexists_sql" class="function"><a href="../generator.html#Generator.jsonexists_sql">jsonexists_sql</a></dd>
|
<dd id="Druid.Generator.jsonexists_sql" class="function"><a href="../generator.html#Generator.jsonexists_sql">jsonexists_sql</a></dd>
|
||||||
<dd id="Druid.Generator.arrayagg_sql" class="function"><a href="../generator.html#Generator.arrayagg_sql">arrayagg_sql</a></dd>
|
<dd id="Druid.Generator.arrayagg_sql" class="function"><a href="../generator.html#Generator.arrayagg_sql">arrayagg_sql</a></dd>
|
||||||
<dd id="Druid.Generator.apply_sql" class="function"><a href="../generator.html#Generator.apply_sql">apply_sql</a></dd>
|
<dd id="Druid.Generator.apply_sql" class="function"><a href="../generator.html#Generator.apply_sql">apply_sql</a></dd>
|
||||||
|
@ -1183,6 +1185,7 @@ Default: True</li>
|
||||||
<dd id="Druid.Generator.analyzevalidate_sql" class="function"><a href="../generator.html#Generator.analyzevalidate_sql">analyzevalidate_sql</a></dd>
|
<dd id="Druid.Generator.analyzevalidate_sql" class="function"><a href="../generator.html#Generator.analyzevalidate_sql">analyzevalidate_sql</a></dd>
|
||||||
<dd id="Druid.Generator.analyze_sql" class="function"><a href="../generator.html#Generator.analyze_sql">analyze_sql</a></dd>
|
<dd id="Druid.Generator.analyze_sql" class="function"><a href="../generator.html#Generator.analyze_sql">analyze_sql</a></dd>
|
||||||
<dd id="Druid.Generator.xmltable_sql" class="function"><a href="../generator.html#Generator.xmltable_sql">xmltable_sql</a></dd>
|
<dd id="Druid.Generator.xmltable_sql" class="function"><a href="../generator.html#Generator.xmltable_sql">xmltable_sql</a></dd>
|
||||||
|
<dd id="Druid.Generator.xmlnamespace_sql" class="function"><a href="../generator.html#Generator.xmlnamespace_sql">xmlnamespace_sql</a></dd>
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
</dl>
|
</dl>
|
||||||
|
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -2680,6 +2680,7 @@ Default: True</li>
|
||||||
<dd id="Python.Generator.intdiv_sql" class="function"><a href="../generator.html#Generator.intdiv_sql">intdiv_sql</a></dd>
|
<dd id="Python.Generator.intdiv_sql" class="function"><a href="../generator.html#Generator.intdiv_sql">intdiv_sql</a></dd>
|
||||||
<dd id="Python.Generator.dpipe_sql" class="function"><a href="../generator.html#Generator.dpipe_sql">dpipe_sql</a></dd>
|
<dd id="Python.Generator.dpipe_sql" class="function"><a href="../generator.html#Generator.dpipe_sql">dpipe_sql</a></dd>
|
||||||
<dd id="Python.Generator.div_sql" class="function"><a href="../generator.html#Generator.div_sql">div_sql</a></dd>
|
<dd id="Python.Generator.div_sql" class="function"><a href="../generator.html#Generator.div_sql">div_sql</a></dd>
|
||||||
|
<dd id="Python.Generator.safedivide_sql" class="function"><a href="../generator.html#Generator.safedivide_sql">safedivide_sql</a></dd>
|
||||||
<dd id="Python.Generator.overlaps_sql" class="function"><a href="../generator.html#Generator.overlaps_sql">overlaps_sql</a></dd>
|
<dd id="Python.Generator.overlaps_sql" class="function"><a href="../generator.html#Generator.overlaps_sql">overlaps_sql</a></dd>
|
||||||
<dd id="Python.Generator.distance_sql" class="function"><a href="../generator.html#Generator.distance_sql">distance_sql</a></dd>
|
<dd id="Python.Generator.distance_sql" class="function"><a href="../generator.html#Generator.distance_sql">distance_sql</a></dd>
|
||||||
<dd id="Python.Generator.dot_sql" class="function"><a href="../generator.html#Generator.dot_sql">dot_sql</a></dd>
|
<dd id="Python.Generator.dot_sql" class="function"><a href="../generator.html#Generator.dot_sql">dot_sql</a></dd>
|
||||||
|
@ -2783,6 +2784,7 @@ Default: True</li>
|
||||||
<dd id="Python.Generator.conditionalinsert_sql" class="function"><a href="../generator.html#Generator.conditionalinsert_sql">conditionalinsert_sql</a></dd>
|
<dd id="Python.Generator.conditionalinsert_sql" class="function"><a href="../generator.html#Generator.conditionalinsert_sql">conditionalinsert_sql</a></dd>
|
||||||
<dd id="Python.Generator.multitableinserts_sql" class="function"><a href="../generator.html#Generator.multitableinserts_sql">multitableinserts_sql</a></dd>
|
<dd id="Python.Generator.multitableinserts_sql" class="function"><a href="../generator.html#Generator.multitableinserts_sql">multitableinserts_sql</a></dd>
|
||||||
<dd id="Python.Generator.oncondition_sql" class="function"><a href="../generator.html#Generator.oncondition_sql">oncondition_sql</a></dd>
|
<dd id="Python.Generator.oncondition_sql" class="function"><a href="../generator.html#Generator.oncondition_sql">oncondition_sql</a></dd>
|
||||||
|
<dd id="Python.Generator.jsonextractquote_sql" class="function"><a href="../generator.html#Generator.jsonextractquote_sql">jsonextractquote_sql</a></dd>
|
||||||
<dd id="Python.Generator.jsonexists_sql" class="function"><a href="../generator.html#Generator.jsonexists_sql">jsonexists_sql</a></dd>
|
<dd id="Python.Generator.jsonexists_sql" class="function"><a href="../generator.html#Generator.jsonexists_sql">jsonexists_sql</a></dd>
|
||||||
<dd id="Python.Generator.arrayagg_sql" class="function"><a href="../generator.html#Generator.arrayagg_sql">arrayagg_sql</a></dd>
|
<dd id="Python.Generator.arrayagg_sql" class="function"><a href="../generator.html#Generator.arrayagg_sql">arrayagg_sql</a></dd>
|
||||||
<dd id="Python.Generator.apply_sql" class="function"><a href="../generator.html#Generator.apply_sql">apply_sql</a></dd>
|
<dd id="Python.Generator.apply_sql" class="function"><a href="../generator.html#Generator.apply_sql">apply_sql</a></dd>
|
||||||
|
@ -2816,6 +2818,7 @@ Default: True</li>
|
||||||
<dd id="Python.Generator.analyzevalidate_sql" class="function"><a href="../generator.html#Generator.analyzevalidate_sql">analyzevalidate_sql</a></dd>
|
<dd id="Python.Generator.analyzevalidate_sql" class="function"><a href="../generator.html#Generator.analyzevalidate_sql">analyzevalidate_sql</a></dd>
|
||||||
<dd id="Python.Generator.analyze_sql" class="function"><a href="../generator.html#Generator.analyze_sql">analyze_sql</a></dd>
|
<dd id="Python.Generator.analyze_sql" class="function"><a href="../generator.html#Generator.analyze_sql">analyze_sql</a></dd>
|
||||||
<dd id="Python.Generator.xmltable_sql" class="function"><a href="../generator.html#Generator.xmltable_sql">xmltable_sql</a></dd>
|
<dd id="Python.Generator.xmltable_sql" class="function"><a href="../generator.html#Generator.xmltable_sql">xmltable_sql</a></dd>
|
||||||
|
<dd id="Python.Generator.xmlnamespace_sql" class="function"><a href="../generator.html#Generator.xmlnamespace_sql">xmlnamespace_sql</a></dd>
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
</dl>
|
</dl>
|
||||||
|
|
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -1920,7 +1920,7 @@ belong to some totally-ordered set.</p>
|
||||||
<section id="DATE_UNITS">
|
<section id="DATE_UNITS">
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">DATE_UNITS</span> =
|
<span class="name">DATE_UNITS</span> =
|
||||||
<span class="default_value">{'day', 'week', 'quarter', 'month', 'year', 'year_month'}</span>
|
<span class="default_value">{'year', 'day', 'month', 'year_month', 'quarter', 'week'}</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
|
@ -641,7 +641,7 @@
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">ALL_JSON_PATH_PARTS</span> =
|
<span class="name">ALL_JSON_PATH_PARTS</span> =
|
||||||
<input id="ALL_JSON_PATH_PARTS-view-value" class="view-value-toggle-state" type="checkbox" aria-hidden="true" tabindex="-1">
|
<input id="ALL_JSON_PATH_PARTS-view-value" class="view-value-toggle-state" type="checkbox" aria-hidden="true" tabindex="-1">
|
||||||
<label class="view-value-button pdoc-button" for="ALL_JSON_PATH_PARTS-view-value"></label><span class="default_value">{<class '<a href="expressions.html#JSONPathSlice">sqlglot.expressions.JSONPathSlice</a>'>, <class '<a href="expressions.html#JSONPathScript">sqlglot.expressions.JSONPathScript</a>'>, <class '<a href="expressions.html#JSONPathRoot">sqlglot.expressions.JSONPathRoot</a>'>, <class '<a href="expressions.html#JSONPathRecursive">sqlglot.expressions.JSONPathRecursive</a>'>, <class '<a href="expressions.html#JSONPathKey">sqlglot.expressions.JSONPathKey</a>'>, <class '<a href="expressions.html#JSONPathWildcard">sqlglot.expressions.JSONPathWildcard</a>'>, <class '<a href="expressions.html#JSONPathFilter">sqlglot.expressions.JSONPathFilter</a>'>, <class '<a href="expressions.html#JSONPathUnion">sqlglot.expressions.JSONPathUnion</a>'>, <class '<a href="expressions.html#JSONPathSubscript">sqlglot.expressions.JSONPathSubscript</a>'>, <class '<a href="expressions.html#JSONPathSelector">sqlglot.expressions.JSONPathSelector</a>'>}</span>
|
<label class="view-value-button pdoc-button" for="ALL_JSON_PATH_PARTS-view-value"></label><span class="default_value">{<class '<a href="expressions.html#JSONPathKey">sqlglot.expressions.JSONPathKey</a>'>, <class '<a href="expressions.html#JSONPathWildcard">sqlglot.expressions.JSONPathWildcard</a>'>, <class '<a href="expressions.html#JSONPathFilter">sqlglot.expressions.JSONPathFilter</a>'>, <class '<a href="expressions.html#JSONPathUnion">sqlglot.expressions.JSONPathUnion</a>'>, <class '<a href="expressions.html#JSONPathSubscript">sqlglot.expressions.JSONPathSubscript</a>'>, <class '<a href="expressions.html#JSONPathSelector">sqlglot.expressions.JSONPathSelector</a>'>, <class '<a href="expressions.html#JSONPathSlice">sqlglot.expressions.JSONPathSlice</a>'>, <class '<a href="expressions.html#JSONPathScript">sqlglot.expressions.JSONPathScript</a>'>, <class '<a href="expressions.html#JSONPathRoot">sqlglot.expressions.JSONPathRoot</a>'>, <class '<a href="expressions.html#JSONPathRecursive">sqlglot.expressions.JSONPathRecursive</a>'>}</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
File diff suppressed because one or more lines are too long
|
@ -581,7 +581,7 @@ queries if it would result in multiple table selects in a single query:</p>
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">UNMERGABLE_ARGS</span> =
|
<span class="name">UNMERGABLE_ARGS</span> =
|
||||||
<input id="UNMERGABLE_ARGS-view-value" class="view-value-toggle-state" type="checkbox" aria-hidden="true" tabindex="-1">
|
<input id="UNMERGABLE_ARGS-view-value" class="view-value-toggle-state" type="checkbox" aria-hidden="true" tabindex="-1">
|
||||||
<label class="view-value-button pdoc-button" for="UNMERGABLE_ARGS-view-value"></label><span class="default_value">{'operation_modifiers', 'cluster', 'kind', 'limit', 'sort', 'laterals', 'options', 'distinct', 'format', 'with', 'settings', 'connect', 'match', 'qualify', 'prewhere', 'group', 'locks', 'offset', 'sample', 'into', 'distribute', 'windows', 'pivots', 'having'}</span>
|
<label class="view-value-button pdoc-button" for="UNMERGABLE_ARGS-view-value"></label><span class="default_value">{'distinct', 'having', 'cluster', 'kind', 'qualify', 'windows', 'sort', 'pivots', 'offset', 'laterals', 'settings', 'prewhere', 'connect', 'format', 'options', 'with', 'sample', 'into', 'distribute', 'group', 'match', 'operation_modifiers', 'locks', 'limit'}</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
@ -1125,7 +1125,7 @@
|
||||||
</span><span id="L-749"><a href="#L-749"><span class="linenos"> 749</span></a>
|
</span><span id="L-749"><a href="#L-749"><span class="linenos"> 749</span></a>
|
||||||
</span><span id="L-750"><a href="#L-750"><span class="linenos"> 750</span></a> <span class="k">if</span> <span class="p">(</span>
|
</span><span id="L-750"><a href="#L-750"><span class="linenos"> 750</span></a> <span class="k">if</span> <span class="p">(</span>
|
||||||
</span><span id="L-751"><a href="#L-751"><span class="linenos"> 751</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">this</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Select</span><span class="p">)</span>
|
</span><span id="L-751"><a href="#L-751"><span class="linenos"> 751</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">this</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Select</span><span class="p">)</span>
|
||||||
</span><span id="L-752"><a href="#L-752"><span class="linenos"> 752</span></a> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">SubqueryPredicate</span><span class="p">)</span>
|
</span><span id="L-752"><a href="#L-752"><span class="linenos"> 752</span></a> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="p">(</span><span class="n">exp</span><span class="o">.</span><span class="n">SubqueryPredicate</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Bracket</span><span class="p">))</span>
|
||||||
</span><span id="L-753"><a href="#L-753"><span class="linenos"> 753</span></a> <span class="ow">and</span> <span class="p">(</span>
|
</span><span id="L-753"><a href="#L-753"><span class="linenos"> 753</span></a> <span class="ow">and</span> <span class="p">(</span>
|
||||||
</span><span id="L-754"><a href="#L-754"><span class="linenos"> 754</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="p">(</span><span class="n">exp</span><span class="o">.</span><span class="n">Condition</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Binary</span><span class="p">))</span>
|
</span><span id="L-754"><a href="#L-754"><span class="linenos"> 754</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="p">(</span><span class="n">exp</span><span class="o">.</span><span class="n">Condition</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Binary</span><span class="p">))</span>
|
||||||
</span><span id="L-755"><a href="#L-755"><span class="linenos"> 755</span></a> <span class="ow">or</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Paren</span><span class="p">)</span>
|
</span><span id="L-755"><a href="#L-755"><span class="linenos"> 755</span></a> <span class="ow">or</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Paren</span><span class="p">)</span>
|
||||||
|
@ -2951,7 +2951,7 @@ a b
|
||||||
</span><span id="simplify_parens-750"><a href="#simplify_parens-750"><span class="linenos">750</span></a>
|
</span><span id="simplify_parens-750"><a href="#simplify_parens-750"><span class="linenos">750</span></a>
|
||||||
</span><span id="simplify_parens-751"><a href="#simplify_parens-751"><span class="linenos">751</span></a> <span class="k">if</span> <span class="p">(</span>
|
</span><span id="simplify_parens-751"><a href="#simplify_parens-751"><span class="linenos">751</span></a> <span class="k">if</span> <span class="p">(</span>
|
||||||
</span><span id="simplify_parens-752"><a href="#simplify_parens-752"><span class="linenos">752</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">this</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Select</span><span class="p">)</span>
|
</span><span id="simplify_parens-752"><a href="#simplify_parens-752"><span class="linenos">752</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">this</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Select</span><span class="p">)</span>
|
||||||
</span><span id="simplify_parens-753"><a href="#simplify_parens-753"><span class="linenos">753</span></a> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">SubqueryPredicate</span><span class="p">)</span>
|
</span><span id="simplify_parens-753"><a href="#simplify_parens-753"><span class="linenos">753</span></a> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="p">(</span><span class="n">exp</span><span class="o">.</span><span class="n">SubqueryPredicate</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Bracket</span><span class="p">))</span>
|
||||||
</span><span id="simplify_parens-754"><a href="#simplify_parens-754"><span class="linenos">754</span></a> <span class="ow">and</span> <span class="p">(</span>
|
</span><span id="simplify_parens-754"><a href="#simplify_parens-754"><span class="linenos">754</span></a> <span class="ow">and</span> <span class="p">(</span>
|
||||||
</span><span id="simplify_parens-755"><a href="#simplify_parens-755"><span class="linenos">755</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="p">(</span><span class="n">exp</span><span class="o">.</span><span class="n">Condition</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Binary</span><span class="p">))</span>
|
</span><span id="simplify_parens-755"><a href="#simplify_parens-755"><span class="linenos">755</span></a> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="p">(</span><span class="n">exp</span><span class="o">.</span><span class="n">Condition</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Binary</span><span class="p">))</span>
|
||||||
</span><span id="simplify_parens-756"><a href="#simplify_parens-756"><span class="linenos">756</span></a> <span class="ow">or</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Paren</span><span class="p">)</span>
|
</span><span id="simplify_parens-756"><a href="#simplify_parens-756"><span class="linenos">756</span></a> <span class="ow">or</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">parent</span><span class="p">,</span> <span class="n">exp</span><span class="o">.</span><span class="n">Paren</span><span class="p">)</span>
|
||||||
|
@ -3238,7 +3238,7 @@ prefix are statically known.</p>
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">DATETRUNC_COMPARISONS</span> =
|
<span class="name">DATETRUNC_COMPARISONS</span> =
|
||||||
<input id="DATETRUNC_COMPARISONS-view-value" class="view-value-toggle-state" type="checkbox" aria-hidden="true" tabindex="-1">
|
<input id="DATETRUNC_COMPARISONS-view-value" class="view-value-toggle-state" type="checkbox" aria-hidden="true" tabindex="-1">
|
||||||
<label class="view-value-button pdoc-button" for="DATETRUNC_COMPARISONS-view-value"></label><span class="default_value">{<class '<a href="../expressions.html#LTE">sqlglot.expressions.LTE</a>'>, <class '<a href="../expressions.html#GT">sqlglot.expressions.GT</a>'>, <class '<a href="../expressions.html#LT">sqlglot.expressions.LT</a>'>, <class '<a href="../expressions.html#NEQ">sqlglot.expressions.NEQ</a>'>, <class '<a href="../expressions.html#EQ">sqlglot.expressions.EQ</a>'>, <class '<a href="../expressions.html#In">sqlglot.expressions.In</a>'>, <class '<a href="../expressions.html#GTE">sqlglot.expressions.GTE</a>'>}</span>
|
<label class="view-value-button pdoc-button" for="DATETRUNC_COMPARISONS-view-value"></label><span class="default_value">{<class '<a href="../expressions.html#GTE">sqlglot.expressions.GTE</a>'>, <class '<a href="../expressions.html#EQ">sqlglot.expressions.EQ</a>'>, <class '<a href="../expressions.html#LTE">sqlglot.expressions.LTE</a>'>, <class '<a href="../expressions.html#LT">sqlglot.expressions.LT</a>'>, <class '<a href="../expressions.html#GT">sqlglot.expressions.GT</a>'>, <class '<a href="../expressions.html#In">sqlglot.expressions.In</a>'>, <class '<a href="../expressions.html#NEQ">sqlglot.expressions.NEQ</a>'>}</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
@ -3322,7 +3322,7 @@ prefix are statically known.</p>
|
||||||
<section id="JOINS">
|
<section id="JOINS">
|
||||||
<div class="attr variable">
|
<div class="attr variable">
|
||||||
<span class="name">JOINS</span> =
|
<span class="name">JOINS</span> =
|
||||||
<span class="default_value">{('RIGHT', ''), ('', 'INNER'), ('RIGHT', 'OUTER'), ('', '')}</span>
|
<span class="default_value">{('RIGHT', 'OUTER'), ('', 'INNER'), ('', ''), ('RIGHT', '')}</span>
|
||||||
|
|
||||||
|
|
||||||
</div>
|
</div>
|
||||||
|
|
30441
docs/sqlglot/parser.html
30441
docs/sqlglot/parser.html
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
Load diff
|
@ -32,6 +32,18 @@ def _generate_as_hive(expression: exp.Expression) -> bool:
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def _is_iceberg_table(properties: exp.Properties) -> bool:
|
||||||
|
table_type_property = next(
|
||||||
|
(
|
||||||
|
p
|
||||||
|
for p in properties.expressions
|
||||||
|
if isinstance(p, exp.Property) and p.name == "table_type"
|
||||||
|
),
|
||||||
|
None,
|
||||||
|
)
|
||||||
|
return bool(table_type_property and table_type_property.text("value").lower() == "iceberg")
|
||||||
|
|
||||||
|
|
||||||
def _location_property_sql(self: Athena.Generator, e: exp.LocationProperty):
|
def _location_property_sql(self: Athena.Generator, e: exp.LocationProperty):
|
||||||
# If table_type='iceberg', the LocationProperty is called 'location'
|
# If table_type='iceberg', the LocationProperty is called 'location'
|
||||||
# Otherwise, it's called 'external_location'
|
# Otherwise, it's called 'external_location'
|
||||||
|
@ -40,20 +52,25 @@ def _location_property_sql(self: Athena.Generator, e: exp.LocationProperty):
|
||||||
prop_name = "external_location"
|
prop_name = "external_location"
|
||||||
|
|
||||||
if isinstance(e.parent, exp.Properties):
|
if isinstance(e.parent, exp.Properties):
|
||||||
table_type_property = next(
|
if _is_iceberg_table(e.parent):
|
||||||
(
|
|
||||||
p
|
|
||||||
for p in e.parent.expressions
|
|
||||||
if isinstance(p, exp.Property) and p.name == "table_type"
|
|
||||||
),
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
if table_type_property and table_type_property.text("value").lower() == "iceberg":
|
|
||||||
prop_name = "location"
|
prop_name = "location"
|
||||||
|
|
||||||
return f"{prop_name}={self.sql(e, 'this')}"
|
return f"{prop_name}={self.sql(e, 'this')}"
|
||||||
|
|
||||||
|
|
||||||
|
def _partitioned_by_property_sql(self: Athena.Generator, e: exp.PartitionedByProperty) -> str:
|
||||||
|
# If table_type='iceberg' then the table property for partitioning is called 'partitioning'
|
||||||
|
# If table_type='hive' it's called 'partitioned_by'
|
||||||
|
# ref: https://docs.aws.amazon.com/athena/latest/ug/create-table-as.html#ctas-table-properties
|
||||||
|
|
||||||
|
prop_name = "partitioned_by"
|
||||||
|
if isinstance(e.parent, exp.Properties):
|
||||||
|
if _is_iceberg_table(e.parent):
|
||||||
|
prop_name = "partitioning"
|
||||||
|
|
||||||
|
return f"{prop_name}={self.sql(e, 'this')}"
|
||||||
|
|
||||||
|
|
||||||
class Athena(Trino):
|
class Athena(Trino):
|
||||||
"""
|
"""
|
||||||
Over the years, it looks like AWS has taken various execution engines, bolted on AWS-specific modifications and then
|
Over the years, it looks like AWS has taken various execution engines, bolted on AWS-specific modifications and then
|
||||||
|
@ -132,7 +149,7 @@ class Athena(Trino):
|
||||||
TRANSFORMS = {
|
TRANSFORMS = {
|
||||||
**Trino.Generator.TRANSFORMS,
|
**Trino.Generator.TRANSFORMS,
|
||||||
exp.FileFormatProperty: lambda self, e: f"format={self.sql(e, 'this')}",
|
exp.FileFormatProperty: lambda self, e: f"format={self.sql(e, 'this')}",
|
||||||
exp.PartitionedByProperty: lambda self, e: f"partitioned_by={self.sql(e, 'this')}",
|
exp.PartitionedByProperty: _partitioned_by_property_sql,
|
||||||
exp.LocationProperty: _location_property_sql,
|
exp.LocationProperty: _location_property_sql,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -26,7 +26,7 @@ from sqlglot.dialects.dialect import (
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
ts_or_ds_add_cast,
|
ts_or_ds_add_cast,
|
||||||
unit_to_var,
|
unit_to_var,
|
||||||
str_position_sql,
|
strposition_sql,
|
||||||
)
|
)
|
||||||
from sqlglot.helper import seq_get, split_num_words
|
from sqlglot.helper import seq_get, split_num_words
|
||||||
from sqlglot.tokens import TokenType
|
from sqlglot.tokens import TokenType
|
||||||
|
@ -934,7 +934,11 @@ class BigQuery(Dialect):
|
||||||
"DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
|
"DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
|
||||||
),
|
),
|
||||||
exp.String: rename_func("STRING"),
|
exp.String: rename_func("STRING"),
|
||||||
exp.StrPosition: str_position_sql,
|
exp.StrPosition: lambda self, e: (
|
||||||
|
strposition_sql(
|
||||||
|
self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
|
||||||
|
)
|
||||||
|
),
|
||||||
exp.StrToDate: _str_to_datetime_sql,
|
exp.StrToDate: _str_to_datetime_sql,
|
||||||
exp.StrToTime: _str_to_datetime_sql,
|
exp.StrToTime: _str_to_datetime_sql,
|
||||||
exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
|
exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
|
||||||
|
@ -957,6 +961,7 @@ class BigQuery(Dialect):
|
||||||
exp.Uuid: lambda *_: "GENERATE_UUID()",
|
exp.Uuid: lambda *_: "GENERATE_UUID()",
|
||||||
exp.Values: _derived_table_values_to_unnest,
|
exp.Values: _derived_table_values_to_unnest,
|
||||||
exp.VariancePop: rename_func("VAR_POP"),
|
exp.VariancePop: rename_func("VAR_POP"),
|
||||||
|
exp.SafeDivide: rename_func("SAFE_DIVIDE"),
|
||||||
}
|
}
|
||||||
|
|
||||||
SUPPORTED_JSON_PATH_PARTS = {
|
SUPPORTED_JSON_PATH_PARTS = {
|
||||||
|
|
|
@ -16,6 +16,7 @@ from sqlglot.dialects.dialect import (
|
||||||
build_json_extract_path,
|
build_json_extract_path,
|
||||||
rename_func,
|
rename_func,
|
||||||
sha256_sql,
|
sha256_sql,
|
||||||
|
strposition_sql,
|
||||||
var_map_sql,
|
var_map_sql,
|
||||||
timestamptrunc_sql,
|
timestamptrunc_sql,
|
||||||
unit_to_var,
|
unit_to_var,
|
||||||
|
@ -200,6 +201,7 @@ class ClickHouse(Dialect):
|
||||||
"DATE32": TokenType.DATE32,
|
"DATE32": TokenType.DATE32,
|
||||||
"DATETIME64": TokenType.DATETIME64,
|
"DATETIME64": TokenType.DATETIME64,
|
||||||
"DICTIONARY": TokenType.DICTIONARY,
|
"DICTIONARY": TokenType.DICTIONARY,
|
||||||
|
"DYNAMIC": TokenType.DYNAMIC,
|
||||||
"ENUM8": TokenType.ENUM8,
|
"ENUM8": TokenType.ENUM8,
|
||||||
"ENUM16": TokenType.ENUM16,
|
"ENUM16": TokenType.ENUM16,
|
||||||
"FINAL": TokenType.FINAL,
|
"FINAL": TokenType.FINAL,
|
||||||
|
@ -447,6 +449,9 @@ class ClickHouse(Dialect):
|
||||||
|
|
||||||
FUNCTION_PARSERS.pop("MATCH")
|
FUNCTION_PARSERS.pop("MATCH")
|
||||||
|
|
||||||
|
PROPERTY_PARSERS = parser.Parser.PROPERTY_PARSERS.copy()
|
||||||
|
PROPERTY_PARSERS.pop("DYNAMIC")
|
||||||
|
|
||||||
NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy()
|
NO_PAREN_FUNCTION_PARSERS = parser.Parser.NO_PAREN_FUNCTION_PARSERS.copy()
|
||||||
NO_PAREN_FUNCTION_PARSERS.pop("ANY")
|
NO_PAREN_FUNCTION_PARSERS.pop("ANY")
|
||||||
|
|
||||||
|
@ -953,6 +958,7 @@ class ClickHouse(Dialect):
|
||||||
exp.DataType.Type.MULTIPOLYGON: "MultiPolygon",
|
exp.DataType.Type.MULTIPOLYGON: "MultiPolygon",
|
||||||
exp.DataType.Type.AGGREGATEFUNCTION: "AggregateFunction",
|
exp.DataType.Type.AGGREGATEFUNCTION: "AggregateFunction",
|
||||||
exp.DataType.Type.SIMPLEAGGREGATEFUNCTION: "SimpleAggregateFunction",
|
exp.DataType.Type.SIMPLEAGGREGATEFUNCTION: "SimpleAggregateFunction",
|
||||||
|
exp.DataType.Type.DYNAMIC: "Dynamic",
|
||||||
}
|
}
|
||||||
|
|
||||||
TRANSFORMS = {
|
TRANSFORMS = {
|
||||||
|
@ -992,8 +998,12 @@ class ClickHouse(Dialect):
|
||||||
exp.RegexpLike: lambda self, e: self.func("match", e.this, e.expression),
|
exp.RegexpLike: lambda self, e: self.func("match", e.this, e.expression),
|
||||||
exp.Rand: rename_func("randCanonical"),
|
exp.Rand: rename_func("randCanonical"),
|
||||||
exp.StartsWith: rename_func("startsWith"),
|
exp.StartsWith: rename_func("startsWith"),
|
||||||
exp.StrPosition: lambda self, e: self.func(
|
exp.StrPosition: lambda self, e: strposition_sql(
|
||||||
"position", e.this, e.args.get("substr"), e.args.get("position")
|
self,
|
||||||
|
e,
|
||||||
|
func_name="POSITION",
|
||||||
|
supports_position=True,
|
||||||
|
use_ansi_position=False,
|
||||||
),
|
),
|
||||||
exp.TimeToStr: lambda self, e: self.func(
|
exp.TimeToStr: lambda self, e: self.func(
|
||||||
"formatDateTime", e.this, self.format_time(e), e.args.get("zone")
|
"formatDateTime", e.this, self.format_time(e), e.args.get("zone")
|
||||||
|
|
|
@ -1037,12 +1037,6 @@ def no_recursive_cte_sql(self: Generator, expression: exp.With) -> str:
|
||||||
return self.with_sql(expression)
|
return self.with_sql(expression)
|
||||||
|
|
||||||
|
|
||||||
def no_safe_divide_sql(self: Generator, expression: exp.SafeDivide, if_sql: str = "IF") -> str:
|
|
||||||
n = self.sql(expression, "this")
|
|
||||||
d = self.sql(expression, "expression")
|
|
||||||
return f"{if_sql}(({d}) <> 0, ({n}) / ({d}), NULL)"
|
|
||||||
|
|
||||||
|
|
||||||
def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str:
|
def no_tablesample_sql(self: Generator, expression: exp.TableSample) -> str:
|
||||||
self.unsupported("TABLESAMPLE unsupported")
|
self.unsupported("TABLESAMPLE unsupported")
|
||||||
return self.sql(expression.this)
|
return self.sql(expression.this)
|
||||||
|
@ -1073,36 +1067,47 @@ def property_sql(self: Generator, expression: exp.Property) -> str:
|
||||||
return f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}"
|
return f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}"
|
||||||
|
|
||||||
|
|
||||||
def str_position_sql(
|
def strposition_sql(
|
||||||
self: Generator,
|
self: Generator,
|
||||||
expression: exp.StrPosition,
|
expression: exp.StrPosition,
|
||||||
generate_instance: bool = False,
|
func_name: str = "STRPOS",
|
||||||
str_position_func_name: str = "STRPOS",
|
supports_position: bool = False,
|
||||||
|
supports_occurrence: bool = False,
|
||||||
|
use_ansi_position: bool = True,
|
||||||
) -> str:
|
) -> str:
|
||||||
this = self.sql(expression, "this")
|
string = expression.this
|
||||||
substr = self.sql(expression, "substr")
|
substr = expression.args.get("substr")
|
||||||
position = self.sql(expression, "position")
|
position = expression.args.get("position")
|
||||||
instance = expression.args.get("instance") if generate_instance else None
|
occurrence = expression.args.get("occurrence")
|
||||||
position_offset = ""
|
|
||||||
|
|
||||||
if position:
|
|
||||||
# Normalize third 'pos' argument into 'SUBSTR(..) + offset' across dialects
|
|
||||||
this = self.func("SUBSTR", this, position)
|
|
||||||
position_offset = f" + {position} - 1"
|
|
||||||
|
|
||||||
strpos_sql = self.func(str_position_func_name, this, substr, instance)
|
|
||||||
|
|
||||||
if position_offset:
|
|
||||||
zero = exp.Literal.number(0)
|
zero = exp.Literal.number(0)
|
||||||
# If match is not found (returns 0) the position offset should not be applied
|
one = exp.Literal.number(1)
|
||||||
case = exp.If(
|
|
||||||
this=exp.EQ(this=strpos_sql, expression=zero),
|
|
||||||
true=zero,
|
|
||||||
false=strpos_sql + position_offset,
|
|
||||||
)
|
|
||||||
strpos_sql = self.sql(case)
|
|
||||||
|
|
||||||
return strpos_sql
|
if supports_occurrence and occurrence and supports_position and not position:
|
||||||
|
position = one
|
||||||
|
|
||||||
|
transpile_position = position and not supports_position
|
||||||
|
if transpile_position:
|
||||||
|
string = exp.Substring(this=string, start=position)
|
||||||
|
|
||||||
|
if func_name == "POSITION" and use_ansi_position:
|
||||||
|
func = exp.Anonymous(this=func_name, expressions=[exp.In(this=substr, field=string)])
|
||||||
|
else:
|
||||||
|
args = [substr, string] if func_name in ("LOCATE", "CHARINDEX") else [string, substr]
|
||||||
|
if supports_position:
|
||||||
|
args.append(position)
|
||||||
|
if occurrence:
|
||||||
|
if supports_occurrence:
|
||||||
|
args.append(occurrence)
|
||||||
|
else:
|
||||||
|
self.unsupported(f"{func_name} does not support the occurrence parameter.")
|
||||||
|
func = exp.Anonymous(this=func_name, expressions=args)
|
||||||
|
|
||||||
|
if transpile_position:
|
||||||
|
func_with_offset = exp.Sub(this=func + position, expression=one)
|
||||||
|
func_wrapped = exp.If(this=func.eq(zero), true=zero, false=func_with_offset)
|
||||||
|
return self.sql(func_wrapped)
|
||||||
|
|
||||||
|
return self.sql(func)
|
||||||
|
|
||||||
|
|
||||||
def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str:
|
def struct_extract_sql(self: Generator, expression: exp.StructExtract) -> str:
|
||||||
|
@ -1275,18 +1280,6 @@ def no_datetime_sql(self: Generator, expression: exp.Datetime) -> str:
|
||||||
return self.sql(exp.cast(exp.Add(this=this, expression=expr), exp.DataType.Type.TIMESTAMP))
|
return self.sql(exp.cast(exp.Add(this=this, expression=expr), exp.DataType.Type.TIMESTAMP))
|
||||||
|
|
||||||
|
|
||||||
def locate_to_strposition(args: t.List) -> exp.Expression:
|
|
||||||
return exp.StrPosition(
|
|
||||||
this=seq_get(args, 1), substr=seq_get(args, 0), position=seq_get(args, 2)
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def strposition_to_locate_sql(self: Generator, expression: exp.StrPosition) -> str:
|
|
||||||
return self.func(
|
|
||||||
"LOCATE", expression.args.get("substr"), expression.this, expression.args.get("position")
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def left_to_substring_sql(self: Generator, expression: exp.Left) -> str:
|
def left_to_substring_sql(self: Generator, expression: exp.Left) -> str:
|
||||||
return self.sql(
|
return self.sql(
|
||||||
exp.Substring(
|
exp.Substring(
|
||||||
|
|
|
@ -8,7 +8,7 @@ from sqlglot.dialects.dialect import (
|
||||||
build_formatted_time,
|
build_formatted_time,
|
||||||
no_trycast_sql,
|
no_trycast_sql,
|
||||||
rename_func,
|
rename_func,
|
||||||
str_position_sql,
|
strposition_sql,
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
)
|
)
|
||||||
from sqlglot.dialects.mysql import date_add_sql
|
from sqlglot.dialects.mysql import date_add_sql
|
||||||
|
@ -136,12 +136,12 @@ class Drill(Dialect):
|
||||||
),
|
),
|
||||||
exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
|
exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
|
||||||
exp.RegexpLike: rename_func("REGEXP_MATCHES"),
|
exp.RegexpLike: rename_func("REGEXP_MATCHES"),
|
||||||
exp.StrPosition: str_position_sql,
|
|
||||||
exp.StrToDate: _str_to_date,
|
exp.StrToDate: _str_to_date,
|
||||||
exp.Pow: rename_func("POW"),
|
exp.Pow: rename_func("POW"),
|
||||||
exp.Select: transforms.preprocess(
|
exp.Select: transforms.preprocess(
|
||||||
[transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
|
[transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
|
||||||
),
|
),
|
||||||
|
exp.StrPosition: strposition_sql,
|
||||||
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
|
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
|
||||||
exp.TimeStrToDate: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.DATE)),
|
exp.TimeStrToDate: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.DATE)),
|
||||||
exp.TimeStrToTime: timestrtotime_sql,
|
exp.TimeStrToTime: timestrtotime_sql,
|
||||||
|
|
|
@ -20,12 +20,11 @@ from sqlglot.dialects.dialect import (
|
||||||
build_formatted_time,
|
build_formatted_time,
|
||||||
inline_array_unless_query,
|
inline_array_unless_query,
|
||||||
no_comment_column_constraint_sql,
|
no_comment_column_constraint_sql,
|
||||||
no_safe_divide_sql,
|
|
||||||
no_time_sql,
|
no_time_sql,
|
||||||
no_timestamp_sql,
|
no_timestamp_sql,
|
||||||
pivot_column_names,
|
pivot_column_names,
|
||||||
rename_func,
|
rename_func,
|
||||||
str_position_sql,
|
strposition_sql,
|
||||||
str_to_time_sql,
|
str_to_time_sql,
|
||||||
timestamptrunc_sql,
|
timestamptrunc_sql,
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
|
@ -608,16 +607,18 @@ class DuckDB(Dialect):
|
||||||
e.args.get("modifiers"),
|
e.args.get("modifiers"),
|
||||||
),
|
),
|
||||||
exp.RegexpLike: rename_func("REGEXP_MATCHES"),
|
exp.RegexpLike: rename_func("REGEXP_MATCHES"),
|
||||||
|
exp.RegexpILike: lambda self, e: self.func(
|
||||||
|
"REGEXP_MATCHES", e.this, e.expression, exp.Literal.string("i")
|
||||||
|
),
|
||||||
exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
|
exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
|
||||||
exp.Return: lambda self, e: self.sql(e, "this"),
|
exp.Return: lambda self, e: self.sql(e, "this"),
|
||||||
exp.ReturnsProperty: lambda self, e: "TABLE" if isinstance(e.this, exp.Schema) else "",
|
exp.ReturnsProperty: lambda self, e: "TABLE" if isinstance(e.this, exp.Schema) else "",
|
||||||
exp.Rand: rename_func("RANDOM"),
|
exp.Rand: rename_func("RANDOM"),
|
||||||
exp.SafeDivide: no_safe_divide_sql,
|
|
||||||
exp.SHA: rename_func("SHA1"),
|
exp.SHA: rename_func("SHA1"),
|
||||||
exp.SHA2: sha256_sql,
|
exp.SHA2: sha256_sql,
|
||||||
exp.Split: rename_func("STR_SPLIT"),
|
exp.Split: rename_func("STR_SPLIT"),
|
||||||
exp.SortArray: _sort_array_sql,
|
exp.SortArray: _sort_array_sql,
|
||||||
exp.StrPosition: str_position_sql,
|
exp.StrPosition: strposition_sql,
|
||||||
exp.StrToUnix: lambda self, e: self.func(
|
exp.StrToUnix: lambda self, e: self.func(
|
||||||
"EPOCH", self.func("STRPTIME", e.this, self.format_time(e))
|
"EPOCH", self.func("STRPTIME", e.this, self.format_time(e))
|
||||||
),
|
),
|
||||||
|
|
|
@ -15,18 +15,16 @@ from sqlglot.dialects.dialect import (
|
||||||
if_sql,
|
if_sql,
|
||||||
is_parse_json,
|
is_parse_json,
|
||||||
left_to_substring_sql,
|
left_to_substring_sql,
|
||||||
locate_to_strposition,
|
|
||||||
max_or_greatest,
|
max_or_greatest,
|
||||||
min_or_least,
|
min_or_least,
|
||||||
no_ilike_sql,
|
no_ilike_sql,
|
||||||
no_recursive_cte_sql,
|
no_recursive_cte_sql,
|
||||||
no_safe_divide_sql,
|
|
||||||
no_trycast_sql,
|
no_trycast_sql,
|
||||||
regexp_extract_sql,
|
regexp_extract_sql,
|
||||||
regexp_replace_sql,
|
regexp_replace_sql,
|
||||||
rename_func,
|
rename_func,
|
||||||
right_to_substring_sql,
|
right_to_substring_sql,
|
||||||
strposition_to_locate_sql,
|
strposition_sql,
|
||||||
struct_extract_sql,
|
struct_extract_sql,
|
||||||
time_format,
|
time_format,
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
|
@ -306,7 +304,6 @@ class Hive(Dialect):
|
||||||
"GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
|
"GET_JSON_OBJECT": exp.JSONExtractScalar.from_arg_list,
|
||||||
"LAST": _build_with_ignore_nulls(exp.Last),
|
"LAST": _build_with_ignore_nulls(exp.Last),
|
||||||
"LAST_VALUE": _build_with_ignore_nulls(exp.LastValue),
|
"LAST_VALUE": _build_with_ignore_nulls(exp.LastValue),
|
||||||
"LOCATE": locate_to_strposition,
|
|
||||||
"MAP": parser.build_var_map,
|
"MAP": parser.build_var_map,
|
||||||
"MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
|
"MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate.from_arg_list(args)),
|
||||||
"PERCENTILE": exp.Quantile.from_arg_list,
|
"PERCENTILE": exp.Quantile.from_arg_list,
|
||||||
|
@ -550,7 +547,6 @@ class Hive(Dialect):
|
||||||
exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
|
exp.RegexpLike: lambda self, e: self.binary(e, "RLIKE"),
|
||||||
exp.RegexpSplit: rename_func("SPLIT"),
|
exp.RegexpSplit: rename_func("SPLIT"),
|
||||||
exp.Right: right_to_substring_sql,
|
exp.Right: right_to_substring_sql,
|
||||||
exp.SafeDivide: no_safe_divide_sql,
|
|
||||||
exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
|
exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
|
||||||
exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
|
exp.ArrayUniqueAgg: rename_func("COLLECT_SET"),
|
||||||
exp.Split: lambda self, e: self.func(
|
exp.Split: lambda self, e: self.func(
|
||||||
|
@ -564,7 +560,9 @@ class Hive(Dialect):
|
||||||
transforms.any_to_exists,
|
transforms.any_to_exists,
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.StrPosition: strposition_to_locate_sql,
|
exp.StrPosition: lambda self, e: strposition_sql(
|
||||||
|
self, e, func_name="LOCATE", supports_position=True
|
||||||
|
),
|
||||||
exp.StrToDate: _str_to_date_sql,
|
exp.StrToDate: _str_to_date_sql,
|
||||||
exp.StrToTime: _str_to_time_sql,
|
exp.StrToTime: _str_to_time_sql,
|
||||||
exp.StrToUnix: _str_to_unix_sql,
|
exp.StrToUnix: _str_to_unix_sql,
|
||||||
|
|
|
@ -12,7 +12,6 @@ from sqlglot.dialects.dialect import (
|
||||||
build_formatted_time,
|
build_formatted_time,
|
||||||
isnull_to_is_null,
|
isnull_to_is_null,
|
||||||
length_or_char_length_sql,
|
length_or_char_length_sql,
|
||||||
locate_to_strposition,
|
|
||||||
max_or_greatest,
|
max_or_greatest,
|
||||||
min_or_least,
|
min_or_least,
|
||||||
no_ilike_sql,
|
no_ilike_sql,
|
||||||
|
@ -23,7 +22,7 @@ from sqlglot.dialects.dialect import (
|
||||||
build_date_delta,
|
build_date_delta,
|
||||||
build_date_delta_with_interval,
|
build_date_delta_with_interval,
|
||||||
rename_func,
|
rename_func,
|
||||||
strposition_to_locate_sql,
|
strposition_sql,
|
||||||
unit_to_var,
|
unit_to_var,
|
||||||
trim_sql,
|
trim_sql,
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
|
@ -311,7 +310,6 @@ class MySQL(Dialect):
|
||||||
"FROM_UNIXTIME": build_formatted_time(exp.UnixToTime, "mysql"),
|
"FROM_UNIXTIME": build_formatted_time(exp.UnixToTime, "mysql"),
|
||||||
"ISNULL": isnull_to_is_null,
|
"ISNULL": isnull_to_is_null,
|
||||||
"LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
|
"LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
|
||||||
"LOCATE": locate_to_strposition,
|
|
||||||
"MAKETIME": exp.TimeFromParts.from_arg_list,
|
"MAKETIME": exp.TimeFromParts.from_arg_list,
|
||||||
"MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
|
"MONTH": lambda args: exp.Month(this=exp.TsOrDsToDate(this=seq_get(args, 0))),
|
||||||
"MONTHNAME": lambda args: exp.TimeToStr(
|
"MONTHNAME": lambda args: exp.TimeToStr(
|
||||||
|
@ -750,7 +748,9 @@ class MySQL(Dialect):
|
||||||
transforms.unnest_generate_date_array_using_recursive_cte,
|
transforms.unnest_generate_date_array_using_recursive_cte,
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.StrPosition: strposition_to_locate_sql,
|
exp.StrPosition: lambda self, e: strposition_sql(
|
||||||
|
self, e, func_name="LOCATE", supports_position=True
|
||||||
|
),
|
||||||
exp.StrToDate: _str_to_date_sql,
|
exp.StrToDate: _str_to_date_sql,
|
||||||
exp.StrToTime: _str_to_date_sql,
|
exp.StrToTime: _str_to_date_sql,
|
||||||
exp.Stuff: rename_func("INSERT"),
|
exp.Stuff: rename_func("INSERT"),
|
||||||
|
|
|
@ -9,7 +9,7 @@ from sqlglot.dialects.dialect import (
|
||||||
build_formatted_time,
|
build_formatted_time,
|
||||||
no_ilike_sql,
|
no_ilike_sql,
|
||||||
rename_func,
|
rename_func,
|
||||||
str_position_sql,
|
strposition_sql,
|
||||||
to_number_with_nls_param,
|
to_number_with_nls_param,
|
||||||
trim_sql,
|
trim_sql,
|
||||||
)
|
)
|
||||||
|
@ -266,10 +266,10 @@ class Oracle(Dialect):
|
||||||
|
|
||||||
TYPE_MAPPING = {
|
TYPE_MAPPING = {
|
||||||
**generator.Generator.TYPE_MAPPING,
|
**generator.Generator.TYPE_MAPPING,
|
||||||
exp.DataType.Type.TINYINT: "NUMBER",
|
exp.DataType.Type.TINYINT: "SMALLINT",
|
||||||
exp.DataType.Type.SMALLINT: "NUMBER",
|
exp.DataType.Type.SMALLINT: "SMALLINT",
|
||||||
exp.DataType.Type.INT: "NUMBER",
|
exp.DataType.Type.INT: "INT",
|
||||||
exp.DataType.Type.BIGINT: "NUMBER",
|
exp.DataType.Type.BIGINT: "INT",
|
||||||
exp.DataType.Type.DECIMAL: "NUMBER",
|
exp.DataType.Type.DECIMAL: "NUMBER",
|
||||||
exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
|
exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
|
||||||
exp.DataType.Type.VARCHAR: "VARCHAR2",
|
exp.DataType.Type.VARCHAR: "VARCHAR2",
|
||||||
|
@ -300,8 +300,10 @@ class Oracle(Dialect):
|
||||||
transforms.eliminate_qualify,
|
transforms.eliminate_qualify,
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.StrPosition: lambda self, e: str_position_sql(
|
exp.StrPosition: lambda self, e: (
|
||||||
self, e, str_position_func_name="INSTR"
|
strposition_sql(
|
||||||
|
self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
|
||||||
|
)
|
||||||
),
|
),
|
||||||
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
|
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
|
||||||
exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
|
exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
|
||||||
|
|
|
@ -32,7 +32,7 @@ from sqlglot.dialects.dialect import (
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
trim_sql,
|
trim_sql,
|
||||||
ts_or_ds_add_cast,
|
ts_or_ds_add_cast,
|
||||||
str_position_sql,
|
strposition_sql,
|
||||||
)
|
)
|
||||||
from sqlglot.helper import is_int, seq_get
|
from sqlglot.helper import is_int, seq_get
|
||||||
from sqlglot.parser import binary_range_parser
|
from sqlglot.parser import binary_range_parser
|
||||||
|
@ -584,7 +584,7 @@ class Postgres(Dialect):
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.SHA2: sha256_sql,
|
exp.SHA2: sha256_sql,
|
||||||
exp.StrPosition: str_position_sql,
|
exp.StrPosition: lambda self, e: strposition_sql(self, e, func_name="POSITION"),
|
||||||
exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
|
exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
|
||||||
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
|
exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
|
||||||
exp.StructExtract: struct_extract_sql,
|
exp.StructExtract: struct_extract_sql,
|
||||||
|
@ -611,6 +611,7 @@ class Postgres(Dialect):
|
||||||
exp.UnixToTime: _unix_to_time_sql,
|
exp.UnixToTime: _unix_to_time_sql,
|
||||||
exp.Levenshtein: _levenshtein_sql,
|
exp.Levenshtein: _levenshtein_sql,
|
||||||
}
|
}
|
||||||
|
|
||||||
TRANSFORMS.pop(exp.CommentColumnConstraint)
|
TRANSFORMS.pop(exp.CommentColumnConstraint)
|
||||||
|
|
||||||
PROPERTIES_LOCATION = {
|
PROPERTIES_LOCATION = {
|
||||||
|
|
|
@ -16,14 +16,13 @@ from sqlglot.dialects.dialect import (
|
||||||
left_to_substring_sql,
|
left_to_substring_sql,
|
||||||
no_ilike_sql,
|
no_ilike_sql,
|
||||||
no_pivot_sql,
|
no_pivot_sql,
|
||||||
no_safe_divide_sql,
|
|
||||||
no_timestamp_sql,
|
no_timestamp_sql,
|
||||||
regexp_extract_sql,
|
regexp_extract_sql,
|
||||||
rename_func,
|
rename_func,
|
||||||
right_to_substring_sql,
|
right_to_substring_sql,
|
||||||
sha256_sql,
|
sha256_sql,
|
||||||
|
strposition_sql,
|
||||||
struct_extract_sql,
|
struct_extract_sql,
|
||||||
str_position_sql,
|
|
||||||
timestamptrunc_sql,
|
timestamptrunc_sql,
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
ts_or_ds_add_cast,
|
ts_or_ds_add_cast,
|
||||||
|
@ -292,7 +291,7 @@ class Presto(Dialect):
|
||||||
"SET_AGG": exp.ArrayUniqueAgg.from_arg_list,
|
"SET_AGG": exp.ArrayUniqueAgg.from_arg_list,
|
||||||
"SPLIT_TO_MAP": exp.StrToMap.from_arg_list,
|
"SPLIT_TO_MAP": exp.StrToMap.from_arg_list,
|
||||||
"STRPOS": lambda args: exp.StrPosition(
|
"STRPOS": lambda args: exp.StrPosition(
|
||||||
this=seq_get(args, 0), substr=seq_get(args, 1), instance=seq_get(args, 2)
|
this=seq_get(args, 0), substr=seq_get(args, 1), occurrence=seq_get(args, 2)
|
||||||
),
|
),
|
||||||
"TO_CHAR": _build_to_char,
|
"TO_CHAR": _build_to_char,
|
||||||
"TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
|
"TO_UNIXTIME": exp.TimeToUnix.from_arg_list,
|
||||||
|
@ -418,7 +417,6 @@ class Presto(Dialect):
|
||||||
exp.RegexpExtract: regexp_extract_sql,
|
exp.RegexpExtract: regexp_extract_sql,
|
||||||
exp.RegexpExtractAll: regexp_extract_sql,
|
exp.RegexpExtractAll: regexp_extract_sql,
|
||||||
exp.Right: right_to_substring_sql,
|
exp.Right: right_to_substring_sql,
|
||||||
exp.SafeDivide: no_safe_divide_sql,
|
|
||||||
exp.Schema: _schema_sql,
|
exp.Schema: _schema_sql,
|
||||||
exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
|
exp.SchemaCommentProperty: lambda self, e: self.naked_property(e),
|
||||||
exp.Select: transforms.preprocess(
|
exp.Select: transforms.preprocess(
|
||||||
|
@ -430,7 +428,7 @@ class Presto(Dialect):
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.SortArray: _no_sort_array,
|
exp.SortArray: _no_sort_array,
|
||||||
exp.StrPosition: lambda self, e: str_position_sql(self, e, generate_instance=True),
|
exp.StrPosition: lambda self, e: strposition_sql(self, e, supports_occurrence=True),
|
||||||
exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
|
exp.StrToDate: lambda self, e: f"CAST({_str_to_time_sql(self, e)} AS DATE)",
|
||||||
exp.StrToMap: rename_func("SPLIT_TO_MAP"),
|
exp.StrToMap: rename_func("SPLIT_TO_MAP"),
|
||||||
exp.StrToTime: _str_to_time_sql,
|
exp.StrToTime: _str_to_time_sql,
|
||||||
|
|
|
@ -22,8 +22,8 @@ from sqlglot.dialects.dialect import (
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
var_map_sql,
|
var_map_sql,
|
||||||
map_date_part,
|
map_date_part,
|
||||||
no_safe_divide_sql,
|
|
||||||
no_timestamp_sql,
|
no_timestamp_sql,
|
||||||
|
strposition_sql,
|
||||||
timestampdiff_sql,
|
timestampdiff_sql,
|
||||||
no_make_interval_sql,
|
no_make_interval_sql,
|
||||||
)
|
)
|
||||||
|
@ -67,8 +67,9 @@ def _build_datetime(
|
||||||
expr.set("safe", safe)
|
expr.set("safe", safe)
|
||||||
return expr
|
return expr
|
||||||
|
|
||||||
if kind == exp.DataType.Type.DATE and not int_value:
|
if kind in (exp.DataType.Type.DATE, exp.DataType.Type.TIME) and not int_value:
|
||||||
formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
|
klass = exp.TsOrDsToDate if kind == exp.DataType.Type.DATE else exp.TsOrDsToTime
|
||||||
|
formatted_exp = build_formatted_time(klass, "snowflake")(args)
|
||||||
formatted_exp.set("safe", safe)
|
formatted_exp.set("safe", safe)
|
||||||
return formatted_exp
|
return formatted_exp
|
||||||
|
|
||||||
|
@ -357,8 +358,6 @@ class Snowflake(Dialect):
|
||||||
"mi": "%M",
|
"mi": "%M",
|
||||||
"SS": "%S",
|
"SS": "%S",
|
||||||
"ss": "%S",
|
"ss": "%S",
|
||||||
"FF": "%f",
|
|
||||||
"ff": "%f",
|
|
||||||
"FF6": "%f",
|
"FF6": "%f",
|
||||||
"ff6": "%f",
|
"ff6": "%f",
|
||||||
}
|
}
|
||||||
|
@ -451,6 +450,7 @@ class Snowflake(Dialect):
|
||||||
"TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
|
"TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
|
||||||
"TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
|
"TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
|
||||||
"TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
|
"TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
|
||||||
|
"TRY_TO_TIME": _build_datetime("TRY_TO_TIME", exp.DataType.Type.TIME, safe=True),
|
||||||
"TRY_TO_TIMESTAMP": _build_datetime(
|
"TRY_TO_TIMESTAMP": _build_datetime(
|
||||||
"TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
|
"TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
|
||||||
),
|
),
|
||||||
|
@ -955,12 +955,11 @@ class Snowflake(Dialect):
|
||||||
_transform_generate_date_array,
|
_transform_generate_date_array,
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
|
|
||||||
exp.SHA: rename_func("SHA1"),
|
exp.SHA: rename_func("SHA1"),
|
||||||
exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
|
exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
|
||||||
exp.StartsWith: rename_func("STARTSWITH"),
|
exp.StartsWith: rename_func("STARTSWITH"),
|
||||||
exp.StrPosition: lambda self, e: self.func(
|
exp.StrPosition: lambda self, e: strposition_sql(
|
||||||
"POSITION", e.args.get("substr"), e.this, e.args.get("position")
|
self, e, func_name="CHARINDEX", supports_position=True
|
||||||
),
|
),
|
||||||
exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
|
exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
|
||||||
exp.Stuff: rename_func("INSERT"),
|
exp.Stuff: rename_func("INSERT"),
|
||||||
|
@ -981,6 +980,9 @@ class Snowflake(Dialect):
|
||||||
exp.TsOrDsToDate: lambda self, e: self.func(
|
exp.TsOrDsToDate: lambda self, e: self.func(
|
||||||
"TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
|
"TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
|
||||||
),
|
),
|
||||||
|
exp.TsOrDsToTime: lambda self, e: self.func(
|
||||||
|
"TRY_TO_TIME" if e.args.get("safe") else "TO_TIME", e.this, self.format_time(e)
|
||||||
|
),
|
||||||
exp.UnixToTime: rename_func("TO_TIMESTAMP"),
|
exp.UnixToTime: rename_func("TO_TIMESTAMP"),
|
||||||
exp.Uuid: rename_func("UUID_STRING"),
|
exp.Uuid: rename_func("UUID_STRING"),
|
||||||
exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
|
exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
|
||||||
|
|
|
@ -15,7 +15,7 @@ from sqlglot.dialects.dialect import (
|
||||||
no_tablesample_sql,
|
no_tablesample_sql,
|
||||||
no_trycast_sql,
|
no_trycast_sql,
|
||||||
rename_func,
|
rename_func,
|
||||||
str_position_sql,
|
strposition_sql,
|
||||||
)
|
)
|
||||||
from sqlglot.tokens import TokenType
|
from sqlglot.tokens import TokenType
|
||||||
from sqlglot.generator import unsupported_args
|
from sqlglot.generator import unsupported_args
|
||||||
|
@ -200,9 +200,7 @@ class SQLite(Dialect):
|
||||||
transforms.eliminate_semi_and_anti_joins,
|
transforms.eliminate_semi_and_anti_joins,
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.StrPosition: lambda self, e: str_position_sql(
|
exp.StrPosition: lambda self, e: strposition_sql(self, e, func_name="INSTR"),
|
||||||
self, e, str_position_func_name="INSTR"
|
|
||||||
),
|
|
||||||
exp.TableSample: no_tablesample_sql,
|
exp.TableSample: no_tablesample_sql,
|
||||||
exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
|
exp.TimeStrToTime: lambda self, e: self.sql(e, "this"),
|
||||||
exp.TimeToStr: lambda self, e: self.func("STRFTIME", e.args.get("format"), e.this),
|
exp.TimeToStr: lambda self, e: self.func("STRFTIME", e.args.get("format"), e.this),
|
||||||
|
|
|
@ -1,7 +1,8 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
from sqlglot import exp, generator, parser, tokens, transforms
|
from sqlglot import exp, generator, parser, tokens, transforms
|
||||||
from sqlglot.dialects.dialect import Dialect, rename_func
|
from sqlglot.dialects.dialect import Dialect, rename_func, strposition_sql as _strposition_sql
|
||||||
|
from sqlglot.helper import seq_get
|
||||||
|
|
||||||
|
|
||||||
class Tableau(Dialect):
|
class Tableau(Dialect):
|
||||||
|
@ -39,9 +40,22 @@ class Tableau(Dialect):
|
||||||
return self.func("COUNTD", *this.expressions)
|
return self.func("COUNTD", *this.expressions)
|
||||||
return self.func("COUNT", this)
|
return self.func("COUNT", this)
|
||||||
|
|
||||||
|
def strposition_sql(self, expression: exp.StrPosition) -> str:
|
||||||
|
has_occurrence = "occurrence" in expression.args
|
||||||
|
return _strposition_sql(
|
||||||
|
self,
|
||||||
|
expression,
|
||||||
|
func_name="FINDNTH" if has_occurrence else "FIND",
|
||||||
|
supports_occurrence=has_occurrence,
|
||||||
|
)
|
||||||
|
|
||||||
class Parser(parser.Parser):
|
class Parser(parser.Parser):
|
||||||
FUNCTIONS = {
|
FUNCTIONS = {
|
||||||
**parser.Parser.FUNCTIONS,
|
**parser.Parser.FUNCTIONS,
|
||||||
"COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
|
"COUNTD": lambda args: exp.Count(this=exp.Distinct(expressions=args)),
|
||||||
|
"FIND": exp.StrPosition.from_arg_list,
|
||||||
|
"FINDNTH": lambda args: exp.StrPosition(
|
||||||
|
this=seq_get(args, 0), substr=seq_get(args, 1), occurrence=seq_get(args, 2)
|
||||||
|
),
|
||||||
}
|
}
|
||||||
NO_PAREN_IF_COMMANDS = False
|
NO_PAREN_IF_COMMANDS = False
|
||||||
|
|
|
@ -8,6 +8,7 @@ from sqlglot.dialects.dialect import (
|
||||||
max_or_greatest,
|
max_or_greatest,
|
||||||
min_or_least,
|
min_or_least,
|
||||||
rename_func,
|
rename_func,
|
||||||
|
strposition_sql,
|
||||||
to_number_with_nls_param,
|
to_number_with_nls_param,
|
||||||
)
|
)
|
||||||
from sqlglot.helper import seq_get
|
from sqlglot.helper import seq_get
|
||||||
|
@ -255,6 +256,11 @@ class Teradata(Dialect):
|
||||||
exp.Select: transforms.preprocess(
|
exp.Select: transforms.preprocess(
|
||||||
[transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
|
[transforms.eliminate_distinct_on, transforms.eliminate_semi_and_anti_joins]
|
||||||
),
|
),
|
||||||
|
exp.StrPosition: lambda self, e: (
|
||||||
|
strposition_sql(
|
||||||
|
self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
|
||||||
|
)
|
||||||
|
),
|
||||||
exp.StrToDate: lambda self,
|
exp.StrToDate: lambda self,
|
||||||
e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
|
e: f"CAST({self.sql(e, 'this')} AS DATE FORMAT {self.format_time(e)})",
|
||||||
exp.ToChar: lambda self, e: self.function_fallback_sql(e),
|
exp.ToChar: lambda self, e: self.function_fallback_sql(e),
|
||||||
|
|
|
@ -4,6 +4,7 @@ from sqlglot import exp, parser
|
||||||
from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql, timestrtotime_sql
|
from sqlglot.dialects.dialect import merge_without_target_sql, trim_sql, timestrtotime_sql
|
||||||
from sqlglot.dialects.presto import Presto
|
from sqlglot.dialects.presto import Presto
|
||||||
from sqlglot.tokens import TokenType
|
from sqlglot.tokens import TokenType
|
||||||
|
import typing as t
|
||||||
|
|
||||||
|
|
||||||
class Trino(Presto):
|
class Trino(Presto):
|
||||||
|
@ -33,13 +34,26 @@ class Trino(Presto):
|
||||||
),
|
),
|
||||||
}
|
}
|
||||||
|
|
||||||
def _parse_json_query(self):
|
def _parse_json_query_quote(self) -> t.Optional[exp.JSONExtractQuote]:
|
||||||
|
if not (
|
||||||
|
self._match_text_seq("KEEP", "QUOTES") or self._match_text_seq("OMIT", "QUOTES")
|
||||||
|
):
|
||||||
|
return None
|
||||||
|
|
||||||
|
return self.expression(
|
||||||
|
exp.JSONExtractQuote,
|
||||||
|
option=self._tokens[self._index - 2].text.upper(),
|
||||||
|
scalar=self._match_text_seq("ON", "SCALAR", "STRING"),
|
||||||
|
)
|
||||||
|
|
||||||
|
def _parse_json_query(self) -> exp.JSONExtract:
|
||||||
return self.expression(
|
return self.expression(
|
||||||
exp.JSONExtract,
|
exp.JSONExtract,
|
||||||
this=self._parse_bitwise(),
|
this=self._parse_bitwise(),
|
||||||
expression=self._match(TokenType.COMMA) and self._parse_bitwise(),
|
expression=self._match(TokenType.COMMA) and self._parse_bitwise(),
|
||||||
option=self._parse_var_from_options(self.JSON_QUERY_OPTIONS, raise_unmatched=False),
|
option=self._parse_var_from_options(self.JSON_QUERY_OPTIONS, raise_unmatched=False),
|
||||||
json_query=True,
|
json_query=True,
|
||||||
|
quote=self._parse_json_query_quote(),
|
||||||
)
|
)
|
||||||
|
|
||||||
class Generator(Presto.Generator):
|
class Generator(Presto.Generator):
|
||||||
|
@ -47,6 +61,7 @@ class Trino(Presto):
|
||||||
**Presto.Generator.TRANSFORMS,
|
**Presto.Generator.TRANSFORMS,
|
||||||
exp.ArraySum: lambda self,
|
exp.ArraySum: lambda self,
|
||||||
e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
|
e: f"REDUCE({self.sql(e, 'this')}, 0, (acc, x) -> acc + x, acc -> acc)",
|
||||||
|
exp.ArrayUniqueAgg: lambda self, e: f"ARRAY_AGG(DISTINCT {self.sql(e, 'this')})",
|
||||||
exp.Merge: merge_without_target_sql,
|
exp.Merge: merge_without_target_sql,
|
||||||
exp.TimeStrToTime: lambda self, e: timestrtotime_sql(self, e, include_precision=True),
|
exp.TimeStrToTime: lambda self, e: timestrtotime_sql(self, e, include_precision=True),
|
||||||
exp.Trim: trim_sql,
|
exp.Trim: trim_sql,
|
||||||
|
@ -67,7 +82,10 @@ class Trino(Presto):
|
||||||
option = self.sql(expression, "option")
|
option = self.sql(expression, "option")
|
||||||
option = f" {option}" if option else ""
|
option = f" {option}" if option else ""
|
||||||
|
|
||||||
return self.func("JSON_QUERY", expression.this, json_path + option)
|
quote = self.sql(expression, "quote")
|
||||||
|
quote = f" {quote}" if quote else ""
|
||||||
|
|
||||||
|
return self.func("JSON_QUERY", expression.this, json_path + option + quote)
|
||||||
|
|
||||||
def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
|
def groupconcat_sql(self, expression: exp.GroupConcat) -> str:
|
||||||
this = expression.this
|
this = expression.this
|
||||||
|
|
|
@ -17,6 +17,7 @@ from sqlglot.dialects.dialect import (
|
||||||
min_or_least,
|
min_or_least,
|
||||||
build_date_delta,
|
build_date_delta,
|
||||||
rename_func,
|
rename_func,
|
||||||
|
strposition_sql,
|
||||||
trim_sql,
|
trim_sql,
|
||||||
timestrtotime_sql,
|
timestrtotime_sql,
|
||||||
)
|
)
|
||||||
|
@ -768,7 +769,7 @@ class TSQL(Dialect):
|
||||||
|
|
||||||
if isinstance(create, exp.Create):
|
if isinstance(create, exp.Create):
|
||||||
table = create.this.this if isinstance(create.this, exp.Schema) else create.this
|
table = create.this.this if isinstance(create.this, exp.Schema) else create.this
|
||||||
if isinstance(table, exp.Table) and table.this.args.get("temporary"):
|
if isinstance(table, exp.Table) and table.this and table.this.args.get("temporary"):
|
||||||
if not create.args.get("properties"):
|
if not create.args.get("properties"):
|
||||||
create.set("properties", exp.Properties(expressions=[]))
|
create.set("properties", exp.Properties(expressions=[]))
|
||||||
|
|
||||||
|
@ -935,8 +936,8 @@ class TSQL(Dialect):
|
||||||
]
|
]
|
||||||
),
|
),
|
||||||
exp.Stddev: rename_func("STDEV"),
|
exp.Stddev: rename_func("STDEV"),
|
||||||
exp.StrPosition: lambda self, e: self.func(
|
exp.StrPosition: lambda self, e: strposition_sql(
|
||||||
"CHARINDEX", e.args.get("substr"), e.this, e.args.get("position")
|
self, e, func_name="CHARINDEX", supports_position=True
|
||||||
),
|
),
|
||||||
exp.Subquery: transforms.preprocess([qualify_derived_table_outputs]),
|
exp.Subquery: transforms.preprocess([qualify_derived_table_outputs]),
|
||||||
exp.SHA: lambda self, e: self.func("HASHBYTES", exp.Literal.string("SHA1"), e.this),
|
exp.SHA: lambda self, e: self.func("HASHBYTES", exp.Literal.string("SHA1"), e.this),
|
||||||
|
|
|
@ -2453,6 +2453,10 @@ class Join(Expression):
|
||||||
def alias_or_name(self) -> str:
|
def alias_or_name(self) -> str:
|
||||||
return self.this.alias_or_name
|
return self.this.alias_or_name
|
||||||
|
|
||||||
|
@property
|
||||||
|
def is_semi_or_anti_join(self) -> bool:
|
||||||
|
return self.kind in ("SEMI", "ANTI")
|
||||||
|
|
||||||
def on(
|
def on(
|
||||||
self,
|
self,
|
||||||
*expressions: t.Optional[ExpOrStr],
|
*expressions: t.Optional[ExpOrStr],
|
||||||
|
@ -4382,6 +4386,7 @@ class DataType(Expression):
|
||||||
DECIMAL128 = auto()
|
DECIMAL128 = auto()
|
||||||
DECIMAL256 = auto()
|
DECIMAL256 = auto()
|
||||||
DOUBLE = auto()
|
DOUBLE = auto()
|
||||||
|
DYNAMIC = auto()
|
||||||
ENUM = auto()
|
ENUM = auto()
|
||||||
ENUM8 = auto()
|
ENUM8 = auto()
|
||||||
ENUM16 = auto()
|
ENUM16 = auto()
|
||||||
|
@ -6155,6 +6160,7 @@ class JSONExtract(Binary, Func):
|
||||||
"variant_extract": False,
|
"variant_extract": False,
|
||||||
"json_query": False,
|
"json_query": False,
|
||||||
"option": False,
|
"option": False,
|
||||||
|
"quote": False,
|
||||||
}
|
}
|
||||||
_sql_names = ["JSON_EXTRACT"]
|
_sql_names = ["JSON_EXTRACT"]
|
||||||
is_var_len_args = True
|
is_var_len_args = True
|
||||||
|
@ -6164,6 +6170,14 @@ class JSONExtract(Binary, Func):
|
||||||
return self.expression.output_name if not self.expressions else ""
|
return self.expression.output_name if not self.expressions else ""
|
||||||
|
|
||||||
|
|
||||||
|
# https://trino.io/docs/current/functions/json.html#json-query
|
||||||
|
class JSONExtractQuote(Expression):
|
||||||
|
arg_types = {
|
||||||
|
"option": True,
|
||||||
|
"scalar": False,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class JSONExtractArray(Func):
|
class JSONExtractArray(Func):
|
||||||
arg_types = {"this": True, "expression": False}
|
arg_types = {"this": True, "expression": False}
|
||||||
_sql_names = ["JSON_EXTRACT_ARRAY"]
|
_sql_names = ["JSON_EXTRACT_ARRAY"]
|
||||||
|
@ -6516,7 +6530,7 @@ class StrPosition(Func):
|
||||||
"this": True,
|
"this": True,
|
||||||
"substr": True,
|
"substr": True,
|
||||||
"position": False,
|
"position": False,
|
||||||
"instance": False,
|
"occurrence": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -6653,7 +6667,7 @@ class TsOrDsToDatetime(Func):
|
||||||
|
|
||||||
|
|
||||||
class TsOrDsToTime(Func):
|
class TsOrDsToTime(Func):
|
||||||
pass
|
arg_types = {"this": True, "format": False, "safe": False}
|
||||||
|
|
||||||
|
|
||||||
class TsOrDsToTimestamp(Func):
|
class TsOrDsToTimestamp(Func):
|
||||||
|
@ -6665,7 +6679,7 @@ class TsOrDiToDi(Func):
|
||||||
|
|
||||||
|
|
||||||
class Unhex(Func):
|
class Unhex(Func):
|
||||||
pass
|
arg_types = {"this": True, "expression": False}
|
||||||
|
|
||||||
|
|
||||||
class Unicode(Func):
|
class Unicode(Func):
|
||||||
|
@ -6768,7 +6782,17 @@ class XMLElement(Func):
|
||||||
|
|
||||||
|
|
||||||
class XMLTable(Func):
|
class XMLTable(Func):
|
||||||
arg_types = {"this": True, "passing": False, "columns": False, "by_ref": False}
|
arg_types = {
|
||||||
|
"this": True,
|
||||||
|
"namespaces": False,
|
||||||
|
"passing": False,
|
||||||
|
"columns": False,
|
||||||
|
"by_ref": False,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class XMLNamespace(Expression):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class Year(Func):
|
class Year(Func):
|
||||||
|
|
|
@ -2491,11 +2491,11 @@ class Generator(metaclass=_Generator):
|
||||||
return csv(
|
return csv(
|
||||||
*sqls,
|
*sqls,
|
||||||
*[self.sql(join) for join in expression.args.get("joins") or []],
|
*[self.sql(join) for join in expression.args.get("joins") or []],
|
||||||
self.sql(expression, "connect"),
|
|
||||||
self.sql(expression, "match"),
|
self.sql(expression, "match"),
|
||||||
*[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
|
*[self.sql(lateral) for lateral in expression.args.get("laterals") or []],
|
||||||
self.sql(expression, "prewhere"),
|
self.sql(expression, "prewhere"),
|
||||||
self.sql(expression, "where"),
|
self.sql(expression, "where"),
|
||||||
|
self.sql(expression, "connect"),
|
||||||
self.sql(expression, "group"),
|
self.sql(expression, "group"),
|
||||||
self.sql(expression, "having"),
|
self.sql(expression, "having"),
|
||||||
*[gen(self, expression) for gen in self.AFTER_HAVING_MODIFIER_TRANSFORMS.values()],
|
*[gen(self, expression) for gen in self.AFTER_HAVING_MODIFIER_TRANSFORMS.values()],
|
||||||
|
@ -3410,6 +3410,11 @@ class Generator(metaclass=_Generator):
|
||||||
|
|
||||||
return self.binary(expression, "/")
|
return self.binary(expression, "/")
|
||||||
|
|
||||||
|
def safedivide_sql(self, expression: exp.SafeDivide) -> str:
|
||||||
|
n = exp._wrap(expression.this, exp.Binary)
|
||||||
|
d = exp._wrap(expression.expression, exp.Binary)
|
||||||
|
return self.sql(exp.If(this=d.neq(0), true=n / d, false=exp.Null()))
|
||||||
|
|
||||||
def overlaps_sql(self, expression: exp.Overlaps) -> str:
|
def overlaps_sql(self, expression: exp.Overlaps) -> str:
|
||||||
return self.binary(expression, "OVERLAPS")
|
return self.binary(expression, "OVERLAPS")
|
||||||
|
|
||||||
|
@ -3934,6 +3939,16 @@ class Generator(metaclass=_Generator):
|
||||||
|
|
||||||
def tsordstotime_sql(self, expression: exp.TsOrDsToTime) -> str:
|
def tsordstotime_sql(self, expression: exp.TsOrDsToTime) -> str:
|
||||||
this = expression.this
|
this = expression.this
|
||||||
|
time_format = self.format_time(expression)
|
||||||
|
|
||||||
|
if time_format:
|
||||||
|
return self.sql(
|
||||||
|
exp.cast(
|
||||||
|
exp.StrToTime(this=this, format=expression.args["format"]),
|
||||||
|
exp.DataType.Type.TIME,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
if isinstance(this, exp.TsOrDsToTime) or this.is_type(exp.DataType.Type.TIME):
|
if isinstance(this, exp.TsOrDsToTime) or this.is_type(exp.DataType.Type.TIME):
|
||||||
return self.sql(this)
|
return self.sql(this)
|
||||||
|
|
||||||
|
@ -4421,6 +4436,10 @@ class Generator(metaclass=_Generator):
|
||||||
|
|
||||||
return f"{empty}{error}{null}"
|
return f"{empty}{error}{null}"
|
||||||
|
|
||||||
|
def jsonextractquote_sql(self, expression: exp.JSONExtractQuote) -> str:
|
||||||
|
scalar = " ON SCALAR STRING" if expression.args.get("scalar") else ""
|
||||||
|
return f"{self.sql(expression, 'option')} QUOTES{scalar}"
|
||||||
|
|
||||||
def jsonexists_sql(self, expression: exp.JSONExists) -> str:
|
def jsonexists_sql(self, expression: exp.JSONExists) -> str:
|
||||||
this = self.sql(expression, "this")
|
this = self.sql(expression, "this")
|
||||||
path = self.sql(expression, "path")
|
path = self.sql(expression, "path")
|
||||||
|
@ -4719,9 +4738,15 @@ class Generator(metaclass=_Generator):
|
||||||
|
|
||||||
def xmltable_sql(self, expression: exp.XMLTable) -> str:
|
def xmltable_sql(self, expression: exp.XMLTable) -> str:
|
||||||
this = self.sql(expression, "this")
|
this = self.sql(expression, "this")
|
||||||
|
namespaces = self.expressions(expression, key="namespaces")
|
||||||
|
namespaces = f"XMLNAMESPACES({namespaces}), " if namespaces else ""
|
||||||
passing = self.expressions(expression, key="passing")
|
passing = self.expressions(expression, key="passing")
|
||||||
passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
|
passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
|
||||||
columns = self.expressions(expression, key="columns")
|
columns = self.expressions(expression, key="columns")
|
||||||
columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
|
columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
|
||||||
by_ref = f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
|
by_ref = f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
|
||||||
return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
|
return f"XMLTABLE({self.sep('')}{self.indent(namespaces + this + passing + by_ref + columns)}{self.seg(')', sep='')}"
|
||||||
|
|
||||||
|
def xmlnamespace_sql(self, expression: exp.XMLNamespace) -> str:
|
||||||
|
this = self.sql(expression, "this")
|
||||||
|
return this if isinstance(expression.this, exp.Alias) else f"DEFAULT {this}"
|
||||||
|
|
|
@ -56,7 +56,21 @@ def qualify_columns(
|
||||||
dialect = Dialect.get_or_raise(schema.dialect)
|
dialect = Dialect.get_or_raise(schema.dialect)
|
||||||
pseudocolumns = dialect.PSEUDOCOLUMNS
|
pseudocolumns = dialect.PSEUDOCOLUMNS
|
||||||
|
|
||||||
|
snowflake_or_oracle = dialect in ("oracle", "snowflake")
|
||||||
|
|
||||||
for scope in traverse_scope(expression):
|
for scope in traverse_scope(expression):
|
||||||
|
scope_expression = scope.expression
|
||||||
|
is_select = isinstance(scope_expression, exp.Select)
|
||||||
|
|
||||||
|
if is_select and snowflake_or_oracle and scope_expression.args.get("connect"):
|
||||||
|
# In Snowflake / Oracle queries that have a CONNECT BY clause, one can use the LEVEL
|
||||||
|
# pseudocolumn, which doesn't belong to a table, so we change it into an identifier
|
||||||
|
scope_expression.transform(
|
||||||
|
lambda n: n.this if isinstance(n, exp.Column) and n.name == "LEVEL" else n,
|
||||||
|
copy=False,
|
||||||
|
)
|
||||||
|
scope.clear_cache()
|
||||||
|
|
||||||
resolver = Resolver(scope, schema, infer_schema=infer_schema)
|
resolver = Resolver(scope, schema, infer_schema=infer_schema)
|
||||||
_pop_table_column_aliases(scope.ctes)
|
_pop_table_column_aliases(scope.ctes)
|
||||||
_pop_table_column_aliases(scope.derived_tables)
|
_pop_table_column_aliases(scope.derived_tables)
|
||||||
|
@ -76,7 +90,7 @@ def qualify_columns(
|
||||||
if not schema.empty and expand_alias_refs:
|
if not schema.empty and expand_alias_refs:
|
||||||
_expand_alias_refs(scope, resolver, dialect)
|
_expand_alias_refs(scope, resolver, dialect)
|
||||||
|
|
||||||
if isinstance(scope.expression, exp.Select):
|
if is_select:
|
||||||
if expand_stars:
|
if expand_stars:
|
||||||
_expand_stars(
|
_expand_stars(
|
||||||
scope,
|
scope,
|
||||||
|
@ -159,6 +173,9 @@ def _expand_using(scope: Scope, resolver: Resolver) -> t.Dict[str, t.Any]:
|
||||||
names = {join.alias_or_name for join in joins}
|
names = {join.alias_or_name for join in joins}
|
||||||
ordered = [key for key in scope.selected_sources if key not in names]
|
ordered = [key for key in scope.selected_sources if key not in names]
|
||||||
|
|
||||||
|
if names and not ordered:
|
||||||
|
raise OptimizeError(f"Joins {names} missing source table {scope.expression}")
|
||||||
|
|
||||||
# Mapping of automatically joined column names to an ordered set of source names (dict).
|
# Mapping of automatically joined column names to an ordered set of source names (dict).
|
||||||
column_tables: t.Dict[str, t.Dict[str, t.Any]] = {}
|
column_tables: t.Dict[str, t.Dict[str, t.Any]] = {}
|
||||||
|
|
||||||
|
@ -180,6 +197,7 @@ def _expand_using(scope: Scope, resolver: Resolver) -> t.Dict[str, t.Any]:
|
||||||
join_columns = resolver.get_source_columns(join_table)
|
join_columns = resolver.get_source_columns(join_table)
|
||||||
conditions = []
|
conditions = []
|
||||||
using_identifier_count = len(using)
|
using_identifier_count = len(using)
|
||||||
|
is_semi_or_anti_join = join.is_semi_or_anti_join
|
||||||
|
|
||||||
for identifier in using:
|
for identifier in using:
|
||||||
identifier = identifier.name
|
identifier = identifier.name
|
||||||
|
@ -208,6 +226,10 @@ def _expand_using(scope: Scope, resolver: Resolver) -> t.Dict[str, t.Any]:
|
||||||
|
|
||||||
# Set all values in the dict to None, because we only care about the key ordering
|
# Set all values in the dict to None, because we only care about the key ordering
|
||||||
tables = column_tables.setdefault(identifier, {})
|
tables = column_tables.setdefault(identifier, {})
|
||||||
|
|
||||||
|
# Do not update the dict if this was a SEMI/ANTI join in
|
||||||
|
# order to avoid generating COALESCE columns for this join pair
|
||||||
|
if not is_semi_or_anti_join:
|
||||||
if table not in tables:
|
if table not in tables:
|
||||||
tables[table] = None
|
tables[table] = None
|
||||||
if join_table not in tables:
|
if join_table not in tables:
|
||||||
|
@ -898,22 +920,10 @@ class Resolver:
|
||||||
for (name, alias) in itertools.zip_longest(columns, column_aliases)
|
for (name, alias) in itertools.zip_longest(columns, column_aliases)
|
||||||
]
|
]
|
||||||
|
|
||||||
pseudocolumns = self._get_source_pseudocolumns(name)
|
|
||||||
if pseudocolumns:
|
|
||||||
columns = list(columns)
|
|
||||||
columns.extend(c for c in pseudocolumns if c not in columns)
|
|
||||||
|
|
||||||
self._get_source_columns_cache[cache_key] = columns
|
self._get_source_columns_cache[cache_key] = columns
|
||||||
|
|
||||||
return self._get_source_columns_cache[cache_key]
|
return self._get_source_columns_cache[cache_key]
|
||||||
|
|
||||||
def _get_source_pseudocolumns(self, name: str) -> t.Sequence[str]:
|
|
||||||
if self.schema.dialect == "snowflake" and self.scope.expression.args.get("connect"):
|
|
||||||
# When there is a CONNECT BY clause, there is only one table being scanned
|
|
||||||
# See: https://docs.snowflake.com/en/sql-reference/constructs/connect-by
|
|
||||||
return ["LEVEL"]
|
|
||||||
return []
|
|
||||||
|
|
||||||
def _get_all_source_columns(self) -> t.Dict[str, t.Sequence[str]]:
|
def _get_all_source_columns(self) -> t.Dict[str, t.Sequence[str]]:
|
||||||
if self._source_columns is None:
|
if self._source_columns is None:
|
||||||
self._source_columns = {
|
self._source_columns = {
|
||||||
|
|
|
@ -100,6 +100,7 @@ class Scope:
|
||||||
self._join_hints = None
|
self._join_hints = None
|
||||||
self._pivots = None
|
self._pivots = None
|
||||||
self._references = None
|
self._references = None
|
||||||
|
self._semi_anti_join_tables = None
|
||||||
|
|
||||||
def branch(
|
def branch(
|
||||||
self, expression, scope_type, sources=None, cte_sources=None, lateral_sources=None, **kwargs
|
self, expression, scope_type, sources=None, cte_sources=None, lateral_sources=None, **kwargs
|
||||||
|
@ -126,6 +127,7 @@ class Scope:
|
||||||
self._raw_columns = []
|
self._raw_columns = []
|
||||||
self._stars = []
|
self._stars = []
|
||||||
self._join_hints = []
|
self._join_hints = []
|
||||||
|
self._semi_anti_join_tables = set()
|
||||||
|
|
||||||
for node in self.walk(bfs=False):
|
for node in self.walk(bfs=False):
|
||||||
if node is self.expression:
|
if node is self.expression:
|
||||||
|
@ -139,6 +141,10 @@ class Scope:
|
||||||
else:
|
else:
|
||||||
self._raw_columns.append(node)
|
self._raw_columns.append(node)
|
||||||
elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
|
elif isinstance(node, exp.Table) and not isinstance(node.parent, exp.JoinHint):
|
||||||
|
parent = node.parent
|
||||||
|
if isinstance(parent, exp.Join) and parent.is_semi_or_anti_join:
|
||||||
|
self._semi_anti_join_tables.add(node.alias_or_name)
|
||||||
|
|
||||||
self._tables.append(node)
|
self._tables.append(node)
|
||||||
elif isinstance(node, exp.JoinHint):
|
elif isinstance(node, exp.JoinHint):
|
||||||
self._join_hints.append(node)
|
self._join_hints.append(node)
|
||||||
|
@ -311,6 +317,11 @@ class Scope:
|
||||||
result = {}
|
result = {}
|
||||||
|
|
||||||
for name, node in self.references:
|
for name, node in self.references:
|
||||||
|
if name in self._semi_anti_join_tables:
|
||||||
|
# The RHS table of SEMI/ANTI joins shouldn't be collected as a
|
||||||
|
# selected source
|
||||||
|
continue
|
||||||
|
|
||||||
if name in result:
|
if name in result:
|
||||||
raise OptimizeError(f"Alias already used: {name}")
|
raise OptimizeError(f"Alias already used: {name}")
|
||||||
if name in self.sources:
|
if name in self.sources:
|
||||||
|
@ -351,7 +362,10 @@ class Scope:
|
||||||
self._external_columns = left.external_columns + right.external_columns
|
self._external_columns = left.external_columns + right.external_columns
|
||||||
else:
|
else:
|
||||||
self._external_columns = [
|
self._external_columns = [
|
||||||
c for c in self.columns if c.table not in self.selected_sources
|
c
|
||||||
|
for c in self.columns
|
||||||
|
if c.table not in self.selected_sources
|
||||||
|
and c.table not in self.semi_or_anti_join_tables
|
||||||
]
|
]
|
||||||
|
|
||||||
return self._external_columns
|
return self._external_columns
|
||||||
|
@ -387,6 +401,10 @@ class Scope:
|
||||||
|
|
||||||
return self._pivots
|
return self._pivots
|
||||||
|
|
||||||
|
@property
|
||||||
|
def semi_or_anti_join_tables(self):
|
||||||
|
return self._semi_anti_join_tables or set()
|
||||||
|
|
||||||
def source_columns(self, source_name):
|
def source_columns(self, source_name):
|
||||||
"""
|
"""
|
||||||
Get all columns in the current scope for a particular source.
|
Get all columns in the current scope for a particular source.
|
||||||
|
|
|
@ -749,7 +749,7 @@ def simplify_parens(expression):
|
||||||
|
|
||||||
if (
|
if (
|
||||||
not isinstance(this, exp.Select)
|
not isinstance(this, exp.Select)
|
||||||
and not isinstance(parent, exp.SubqueryPredicate)
|
and not isinstance(parent, (exp.SubqueryPredicate, exp.Bracket))
|
||||||
and (
|
and (
|
||||||
not isinstance(parent, (exp.Condition, exp.Binary))
|
not isinstance(parent, (exp.Condition, exp.Binary))
|
||||||
or isinstance(parent, exp.Paren)
|
or isinstance(parent, exp.Paren)
|
||||||
|
|
|
@ -153,6 +153,14 @@ def build_coalesce(args: t.List, is_nvl: t.Optional[bool] = None) -> exp.Coalesc
|
||||||
return exp.Coalesce(this=seq_get(args, 0), expressions=args[1:], is_nvl=is_nvl)
|
return exp.Coalesce(this=seq_get(args, 0), expressions=args[1:], is_nvl=is_nvl)
|
||||||
|
|
||||||
|
|
||||||
|
def build_locate_strposition(args: t.List):
|
||||||
|
return exp.StrPosition(
|
||||||
|
this=seq_get(args, 1),
|
||||||
|
substr=seq_get(args, 0),
|
||||||
|
position=seq_get(args, 2),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
class _Parser(type):
|
class _Parser(type):
|
||||||
def __new__(cls, clsname, bases, attrs):
|
def __new__(cls, clsname, bases, attrs):
|
||||||
klass = super().__new__(cls, clsname, bases, attrs)
|
klass = super().__new__(cls, clsname, bases, attrs)
|
||||||
|
@ -213,7 +221,6 @@ class Parser(metaclass=_Parser):
|
||||||
),
|
),
|
||||||
"GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)),
|
"GLOB": lambda args: exp.Glob(this=seq_get(args, 1), expression=seq_get(args, 0)),
|
||||||
"HEX": build_hex,
|
"HEX": build_hex,
|
||||||
"INSTR": lambda args: exp.StrPosition(this=seq_get(args, 0), substr=seq_get(args, 1)),
|
|
||||||
"JSON_EXTRACT": build_extract_json_with_path(exp.JSONExtract),
|
"JSON_EXTRACT": build_extract_json_with_path(exp.JSONExtract),
|
||||||
"JSON_EXTRACT_SCALAR": build_extract_json_with_path(exp.JSONExtractScalar),
|
"JSON_EXTRACT_SCALAR": build_extract_json_with_path(exp.JSONExtractScalar),
|
||||||
"JSON_EXTRACT_PATH_TEXT": build_extract_json_with_path(exp.JSONExtractScalar),
|
"JSON_EXTRACT_PATH_TEXT": build_extract_json_with_path(exp.JSONExtractScalar),
|
||||||
|
@ -232,6 +239,10 @@ class Parser(metaclass=_Parser):
|
||||||
"SCOPE_RESOLUTION": lambda args: exp.ScopeResolution(expression=seq_get(args, 0))
|
"SCOPE_RESOLUTION": lambda args: exp.ScopeResolution(expression=seq_get(args, 0))
|
||||||
if len(args) != 2
|
if len(args) != 2
|
||||||
else exp.ScopeResolution(this=seq_get(args, 0), expression=seq_get(args, 1)),
|
else exp.ScopeResolution(this=seq_get(args, 0), expression=seq_get(args, 1)),
|
||||||
|
"STRPOS": exp.StrPosition.from_arg_list,
|
||||||
|
"CHARINDEX": lambda args: build_locate_strposition(args),
|
||||||
|
"INSTR": exp.StrPosition.from_arg_list,
|
||||||
|
"LOCATE": lambda args: build_locate_strposition(args),
|
||||||
"TIME_TO_TIME_STR": lambda args: exp.Cast(
|
"TIME_TO_TIME_STR": lambda args: exp.Cast(
|
||||||
this=seq_get(args, 0),
|
this=seq_get(args, 0),
|
||||||
to=exp.DataType(this=exp.DataType.Type.TEXT),
|
to=exp.DataType(this=exp.DataType.Type.TEXT),
|
||||||
|
@ -276,6 +287,7 @@ class Parser(metaclass=_Parser):
|
||||||
}
|
}
|
||||||
|
|
||||||
ENUM_TYPE_TOKENS = {
|
ENUM_TYPE_TOKENS = {
|
||||||
|
TokenType.DYNAMIC,
|
||||||
TokenType.ENUM,
|
TokenType.ENUM,
|
||||||
TokenType.ENUM8,
|
TokenType.ENUM8,
|
||||||
TokenType.ENUM16,
|
TokenType.ENUM16,
|
||||||
|
@ -394,6 +406,7 @@ class Parser(metaclass=_Parser):
|
||||||
TokenType.NULL,
|
TokenType.NULL,
|
||||||
TokenType.NAME,
|
TokenType.NAME,
|
||||||
TokenType.TDIGEST,
|
TokenType.TDIGEST,
|
||||||
|
TokenType.DYNAMIC,
|
||||||
*ENUM_TYPE_TOKENS,
|
*ENUM_TYPE_TOKENS,
|
||||||
*NESTED_TYPE_TOKENS,
|
*NESTED_TYPE_TOKENS,
|
||||||
*AGGREGATE_TYPE_TOKENS,
|
*AGGREGATE_TYPE_TOKENS,
|
||||||
|
@ -3197,7 +3210,7 @@ class Parser(metaclass=_Parser):
|
||||||
else:
|
else:
|
||||||
materialized = None
|
materialized = None
|
||||||
|
|
||||||
return self.expression(
|
cte = self.expression(
|
||||||
exp.CTE,
|
exp.CTE,
|
||||||
this=self._parse_wrapped(self._parse_statement),
|
this=self._parse_wrapped(self._parse_statement),
|
||||||
alias=alias,
|
alias=alias,
|
||||||
|
@ -3205,6 +3218,11 @@ class Parser(metaclass=_Parser):
|
||||||
comments=comments,
|
comments=comments,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if isinstance(cte.this, exp.Values):
|
||||||
|
cte.set("this", exp.select("*").from_(exp.alias_(cte.this, "_values", table=True)))
|
||||||
|
|
||||||
|
return cte
|
||||||
|
|
||||||
def _parse_table_alias(
|
def _parse_table_alias(
|
||||||
self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
|
self, alias_tokens: t.Optional[t.Collection[TokenType]] = None
|
||||||
) -> t.Optional[exp.TableAlias]:
|
) -> t.Optional[exp.TableAlias]:
|
||||||
|
@ -5902,7 +5920,7 @@ class Parser(metaclass=_Parser):
|
||||||
)
|
)
|
||||||
|
|
||||||
def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:
|
def _parse_primary_key_part(self) -> t.Optional[exp.Expression]:
|
||||||
return self._parse_field()
|
return self._parse_ordered() or self._parse_field()
|
||||||
|
|
||||||
def _parse_period_for_system_time(self) -> t.Optional[exp.PeriodForSystemTimeConstraint]:
|
def _parse_period_for_system_time(self) -> t.Optional[exp.PeriodForSystemTimeConstraint]:
|
||||||
if not self._match(TokenType.TIMESTAMP_SNAPSHOT):
|
if not self._match(TokenType.TIMESTAMP_SNAPSHOT):
|
||||||
@@ -6205,11 +6223,16 @@ class Parser(metaclass=_Parser):
         return self.expression(exp.Cast if strict else exp.TryCast, this=this, to=to, safe=safe)

     def _parse_xml_table(self) -> exp.XMLTable:
-        this = self._parse_string()
+        namespaces = None
         passing = None
         columns = None

+        if self._match_text_seq("XMLNAMESPACES", "("):
+            namespaces = self._parse_xml_namespace()
+            self._match_text_seq(")", ",")
+
+        this = self._parse_string()
+
         if self._match_text_seq("PASSING"):
             # The BY VALUE keywords are optional and are provided for semantic clarity
             self._match_text_seq("BY", "VALUE")

@@ -6221,9 +6244,28 @@ class Parser(metaclass=_Parser):
             columns = self._parse_csv(self._parse_field_def)

         return self.expression(
-            exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref
+            exp.XMLTable,
+            this=this,
+            namespaces=namespaces,
+            passing=passing,
+            columns=columns,
+            by_ref=by_ref,
         )

+    def _parse_xml_namespace(self) -> t.List[exp.XMLNamespace]:
+        namespaces = []
+
+        while True:
+            if self._match_text_seq("DEFAULT"):
+                uri = self._parse_string()
+            else:
+                uri = self._parse_alias(self._parse_string())
+            namespaces.append(self.expression(exp.XMLNamespace, this=uri))
+            if not self._match(TokenType.COMMA):
+                break
+
+        return namespaces
+
     def _parse_decode(self) -> t.Optional[exp.Decode | exp.Case]:
         """
         There are generally two variants of the DECODE function:
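A sketch of the new XMLNAMESPACES support (example adapted from the Postgres test further down; the exp.XMLNamespace nodes end up in the XMLTable's `namespaces` arg):

    import sqlglot
    from sqlglot import exp

    sql = (
        "SELECT id, value FROM xml_content AS t, "
        "XMLTABLE(XMLNAMESPACES('http://example.com/ns1' AS ns1), "
        "'/root/data' PASSING t.xml COLUMNS id INT PATH '@ns1:id') AS x"
    )
    table = sqlglot.parse_one(sql, read="postgres").find(exp.XMLTable)
    print([ns.sql("postgres") for ns in table.args["namespaces"]])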
@@ -6464,8 +6506,8 @@ class Parser(metaclass=_Parser):
             haystack = seq_get(args, 0)
             needle = seq_get(args, 1)
         else:
-            needle = seq_get(args, 0)
             haystack = seq_get(args, 1)
+            needle = seq_get(args, 0)

         return self.expression(
             exp.StrPosition, this=haystack, substr=needle, position=seq_get(args, 2)

@@ -6561,6 +6603,12 @@ class Parser(metaclass=_Parser):
             func = this
         comments = func.comments if isinstance(func, exp.Expression) else None

+        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
+        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
+        if self._match_text_seq("WITHIN", "GROUP"):
+            order = self._parse_wrapped(self._parse_order)
+            this = self.expression(exp.WithinGroup, this=this, expression=order)
+
         if self._match_pair(TokenType.FILTER, TokenType.L_PAREN):
             self._match(TokenType.WHERE)
             this = self.expression(

@@ -6568,12 +6616,6 @@ class Parser(metaclass=_Parser):
             )
             self._match_r_paren()

-        # T-SQL allows the OVER (...) syntax after WITHIN GROUP.
-        # https://learn.microsoft.com/en-us/sql/t-sql/functions/percentile-disc-transact-sql?view=sql-server-ver16
-        if self._match_text_seq("WITHIN", "GROUP"):
-            order = self._parse_wrapped(self._parse_order)
-            this = self.expression(exp.WithinGroup, this=this, expression=order)
-
         # SQL spec defines an optional [ { IGNORE | RESPECT } NULLS ] OVER
         # Some dialects choose to implement and some do not.
         # https://dev.mysql.com/doc/refman/8.0/en/window-function-descriptions.html
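Moving the WITHIN GROUP branch ahead of FILTER lets the two clauses chain; a sketch using the Postgres query from the tests below, which now round-trips:

    import sqlglot

    sql = (
        "SELECT PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY a) "
        "FILTER(WHERE CAST(b AS BOOLEAN)) AS mean_value "
        "FROM (VALUES (0, 't')) AS fake_data(a, b)"
    )
    print(sqlglot.parse_one(sql, read="postgres").sql("postgres"))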
@@ -216,6 +216,7 @@ class TokenType(AutoName):
     TDIGEST = auto()
     UNKNOWN = auto()
     VECTOR = auto()
+    DYNAMIC = auto()

     # keywords
     ALIAS = auto()

@@ -993,16 +994,18 @@ class Tokenizer(metaclass=_Tokenizer):
     )

     def __init__(
-        self, dialect: DialectType = None, use_rs_tokenizer: bool = USE_RS_TOKENIZER
+        self, dialect: DialectType = None, use_rs_tokenizer: t.Optional[bool] = None
     ) -> None:
         from sqlglot.dialects import Dialect

         self.dialect = Dialect.get_or_raise(dialect)

         # initialize `use_rs_tokenizer`, and allow it to be overwritten per Tokenizer instance
-        self.use_rs_tokenizer = use_rs_tokenizer
+        self.use_rs_tokenizer = (
+            use_rs_tokenizer if use_rs_tokenizer is not None else USE_RS_TOKENIZER
+        )

-        if USE_RS_TOKENIZER:
+        if self.use_rs_tokenizer:
             self._rs_dialect_settings = RsTokenizerDialectSettings(
                 unescaped_sequences=self.dialect.UNESCAPED_SEQUENCES,
                 identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
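A sketch of the new per-instance override: use_rs_tokenizer defaults to None and falls back to the module-level USE_RS_TOKENIZER, so the Rust tokenizer can be forced on or off for a single Tokenizer:

    from sqlglot.tokens import Tokenizer

    # Force the pure-Python path for this instance only.
    tokenizer = Tokenizer(dialect="mysql", use_rs_tokenizer=False)
    tokens = tokenizer.tokenize("SELECT 1")
    print(tokenizer.use_rs_tokenizer, len(tokens))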
2  sqlglotrs/Cargo.lock  generated

@@ -503,7 +503,7 @@ dependencies = [

 [[package]]
 name = "sqlglotrs"
-version = "0.3.5"
+version = "0.3.14"
 dependencies = [
  "criterion",
  "pyo3",
@@ -1,6 +1,6 @@
 [package]
 name = "sqlglotrs"
-version = "0.3.5"
+version = "0.3.14"
 edition = "2021"
 license = "MIT"
@@ -197,7 +197,7 @@ impl<'a> TokenizerState<'a> {
         if end <= self.size {
             self.sql[start..end].iter().collect()
         } else {
-            String::from("")
+            String::new()
         }
     }

@@ -433,7 +433,7 @@ impl<'a> TokenizerState<'a> {
         self.advance(1)?;

         let tag = if self.current_char.to_string() == *end {
-            String::from("")
+            String::new()
         } else {
             self.extract_string(end, false, true, !self.settings.heredoc_tag_is_identifier)?
         };

@@ -516,7 +516,7 @@ impl<'a> TokenizerState<'a> {
             self.advance(1)?;
         } else if self.is_alphabetic_or_underscore(self.peek_char) {
             let number_text = self.text();
-            let mut literal = String::from("");
+            let mut literal = String::new();

             while !self.peek_char.is_whitespace()
                 && !self.is_end

@@ -533,8 +533,9 @@ impl<'a> TokenizerState<'a> {
                     self.settings
                         .numeric_literals
                         .get(&literal.to_uppercase())
-                        .unwrap_or(&String::from("")),
+                        .unwrap_or(&String::new()),
-                ).copied();
+                )
+                .copied();

                 let replaced = literal.replace("_", "");

@@ -603,7 +604,8 @@ impl<'a> TokenizerState<'a> {
         } else {
             self.settings
                 .keywords
-                .get(&self.text().to_uppercase()).copied()
+                .get(&self.text().to_uppercase())
+                .copied()
                 .unwrap_or(self.token_types.var)
         };
         self.add(token_type, None)

@@ -622,19 +624,19 @@ impl<'a> TokenizerState<'a> {
         raw_string: bool,
         raise_unmatched: bool,
     ) -> Result<String, TokenizerError> {
-        let mut text = String::from("");
+        let mut text = String::new();
+        let mut combined_identifier_escapes = None;
+        if use_identifier_escapes {
+            let mut tmp = self.settings.identifier_escapes.clone();
+            tmp.extend(delimiter.chars());
+            combined_identifier_escapes = Some(tmp);
+        }
+        let escapes = match combined_identifier_escapes {
+            Some(ref v) => v,
+            None => &self.settings.string_escapes,
+        };

         loop {
-            let mut new_identifier_escapes;
-            let escapes = if use_identifier_escapes {
-                new_identifier_escapes = self.settings.identifier_escapes.clone();
-                new_identifier_escapes.extend(delimiter.chars());
-                &new_identifier_escapes
-            } else {
-                &self.settings.string_escapes
-            };
-            let peek_char_str = self.peek_char.to_string();

             if !raw_string
                 && !self.dialect_settings.unescaped_sequences.is_empty()
                 && !self.peek_char.is_whitespace()

@@ -652,14 +654,16 @@ impl<'a> TokenizerState<'a> {

             if (self.settings.string_escapes_allowed_in_raw_strings || !raw_string)
                 && escapes.contains(&self.current_char)
-                && (peek_char_str == delimiter || escapes.contains(&self.peek_char))
                 && (self.current_char == self.peek_char
                     || !self
                         .settings
                         .quotes
                         .contains_key(&self.current_char.to_string()))
             {
-                if peek_char_str == delimiter {
+                let peek_char_str = self.peek_char.to_string();
+                let equal_delimiter = delimiter == peek_char_str;
+                if equal_delimiter || escapes.contains(&self.peek_char) {
+                    if equal_delimiter {
                         text.push(self.peek_char);
                     } else {
                         text.push(self.current_char);

@@ -673,7 +677,9 @@ impl<'a> TokenizerState<'a> {
                         delimiter, self.line, self.current
                     ));
                 }
-            } else {
+                    continue;
+                }
+            }
             if self.chars(delimiter.len()) == delimiter {
                 if delimiter.len() > 1 {
                     self.advance((delimiter.len() - 1) as isize)?;

@@ -700,15 +706,14 @@ impl<'a> TokenizerState<'a> {
                     .collect::<String>(),
                 );
             }
-        }
         Ok(text)
     }

-    fn is_alphabetic_or_underscore(&mut self, name: char) -> bool {
+    fn is_alphabetic_or_underscore(&self, name: char) -> bool {
         name.is_alphabetic() || name == '_'
     }

-    fn is_identifier(&mut self, s: &str) -> bool {
+    fn is_identifier(&self, s: &str) -> bool {
         s.chars().enumerate().all(|(i, c)| {
             if i == 0 {
                 self.is_alphabetic_or_underscore(c)

@@ -718,7 +723,7 @@ impl<'a> TokenizerState<'a> {
         })
     }

-    fn is_numeric(&mut self, s: &str) -> bool {
+    fn is_numeric(&self, s: &str) -> bool {
         s.chars().all(|c| c.is_ascii_digit())
     }
@@ -198,7 +198,8 @@ class TestAthena(Validator):

     def test_ctas(self):
         # Hive tables use 'external_location' to specify the table location, Iceberg tables use 'location' to specify the table location
-        # The 'table_type' property is used to determine if it's a Hive or an Iceberg table
+        # In addition, Hive tables use 'partitioned_by' to specify the partition fields and Iceberg tables use 'partitioning' to specify the partition fields
+        # The 'table_type' property is used to determine if it's a Hive or an Iceberg table. If it's omitted, it defaults to Hive
         # ref: https://docs.aws.amazon.com/athena/latest/ug/create-table-as.html#ctas-table-properties
         ctas_hive = exp.Create(
             this=exp.to_table("foo.bar"),

@@ -207,13 +208,16 @@ class TestAthena(Validator):
                 expressions=[
                     exp.FileFormatProperty(this=exp.Literal.string("parquet")),
                     exp.LocationProperty(this=exp.Literal.string("s3://foo")),
+                    exp.PartitionedByProperty(
+                        this=exp.Schema(expressions=[exp.to_column("partition_col")])
+                    ),
                 ]
             ),
             expression=exp.select("1"),
         )
         self.assertEqual(
             ctas_hive.sql(dialect=self.dialect, identify=True),
-            "CREATE TABLE \"foo\".\"bar\" WITH (format='parquet', external_location='s3://foo') AS SELECT 1",
+            "CREATE TABLE \"foo\".\"bar\" WITH (format='parquet', external_location='s3://foo', partitioned_by=ARRAY['partition_col']) AS SELECT 1",
         )

         ctas_iceberg = exp.Create(

@@ -223,11 +227,14 @@ class TestAthena(Validator):
                 expressions=[
                     exp.Property(this=exp.var("table_type"), value=exp.Literal.string("iceberg")),
                     exp.LocationProperty(this=exp.Literal.string("s3://foo")),
+                    exp.PartitionedByProperty(
+                        this=exp.Schema(expressions=[exp.to_column("partition_col")])
+                    ),
                 ]
             ),
             expression=exp.select("1"),
         )
         self.assertEqual(
             ctas_iceberg.sql(dialect=self.dialect, identify=True),
-            "CREATE TABLE \"foo\".\"bar\" WITH (table_type='iceberg', location='s3://foo') AS SELECT 1",
+            "CREATE TABLE \"foo\".\"bar\" WITH (table_type='iceberg', location='s3://foo', partitioning=ARRAY['partition_col']) AS SELECT 1",
         )
@@ -1572,14 +1572,30 @@ WHERE
             "SAFE_DIVIDE(x, y)",
             write={
                 "bigquery": "SAFE_DIVIDE(x, y)",
-                "duckdb": "IF((y) <> 0, (x) / (y), NULL)",
+                "duckdb": "CASE WHEN y <> 0 THEN x / y ELSE NULL END",
-                "presto": "IF((y) <> 0, (x) / (y), NULL)",
+                "presto": "IF(y <> 0, CAST(x AS DOUBLE) / y, NULL)",
-                "trino": "IF((y) <> 0, (x) / (y), NULL)",
+                "trino": "IF(y <> 0, CAST(x AS DOUBLE) / y, NULL)",
-                "hive": "IF((y) <> 0, (x) / (y), NULL)",
+                "hive": "IF(y <> 0, x / y, NULL)",
-                "spark2": "IF((y) <> 0, (x) / (y), NULL)",
+                "spark2": "IF(y <> 0, x / y, NULL)",
-                "spark": "IF((y) <> 0, (x) / (y), NULL)",
+                "spark": "IF(y <> 0, x / y, NULL)",
-                "databricks": "IF((y) <> 0, (x) / (y), NULL)",
+                "databricks": "IF(y <> 0, x / y, NULL)",
-                "snowflake": "IFF((y) <> 0, (x) / (y), NULL)",
+                "snowflake": "IFF(y <> 0, x / y, NULL)",
+                "postgres": "CASE WHEN y <> 0 THEN CAST(x AS DOUBLE PRECISION) / y ELSE NULL END",
+            },
+        )
+        self.validate_all(
+            "SAFE_DIVIDE(x + 1, 2 * y)",
+            write={
+                "bigquery": "SAFE_DIVIDE(x + 1, 2 * y)",
+                "duckdb": "CASE WHEN (2 * y) <> 0 THEN (x + 1) / (2 * y) ELSE NULL END",
+                "presto": "IF((2 * y) <> 0, CAST((x + 1) AS DOUBLE) / (2 * y), NULL)",
+                "trino": "IF((2 * y) <> 0, CAST((x + 1) AS DOUBLE) / (2 * y), NULL)",
+                "hive": "IF((2 * y) <> 0, (x + 1) / (2 * y), NULL)",
+                "spark2": "IF((2 * y) <> 0, (x + 1) / (2 * y), NULL)",
+                "spark": "IF((2 * y) <> 0, (x + 1) / (2 * y), NULL)",
+                "databricks": "IF((2 * y) <> 0, (x + 1) / (2 * y), NULL)",
+                "snowflake": "IFF((2 * y) <> 0, (x + 1) / (2 * y), NULL)",
+                "postgres": "CASE WHEN (2 * y) <> 0 THEN CAST((x + 1) AS DOUBLE PRECISION) / (2 * y) ELSE NULL END",
             },
         )
         self.validate_all(

@@ -1591,11 +1607,11 @@ WHERE
             },
         )
         self.validate_all(
-            "SELECT STRPOS('foo@example.com', '@')",
+            "SELECT INSTR('foo@example.com', '@')",
             write={
-                "bigquery": "SELECT STRPOS('foo@example.com', '@')",
+                "bigquery": "SELECT INSTR('foo@example.com', '@')",
                 "duckdb": "SELECT STRPOS('foo@example.com', '@')",
-                "snowflake": "SELECT POSITION('@', 'foo@example.com')",
+                "snowflake": "SELECT CHARINDEX('@', 'foo@example.com')",
             },
         )
         self.validate_all(

@@ -2239,8 +2255,8 @@ OPTIONS (
             r"REGEXP_EXTRACT(svc_plugin_output, '\\\\\\((.*)')",
         )
         self.validate_identity(
-            r"REGEXP_SUBSTR(value, pattern, position, occurence)",
+            r"REGEXP_SUBSTR(value, pattern, position, occurrence)",
-            r"REGEXP_EXTRACT(value, pattern, position, occurence)",
+            r"REGEXP_EXTRACT(value, pattern, position, occurrence)",
         )

         self.validate_all(
@@ -85,8 +85,8 @@ class TestClickhouse(Validator):
        self.validate_identity("SELECT exponentialTimeDecayedAvg(60)(a, b)")
        self.validate_identity("levenshteinDistance(col1, col2)", "editDistance(col1, col2)")
        self.validate_identity("SELECT * FROM foo WHERE x GLOBAL IN (SELECT * FROM bar)")
-       self.validate_identity("position(haystack, needle)")
+       self.validate_identity("POSITION(haystack, needle)")
-       self.validate_identity("position(haystack, needle, position)")
+       self.validate_identity("POSITION(haystack, needle, position)")
        self.validate_identity("CAST(x AS DATETIME)", "CAST(x AS DateTime)")
        self.validate_identity("CAST(x AS TIMESTAMPTZ)", "CAST(x AS DateTime)")
        self.validate_identity("CAST(x as MEDIUMINT)", "CAST(x AS Int32)")

@@ -398,9 +398,8 @@ class TestClickhouse(Validator):
                "clickhouse": "SELECT quantileIf(0.5)(a, TRUE)",
            },
        )
-       self.validate_all(
+       self.validate_identity(
-           "SELECT position(needle IN haystack)",
+           "SELECT POSITION(needle IN haystack)", "SELECT POSITION(haystack, needle)"
-           write={"clickhouse": "SELECT position(haystack, needle)"},
        )
        self.validate_identity(
            "SELECT * FROM x LIMIT 10 SETTINGS max_results = 100, result = 'break'"

@@ -742,6 +741,9 @@ class TestClickhouse(Validator):
            "CREATE TABLE a ENGINE=Memory AS SELECT 1 AS c COMMENT 'foo'",
            "CREATE TABLE a ENGINE=Memory AS (SELECT 1 AS c) COMMENT 'foo'",
        )
+       self.validate_identity(
+           'CREATE TABLE t1 ("x" UInt32, "y" Dynamic, "z" Dynamic(max_types = 10)) ENGINE=MergeTree ORDER BY x'
+       )

        self.validate_all(
            "CREATE DATABASE x",
@@ -314,7 +314,7 @@ class TestDialect(Validator):
            "materialize": "CAST(a AS SMALLINT)",
            "mysql": "CAST(a AS SIGNED)",
            "hive": "CAST(a AS SMALLINT)",
-           "oracle": "CAST(a AS NUMBER)",
+           "oracle": "CAST(a AS SMALLINT)",
            "postgres": "CAST(a AS SMALLINT)",
            "presto": "CAST(a AS SMALLINT)",
            "redshift": "CAST(a AS SMALLINT)",

@@ -374,10 +374,10 @@ class TestDialect(Validator):
                "mysql": "TIMESTAMP(a)",
            },
        )
-       self.validate_all("CAST(a AS TINYINT)", write={"oracle": "CAST(a AS NUMBER)"})
+       self.validate_all("CAST(a AS TINYINT)", write={"oracle": "CAST(a AS SMALLINT)"})
-       self.validate_all("CAST(a AS SMALLINT)", write={"oracle": "CAST(a AS NUMBER)"})
+       self.validate_all("CAST(a AS SMALLINT)", write={"oracle": "CAST(a AS SMALLINT)"})
-       self.validate_all("CAST(a AS BIGINT)", write={"oracle": "CAST(a AS NUMBER)"})
+       self.validate_all("CAST(a AS BIGINT)", write={"oracle": "CAST(a AS INT)"})
-       self.validate_all("CAST(a AS INT)", write={"oracle": "CAST(a AS NUMBER)"})
+       self.validate_all("CAST(a AS INT)", write={"oracle": "CAST(a AS INT)"})
        self.validate_all(
            "CAST(a AS DECIMAL)",
            read={"oracle": "CAST(a AS NUMBER)"},

@@ -1682,42 +1682,202 @@ class TestDialect(Validator):
            },
        )
        self.validate_all(
-           "POSITION(needle IN haystack)",
+           "STR_POSITION(haystack, needle)",
-           write={
+           read={
-               "drill": "STRPOS(haystack, needle)",
+               "athena": "POSITION(needle in haystack)",
-               "duckdb": "STRPOS(haystack, needle)",
+               "clickhouse": "POSITION(needle in haystack)",
-               "postgres": "STRPOS(haystack, needle)",
+               "databricks": "POSITION(needle in haystack)",
-               "presto": "STRPOS(haystack, needle)",
+               "drill": "POSITION(needle in haystack)",
-               "spark": "LOCATE(needle, haystack)",
+               "duckdb": "POSITION(needle in haystack)",
-               "clickhouse": "position(haystack, needle)",
+               "materialize": "POSITION(needle in haystack)",
-               "snowflake": "POSITION(needle, haystack)",
+               "mysql": "POSITION(needle in haystack)",
-               "mysql": "LOCATE(needle, haystack)",
+               "postgres": "POSITION(needle in haystack)",
+               "presto": "POSITION(needle in haystack)",
+               "redshift": "POSITION(needle in haystack)",
+               "risingwave": "POSITION(needle in haystack)",
+               "snowflake": "POSITION(needle in haystack)",
+               "spark": "POSITION(needle in haystack)",
+               "spark2": "POSITION(needle in haystack)",
+               "teradata": "POSITION(needle in haystack)",
+               "trino": "POSITION(needle in haystack)",
            },
        )
        self.validate_all(
            "STR_POSITION(haystack, needle)",
-           write={
+           read={
+               "clickhouse": "POSITION(haystack, needle)",
+               "databricks": "POSITION(needle, haystack)",
+               "snowflake": "POSITION(needle, haystack)",
+               "spark2": "POSITION(needle, haystack)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle)",
+           read={
+               "athena": "STRPOS(haystack, needle)",
+               "bigquery": "STRPOS(haystack, needle)",
                "drill": "STRPOS(haystack, needle)",
                "duckdb": "STRPOS(haystack, needle)",
                "postgres": "STRPOS(haystack, needle)",
                "presto": "STRPOS(haystack, needle)",
-               "bigquery": "STRPOS(haystack, needle)",
+               "redshift": "STRPOS(haystack, needle)",
-               "spark": "LOCATE(needle, haystack)",
+               "trino": "STRPOS(haystack, needle)",
-               "clickhouse": "position(haystack, needle)",
-               "snowflake": "POSITION(needle, haystack)",
-               "mysql": "LOCATE(needle, haystack)",
            },
        )
        self.validate_all(
-           "POSITION(needle, haystack, pos)",
+           "STR_POSITION(haystack, needle)",
+           read={
+               "bigquery": "INSTR(haystack, needle)",
+               "databricks": "INSTR(haystack, needle)",
+               "doris": "INSTR(haystack, needle)",
+               "duckdb": "INSTR(haystack, needle)",
+               "hive": "INSTR(haystack, needle)",
+               "mysql": "INSTR(haystack, needle)",
+               "oracle": "INSTR(haystack, needle)",
+               "spark": "INSTR(haystack, needle)",
+               "spark2": "INSTR(haystack, needle)",
+               "sqlite": "INSTR(haystack, needle)",
+               "starrocks": "INSTR(haystack, needle)",
+               "teradata": "INSTR(haystack, needle)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle)",
+           read={
+               "clickhouse": "LOCATE(needle, haystack)",
+               "databricks": "LOCATE(needle, haystack)",
+               "doris": "LOCATE(needle, haystack)",
+               "hive": "LOCATE(needle, haystack)",
+               "mysql": "LOCATE(needle, haystack)",
+               "spark": "LOCATE(needle, haystack)",
+               "spark2": "LOCATE(needle, haystack)",
+               "starrocks": "LOCATE(needle, haystack)",
+               "teradata": "LOCATE(needle, haystack)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle)",
+           read={
+               "athena": "CHARINDEX(needle, haystack)",
+               "databricks": "CHARINDEX(needle, haystack)",
+               "snowflake": "CHARINDEX(needle, haystack)",
+               "tsql": "CHARINDEX(needle, haystack)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle)",
+           read={
+               "tableau": "FIND(haystack, needle)",
+           },
            write={
-               "drill": "`IF`(STRPOS(SUBSTR(haystack, pos), needle) = 0, 0, STRPOS(SUBSTR(haystack, pos), needle) + pos - 1)",
+               "athena": "STRPOS(haystack, needle)",
-               "presto": "IF(STRPOS(SUBSTR(haystack, pos), needle) = 0, 0, STRPOS(SUBSTR(haystack, pos), needle) + pos - 1)",
+               "bigquery": "INSTR(haystack, needle)",
-               "postgres": "CASE WHEN STRPOS(SUBSTR(haystack, pos), needle) = 0 THEN 0 ELSE STRPOS(SUBSTR(haystack, pos), needle) + pos - 1 END",
+               "clickhouse": "POSITION(haystack, needle)",
-               "spark": "LOCATE(needle, haystack, pos)",
+               "databricks": "LOCATE(needle, haystack)",
-               "clickhouse": "position(haystack, needle, pos)",
+               "doris": "LOCATE(needle, haystack)",
-               "snowflake": "POSITION(needle, haystack, pos)",
+               "drill": "STRPOS(haystack, needle)",
-               "mysql": "LOCATE(needle, haystack, pos)",
+               "duckdb": "STRPOS(haystack, needle)",
+               "hive": "LOCATE(needle, haystack)",
+               "materialize": "POSITION(needle IN haystack)",
+               "mysql": "LOCATE(needle, haystack)",
+               "oracle": "INSTR(haystack, needle)",
+               "postgres": "POSITION(needle IN haystack)",
+               "presto": "STRPOS(haystack, needle)",
+               "redshift": "POSITION(needle IN haystack)",
+               "risingwave": "POSITION(needle IN haystack)",
+               "snowflake": "CHARINDEX(needle, haystack)",
+               "spark": "LOCATE(needle, haystack)",
+               "spark2": "LOCATE(needle, haystack)",
+               "sqlite": "INSTR(haystack, needle)",
+               "tableau": "FIND(haystack, needle)",
+               "teradata": "INSTR(haystack, needle)",
+               "trino": "STRPOS(haystack, needle)",
+               "tsql": "CHARINDEX(needle, haystack)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle, position)",
+           read={
+               "clickhouse": "POSITION(haystack, needle, position)",
+               "databricks": "POSITION(needle, haystack, position)",
+               "snowflake": "POSITION(needle, haystack, position)",
+               "spark": "POSITION(needle, haystack, position)",
+               "spark2": "POSITION(needle, haystack, position)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle, position)",
+           read={
+               "doris": "LOCATE(needle, haystack, position)",
+               "hive": "LOCATE(needle, haystack, position)",
+               "mysql": "LOCATE(needle, haystack, position)",
+               "spark": "LOCATE(needle, haystack, position)",
+               "spark2": "LOCATE(needle, haystack, position)",
+               "starrocks": "LOCATE(needle, haystack, position)",
+               "teradata": "LOCATE(needle, haystack, position)",
+               "clickhouse": "LOCATE(needle, haystack, position)",
+               "databricks": "LOCATE(needle, haystack, position)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle, position)",
+           read={
+               "bigquery": "INSTR(haystack, needle, position)",
+               "doris": "INSTR(haystack, needle, position)",
+               "oracle": "INSTR(haystack, needle, position)",
+               "teradata": "INSTR(haystack, needle, position)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle, position)",
+           read={
+               "databricks": "CHARINDEX(needle, haystack, position)",
+               "snowflake": "CHARINDEX(needle, haystack, position)",
+               "tsql": "CHARINDEX(needle, haystack, position)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle, position)",
+           write={
+               "athena": "IF(STRPOS(SUBSTRING(haystack, position), needle) = 0, 0, STRPOS(SUBSTRING(haystack, position), needle) + position - 1)",
+               "bigquery": "INSTR(haystack, needle, position)",
+               "clickhouse": "POSITION(haystack, needle, position)",
+               "databricks": "LOCATE(needle, haystack, position)",
+               "doris": "LOCATE(needle, haystack, position)",
+               "drill": "`IF`(STRPOS(SUBSTRING(haystack, position), needle) = 0, 0, STRPOS(SUBSTRING(haystack, position), needle) + position - 1)",
+               "duckdb": "CASE WHEN STRPOS(SUBSTRING(haystack, position), needle) = 0 THEN 0 ELSE STRPOS(SUBSTRING(haystack, position), needle) + position - 1 END",
+               "hive": "LOCATE(needle, haystack, position)",
+               "materialize": "CASE WHEN POSITION(needle IN SUBSTRING(haystack FROM position)) = 0 THEN 0 ELSE POSITION(needle IN SUBSTRING(haystack FROM position)) + position - 1 END",
+               "mysql": "LOCATE(needle, haystack, position)",
+               "oracle": "INSTR(haystack, needle, position)",
+               "postgres": "CASE WHEN POSITION(needle IN SUBSTRING(haystack FROM position)) = 0 THEN 0 ELSE POSITION(needle IN SUBSTRING(haystack FROM position)) + position - 1 END",
+               "presto": "IF(STRPOS(SUBSTRING(haystack, position), needle) = 0, 0, STRPOS(SUBSTRING(haystack, position), needle) + position - 1)",
+               "redshift": "CASE WHEN POSITION(needle IN SUBSTRING(haystack FROM position)) = 0 THEN 0 ELSE POSITION(needle IN SUBSTRING(haystack FROM position)) + position - 1 END",
+               "risingwave": "CASE WHEN POSITION(needle IN SUBSTRING(haystack FROM position)) = 0 THEN 0 ELSE POSITION(needle IN SUBSTRING(haystack FROM position)) + position - 1 END",
+               "snowflake": "CHARINDEX(needle, haystack, position)",
+               "spark": "LOCATE(needle, haystack, position)",
+               "spark2": "LOCATE(needle, haystack, position)",
+               "sqlite": "IIF(INSTR(SUBSTRING(haystack, position), needle) = 0, 0, INSTR(SUBSTRING(haystack, position), needle) + position - 1)",
+               "tableau": "IF FIND(SUBSTRING(haystack, position), needle) = 0 THEN 0 ELSE FIND(SUBSTRING(haystack, position), needle) + position - 1 END",
+               "teradata": "INSTR(haystack, needle, position)",
+               "trino": "IF(STRPOS(SUBSTRING(haystack, position), needle) = 0, 0, STRPOS(SUBSTRING(haystack, position), needle) + position - 1)",
+               "tsql": "CHARINDEX(needle, haystack, position)",
+           },
+       )
+       self.validate_all(
+           "STR_POSITION(haystack, needle, position, occurrence)",
+           read={
+               "bigquery": "INSTR(haystack, needle, position, occurrence)",
+               "oracle": "INSTR(haystack, needle, position, occurrence)",
+               "teradata": "INSTR(haystack, needle, position, occurrence)",
+           },
+           write={
+               "bigquery": "INSTR(haystack, needle, position, occurrence)",
+               "oracle": "INSTR(haystack, needle, position, occurrence)",
+               "presto": "IF(STRPOS(SUBSTRING(haystack, position), needle, occurrence) = 0, 0, STRPOS(SUBSTRING(haystack, position), needle, occurrence) + position - 1)",
+               "tableau": "IF FINDNTH(SUBSTRING(haystack, position), needle, occurrence) = 0 THEN 0 ELSE FINDNTH(SUBSTRING(haystack, position), needle, occurrence) + position - 1 END",
+               "teradata": "INSTR(haystack, needle, position, occurrence)",
+               "trino": "IF(STRPOS(SUBSTRING(haystack, position), needle, occurrence) = 0, 0, STRPOS(SUBSTRING(haystack, position), needle, occurrence) + position - 1)",
+           },
        )
        self.validate_all(
@@ -903,6 +903,13 @@ class TestDuckDB(Validator):
            },
        )

+       self.validate_all(
+           "SELECT REGEXP_MATCHES('ThOmAs', 'thomas', 'i')",
+           read={
+               "postgres": "SELECT 'ThOmAs' ~* 'thomas'",
+           },
+       )
+
    def test_array_index(self):
        with self.assertLogs(helper_logger) as cm:
            self.validate_all(
@@ -588,8 +588,8 @@ class TestHive(Validator):
        self.validate_all(
            "LOCATE('a', x, 3)",
            write={
-               "duckdb": "CASE WHEN STRPOS(SUBSTR(x, 3), 'a') = 0 THEN 0 ELSE STRPOS(SUBSTR(x, 3), 'a') + 3 - 1 END",
+               "duckdb": "CASE WHEN STRPOS(SUBSTRING(x, 3), 'a') = 0 THEN 0 ELSE STRPOS(SUBSTRING(x, 3), 'a') + 3 - 1 END",
-               "presto": "IF(STRPOS(SUBSTR(x, 3), 'a') = 0, 0, STRPOS(SUBSTR(x, 3), 'a') + 3 - 1)",
+               "presto": "IF(STRPOS(SUBSTRING(x, 3), 'a') = 0, 0, STRPOS(SUBSTRING(x, 3), 'a') + 3 - 1)",
                "hive": "LOCATE('a', x, 3)",
                "spark": "LOCATE('a', x, 3)",
            },

@@ -740,6 +740,7 @@ class TestHive(Validator):
                "presto": "SET_AGG(x)",
                "snowflake": "ARRAY_UNIQUE_AGG(x)",
                "spark": "COLLECT_SET(x)",
+               "trino": "ARRAY_AGG(DISTINCT x)",
            },
        )
        self.validate_all(
@@ -394,6 +394,9 @@ JOIN departments
        self.validate_identity(
            "XMLTABLE('x' RETURNING SEQUENCE BY REF COLUMNS a VARCHAR2, b FLOAT)"
        )
+       self.validate_identity(
+           "SELECT x.* FROM example t, XMLTABLE(XMLNAMESPACES(DEFAULT 'http://example.com/default', 'http://example.com/ns1' AS \"ns1\"), '/root/data' PASSING t.xml COLUMNS id NUMBER PATH '@id', value VARCHAR2(100) PATH 'ns1:value/text()') x"
+       )

        self.validate_all(
            """SELECT warehouse_name warehouse,

@@ -513,10 +516,10 @@ FROM JSON_TABLE(res, '$.info[*]' COLUMNS(
  LEVEL,
  SYS_CONNECT_BY_PATH(last_name, '/') AS "Path"
FROM employees
-START WITH last_name = 'King'
-CONNECT BY PRIOR employee_id = manager_id AND LEVEL <= 4
WHERE
-  level <= 3 AND department_id = 80"""
+  level <= 3 AND department_id = 80
+START WITH last_name = 'King'
+CONNECT BY PRIOR employee_id = manager_id AND LEVEL <= 4"""

        for query in (f"{body}{start}{connect}", f"{body}{connect}{start}"):
            self.validate_identity(query, pretty, pretty=True)
@@ -76,7 +76,10 @@ class TestPostgres(Validator):
        self.validate_identity("SELECT CURRENT_USER")
        self.validate_identity("SELECT * FROM ONLY t1")
        self.validate_identity(
-           "SELECT id, name FROM XMLTABLE('/root/user' PASSING xml_data COLUMNS id INT PATH '@id', name TEXT PATH 'name/text()') AS t"
+           "SELECT id, name FROM xml_data AS t, XMLTABLE('/root/user' PASSING t.xml COLUMNS id INT PATH '@id', name TEXT PATH 'name/text()') AS x"
+       )
+       self.validate_identity(
+           "SELECT id, value FROM xml_content AS t, XMLTABLE(XMLNAMESPACES('http://example.com/ns1' AS ns1, 'http://example.com/ns2' AS ns2), '/root/data' PASSING t.xml COLUMNS id INT PATH '@ns1:id', value TEXT PATH 'ns2:value/text()') AS x"
        )
        self.validate_identity(
            "SELECT * FROM t WHERE some_column >= CURRENT_DATE + INTERVAL '1 day 1 hour' AND some_another_column IS TRUE"

@@ -830,6 +833,10 @@ class TestPostgres(Validator):
            "/* + some comment */ SELECT b.foo, b.bar FROM baz AS b",
        )

+       self.validate_identity(
+           "SELECT PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY a) FILTER(WHERE CAST(b AS BOOLEAN)) AS mean_value FROM (VALUES (0, 't')) AS fake_data(a, b)"
+       )
+
    def test_ddl(self):
        # Checks that user-defined types are parsed into DataType instead of Identifier
        self.parse_one("CREATE TABLE t (a udt)").this.expressions[0].args["kind"].assert_is(
@@ -204,14 +204,14 @@ class TestPresto(Validator):
            },
        )
        self.validate_all(
-           "STRPOS('ABC', 'A', 3)",
+           "STRPOS(haystack, needle, occurrence)",
-           read={
-               "trino": "STRPOS('ABC', 'A', 3)",
-           },
            write={
-               "presto": "STRPOS('ABC', 'A', 3)",
+               "bigquery": "INSTR(haystack, needle, 1, occurrence)",
-               "trino": "STRPOS('ABC', 'A', 3)",
+               "oracle": "INSTR(haystack, needle, 1, occurrence)",
-               "snowflake": "POSITION('A', 'ABC')",
+               "presto": "STRPOS(haystack, needle, occurrence)",
+               "tableau": "FINDNTH(haystack, needle, occurrence)",
+               "trino": "STRPOS(haystack, needle, occurrence)",
+               "teradata": "INSTR(haystack, needle, 1, occurrence)",
            },
        )
@@ -36,7 +36,6 @@ class TestSnowflake(Validator):
        self.validate_identity("SELECT CAST(obj AS OBJECT(x CHAR) RENAME FIELDS)")
        self.validate_identity("SELECT CAST(obj AS OBJECT(x CHAR, y VARCHAR) ADD FIELDS)")
        self.validate_identity("SELECT TO_TIMESTAMP(123.4)").selects[0].assert_is(exp.Anonymous)
-       self.validate_identity("SELECT TO_TIME(x) FROM t")
        self.validate_identity("SELECT TO_TIMESTAMP(x) FROM t")
        self.validate_identity("SELECT TO_TIMESTAMP_NTZ(x) FROM t")
        self.validate_identity("SELECT TO_TIMESTAMP_LTZ(x) FROM t")

@@ -105,6 +104,9 @@ class TestSnowflake(Validator):
        self.validate_identity(
            "SELECT * FROM DATA AS DATA_L ASOF JOIN DATA AS DATA_R MATCH_CONDITION (DATA_L.VAL > DATA_R.VAL) ON DATA_L.ID = DATA_R.ID"
        )
+       self.validate_identity(
+           """SELECT TO_TIMESTAMP('2025-01-16T14:45:30.123+0500', 'yyyy-mm-DD"T"hh24:mi:ss.ff3TZHTZM')"""
+       )
        self.validate_identity(
            "WITH t (SELECT 1 AS c) SELECT c FROM t",
            "WITH t AS (SELECT 1 AS c) SELECT c FROM t",

@@ -294,6 +296,13 @@ class TestSnowflake(Validator):
            "SELECT * RENAME (a AS b), c AS d FROM xxx",
        )

+       self.validate_all(
+           "SELECT TO_TIMESTAMP('2025-01-16 14:45:30.123', 'yyyy-mm-DD hh24:mi:ss.ff6')",
+           write={
+               "": "SELECT STR_TO_TIME('2025-01-16 14:45:30.123', '%Y-%m-%d %H:%M:%S.%f')",
+               "snowflake": "SELECT TO_TIMESTAMP('2025-01-16 14:45:30.123', 'yyyy-mm-DD hh24:mi:ss.ff6')",
+           },
+       )
        self.validate_all(
            "ARRAY_CONSTRUCT_COMPACT(1, null, 2)",
            write={

@@ -720,13 +729,6 @@ class TestSnowflake(Validator):
                "spark": "SELECT CAST('2013-04-05 01:02:03' AS TIMESTAMP)",
            },
        )
-       self.validate_all(
-           "SELECT TO_TIME('12:05:00')",
-           write={
-               "bigquery": "SELECT CAST('12:05:00' AS TIME)",
-               "snowflake": "SELECT CAST('12:05:00' AS TIME)",
-           },
-       )
        self.validate_all(
            "SELECT TO_TIMESTAMP('04/05/2013 01:02:03', 'mm/DD/yyyy hh24:mi:ss')",
            read={

@@ -1286,6 +1288,37 @@ class TestSnowflake(Validator):
            },
        )

+       self.validate_identity("SELECT TO_TIME(x) FROM t")
+       self.validate_all(
+           "SELECT TO_TIME('12:05:00')",
+           write={
+               "bigquery": "SELECT CAST('12:05:00' AS TIME)",
+               "snowflake": "SELECT CAST('12:05:00' AS TIME)",
+               "duckdb": "SELECT CAST('12:05:00' AS TIME)",
+           },
+       )
+       self.validate_all(
+           "SELECT TO_TIME(CONVERT_TIMEZONE('UTC', 'US/Pacific', '2024-08-06 09:10:00.000')) AS pst_time",
+           write={
+               "snowflake": "SELECT TO_TIME(CONVERT_TIMEZONE('UTC', 'US/Pacific', '2024-08-06 09:10:00.000')) AS pst_time",
+               "duckdb": "SELECT CAST(CAST('2024-08-06 09:10:00.000' AS TIMESTAMP) AT TIME ZONE 'UTC' AT TIME ZONE 'US/Pacific' AS TIME) AS pst_time",
+           },
+       )
+       self.validate_all(
+           "SELECT TO_TIME('11.15.00', 'hh24.mi.ss')",
+           write={
+               "snowflake": "SELECT TO_TIME('11.15.00', 'hh24.mi.ss')",
+               "duckdb": "SELECT CAST(STRPTIME('11.15.00', '%H.%M.%S') AS TIME)",
+           },
+       )
+       self.validate_all(
+           "SELECT TRY_TO_TIME('11.15.00', 'hh24.mi.ss')",
+           write={
+               "snowflake": "SELECT TRY_TO_TIME('11.15.00', 'hh24.mi.ss')",
+               "duckdb": "SELECT CAST(STRPTIME('11.15.00', '%H.%M.%S') AS TIME)",
+           },
+       )
+
    def test_semi_structured_types(self):
        self.validate_identity("SELECT CAST(a AS VARIANT)")
        self.validate_identity("SELECT CAST(a AS ARRAY)")
@@ -7,6 +7,7 @@ class TestSQLite(Validator):
    dialect = "sqlite"

    def test_sqlite(self):
+       self.validate_identity("UNHEX(a, b)")
        self.validate_identity("SELECT DATE()")
        self.validate_identity("SELECT DATE('now', 'start of month', '+1 month', '-1 day')")
        self.validate_identity("SELECT DATETIME(1092941466, 'unixepoch')")
@@ -9,6 +9,13 @@ class TestTrino(Validator):
        self.validate_identity("JSON_QUERY(content, 'lax $.HY.*')")
        self.validate_identity("JSON_QUERY(content, 'strict $.HY.*' WITH UNCONDITIONAL WRAPPER)")
        self.validate_identity("JSON_QUERY(content, 'strict $.HY.*' WITHOUT CONDITIONAL WRAPPER)")
+       self.validate_identity("JSON_QUERY(description, 'strict $.comment' KEEP QUOTES)")
+       self.validate_identity(
+           "JSON_QUERY(description, 'strict $.comment' OMIT QUOTES ON SCALAR STRING)"
+       )
+       self.validate_identity(
+           "JSON_QUERY(content, 'strict $.HY.*' WITH UNCONDITIONAL WRAPPER KEEP QUOTES)"
+       )

    def test_listagg(self):
        self.validate_identity(
@@ -184,7 +184,7 @@ class TestTSQL(Validator):
            "tsql": "CREATE TABLE #mytemptable (a INTEGER)",
            "snowflake": "CREATE TEMPORARY TABLE mytemptable (a INT)",
            "duckdb": "CREATE TEMPORARY TABLE mytemptable (a INT)",
-           "oracle": "CREATE GLOBAL TEMPORARY TABLE mytemptable (a NUMBER)",
+           "oracle": "CREATE GLOBAL TEMPORARY TABLE mytemptable (a INT)",
            "hive": "CREATE TEMPORARY TABLE mytemptable (a INT)",
            "spark2": "CREATE TEMPORARY TABLE mytemptable (a INT) USING PARQUET",
            "spark": "CREATE TEMPORARY TABLE mytemptable (a INT) USING PARQUET",

@@ -436,6 +436,13 @@ class TestTSQL(Validator):
            "'a' + 'b'",
        )

+       self.validate_identity(
+           "CREATE TABLE db.t1 (a INTEGER, b VARCHAR(50), CONSTRAINT c PRIMARY KEY (a DESC))",
+       )
+       self.validate_identity(
+           "CREATE TABLE db.t1 (a INTEGER, b INTEGER, CONSTRAINT c PRIMARY KEY (a DESC, b))"
+       )
+
    def test_option(self):
        possible_options = [
            "HASH GROUP",

@@ -836,6 +843,7 @@ class TestTSQL(Validator):
            f"UNIQUE {clustered_keyword} ([internal_id] ASC))",
        )

+       self.validate_identity("CREATE SCHEMA testSchema")
        self.validate_identity("CREATE VIEW t AS WITH cte AS (SELECT 1 AS c) SELECT c FROM cte")
        self.validate_identity(
            "ALTER TABLE tbl SET SYSTEM_VERSIONING=ON(HISTORY_TABLE=db.tbl, DATA_CONSISTENCY_CHECK=OFF, HISTORY_RETENTION_PERIOD=5 DAYS)"
48  tests/fixtures/optimizer/qualify_columns.sql  vendored

@@ -227,7 +227,32 @@ START WITH title = 'President'
 CONNECT BY manager_ID = PRIOR employee_id
 ORDER BY
   employee_ID NULLS LAST;
-WITH EMPLOYEES AS (SELECT T.TITLE AS TITLE, T.EMPLOYEE_ID AS EMPLOYEE_ID, T.MANAGER_ID AS MANAGER_ID FROM (VALUES ('President', 1, NULL), ('Vice President Engineering', 10, 1), ('Programmer', 100, 10), ('QA Engineer', 101, 10), ('Vice President HR', 20, 1), ('Health Insurance Analyst', 200, 20)) AS T(TITLE, EMPLOYEE_ID, MANAGER_ID)) SELECT EMPLOYEES.EMPLOYEE_ID AS EMPLOYEE_ID, EMPLOYEES.MANAGER_ID AS MANAGER_ID, EMPLOYEES.TITLE AS TITLE, EMPLOYEES.LEVEL AS LEVEL FROM EMPLOYEES AS EMPLOYEES START WITH EMPLOYEES.TITLE = 'President' CONNECT BY EMPLOYEES.MANAGER_ID = PRIOR EMPLOYEES.EMPLOYEE_ID ORDER BY EMPLOYEE_ID;
+WITH EMPLOYEES AS (SELECT T.TITLE AS TITLE, T.EMPLOYEE_ID AS EMPLOYEE_ID, T.MANAGER_ID AS MANAGER_ID FROM (VALUES ('President', 1, NULL), ('Vice President Engineering', 10, 1), ('Programmer', 100, 10), ('QA Engineer', 101, 10), ('Vice President HR', 20, 1), ('Health Insurance Analyst', 200, 20)) AS T(TITLE, EMPLOYEE_ID, MANAGER_ID)) SELECT EMPLOYEES.EMPLOYEE_ID AS EMPLOYEE_ID, EMPLOYEES.MANAGER_ID AS MANAGER_ID, EMPLOYEES.TITLE AS TITLE, LEVEL AS LEVEL FROM EMPLOYEES AS EMPLOYEES START WITH EMPLOYEES.TITLE = 'President' CONNECT BY EMPLOYEES.MANAGER_ID = PRIOR EMPLOYEES.EMPLOYEE_ID ORDER BY EMPLOYEE_ID;
+
+# execute: false
+# dialect: oracle
+WITH
+t1 AS (
+    SELECT
+        1 AS c1,
+        1 AS c2,
+        'Y' AS TOP_PARENT_INDICATOR,
+        1 AS id
+    FROM DUAL
+),
+t2 AS (
+    SELECT
+        1 AS c2,
+        2 AS id
+    FROM DUAL
+)
+SELECT t1.c1
+FROM t1
+LEFT JOIN t2 ON t1.c2 = t2.c2
+WHERE (t1.TOP_PARENT_INDICATOR = 'Y' OR LEVEL = 1)
+START WITH (t1.id IS NOT NULL)
+CONNECT BY PRIOR t1.id = t2.id;
+WITH T1 AS (SELECT 1 AS C1, 1 AS C2, 'Y' AS TOP_PARENT_INDICATOR, 1 AS ID FROM DUAL DUAL), T2 AS (SELECT 1 AS C2, 2 AS ID FROM DUAL DUAL) SELECT T1.C1 AS C1 FROM T1 T1 LEFT JOIN T2 T2 ON T1.C2 = T2.C2 WHERE (T1.TOP_PARENT_INDICATOR = 'Y' OR LEVEL = 1) START WITH (NOT T1.ID IS NULL) CONNECT BY PRIOR T1.ID = T2.ID;

 --------------------------------------
 -- Derived tables

@@ -786,3 +811,24 @@ SELECT X.A AS FOO FROM X AS X GROUP BY X.A = 1;
 # execute: false
 SELECT x.a AS foo FROM x WHERE foo = 1;
 SELECT X.A AS FOO FROM X AS X WHERE X.A = 1;
+
+--------------------------------------
+-- SEMI / ANTI Joins
+--------------------------------------
+
+# title: SEMI JOIN table is excluded from the scope
+SELECT * FROM x SEMI JOIN y USING (b);
+SELECT x.a AS a, x.b AS b FROM x AS x SEMI JOIN y AS y ON x.b = y.b;
+
+# title: ANTI JOIN table is excluded from the scope
+SELECT * FROM x ANTI JOIN y USING (b);
+SELECT x.a AS a, x.b AS b FROM x AS x ANTI JOIN y AS y ON x.b = y.b;
+
+# title: SEMI + normal joins reinclude the table on scope
+SELECT * FROM x SEMI JOIN y USING (b) JOIN y USING (b);
+SELECT x.a AS a, COALESCE(x.b, y_2.b) AS b, y_2.c AS c FROM x AS x SEMI JOIN y AS y ON x.b = y.b JOIN y AS y_2 ON x.b = y_2.b;
+
+# title: ANTI + normal joins reinclude the table on scope
+SELECT * FROM x ANTI JOIN y USING (b) JOIN y USING (b);
+SELECT x.a AS a, COALESCE(x.b, y_2.b) AS b, y_2.c AS c FROM x AS x ANTI JOIN y AS y ON x.b = y.b JOIN y AS y_2 ON x.b = y_2.b;
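A sketch of the SEMI/ANTI join scoping fixture above, driven through the optimizer (the x/y schema here mirrors the fixture's tables):

    import sqlglot
    from sqlglot.optimizer.qualify import qualify

    schema = {"x": {"a": "int", "b": "int"}, "y": {"b": "int", "c": "int"}}
    sql = "SELECT * FROM x SEMI JOIN y USING (b)"
    # y is excluded from star expansion, per the new fixture lines.
    print(qualify(sqlglot.parse_one(sql), schema=schema).sql())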
@@ -12,3 +12,4 @@ SELECT a, SUM(b) FROM x GROUP BY 3;
 SELECT p FROM (SELECT x from xx) y CROSS JOIN yy CROSS JOIN zz
 SELECT a FROM (SELECT * FROM x CROSS JOIN y);
 SELECT x FROM tbl AS tbl(a);
+SELECT a JOIN b USING (a);
3  tests/fixtures/optimizer/simplify.sql  vendored

@@ -363,6 +363,9 @@ x * (1 - y);
 ANY(t.value);
 ANY(t.value);

+SELECT (ARRAY_AGG(foo))[1];
+SELECT (ARRAY_AGG(foo))[1];
+
 --------------------------------------
 -- Literals
 --------------------------------------
@@ -668,7 +668,7 @@ FROM tbl1""",
        )
        self.validate(
            "WITH A(filter) AS (VALUES 1, 2, 3) SELECT * FROM A WHERE filter >= 2",
-           "WITH A(filter) AS (VALUES (1), (2), (3)) SELECT * FROM A WHERE filter >= 2",
+           "WITH A(filter) AS (SELECT * FROM (VALUES (1), (2), (3)) AS _values) SELECT * FROM A WHERE filter >= 2",
            read="presto",
        )
        self.validate(