
Merging upstream version 0.3.1.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-03-20 08:26:51 +01:00
parent ef493ccbe5
commit 483c153286
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
14 changed files with 98 additions and 3155 deletions


@@ -1,47 +0,0 @@
name: Build and Publish Package

on:
  pull_request:
    branches:
      - main
    types:
      - closed

jobs:
  publish-package:
    if: ${{ github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'release/v') }}
    runs-on: ubuntu-latest
    steps:
      - name: Check out the main branch
        uses: actions/checkout@v4
        with:
          ref: main
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install Poetry
        uses: snok/install-poetry@v1
        with:
          version: 1.6.1
      - name: Configure poetry
        run: poetry config --no-interaction pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
      - name: Get this package's Version
        id: package_version
        run: echo "package_version=$(poetry version --short)" >> $GITHUB_OUTPUT
      - name: Build package
        run: poetry build --no-interaction
      - name: Publish package to PyPI
        run: poetry publish --no-interaction
      - name: Create a Github Release
        uses: softprops/action-gh-release@v1
        with:
          tag_name: v${{ steps.package_version.outputs.package_version }}
          target_commitish: main
          token: ${{ secrets.GH_RELEASE_TOKEN }}
          body_path: CHANGELOG.md
          files: |
            LICENSE
            dist/*harlequin*.whl
            dist/*harlequin*.tar.gz


@@ -1,57 +0,0 @@
name: Create Release Branch

on:
  workflow_dispatch:
    inputs:
      newVersion:
        description: A version number for this release (e.g., "0.1.0")
        required: true

jobs:
  prepare-release:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Check out the main branch
        uses: actions/checkout@v4
        with:
          ref: main
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install Poetry
        uses: snok/install-poetry@v1
        with:
          version: 1.6.1
      - name: Create release branch
        run: |
          git checkout -b release/v${{ github.event.inputs.newVersion }}
          git push --set-upstream origin release/v${{ github.event.inputs.newVersion }}
      - name: Bump version
        run: poetry version ${{ github.event.inputs.newVersion }} --no-interaction
      - name: Ensure package can be built
        run: poetry build --no-interaction
      - name: Update CHANGELOG
        uses: thomaseizinger/keep-a-changelog-new-release@v1
        with:
          version: ${{ github.event.inputs.newVersion }}
      - name: Commit Changes
        uses: stefanzweifel/git-auto-commit-action@v5
        with:
          commit_message: Bumps version to ${{ github.event.inputs.newVersion }}
      - name: Create pull request into main
        uses: thomaseizinger/create-pull-request@1.3.1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          head: release/v${{ github.event.inputs.newVersion }}
          base: main
          title: v${{ github.event.inputs.newVersion }}
          body: >
            This PR was automatically generated. It bumps the version number
            in pyproject.toml and updates CHANGELOG.md. You may have to close
            this PR and reopen it to get the required checks to run.

.gitignore vendored

@@ -1,165 +0,0 @@
# test data
sqlserver_data/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.python-version
.Python
Pipfile
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/


@@ -1,39 +0,0 @@
# harlequin-odbc CHANGELOG
All notable changes to this project will be documented in this file.
## [Unreleased]
## [0.3.0] - 2025-02-25
- The Data Catalog now displays all databases on the connected server, not just the currently-connected database ([tconbeer/harlequin#415](https://github.com/tconbeer/harlequin/discussions/415)).
- Columns in the Data Catalog are now fetched lazily ([#12](https://github.com/tconbeer/harlequin-odbc/issues/12), [#13](https://github.com/tconbeer/harlequin-odbc/issues/13)).
- Data Catalog items now support basic interactions ([#14](https://github.com/tconbeer/harlequin-odbc/issues/14)).
## [0.2.0] - 2025-01-08
- Drops support for Python 3.8
- Adds support for Python 3.13
- Adds support for Harlequin 2.X
## [0.1.1] - 2024-01-09
### Bug Fixes
- Renames package to use hyphen.
## [0.1.0] - 2024-01-09
### Features
- Adds a basic ODBC adapter.
[Unreleased]: https://github.com/tconbeer/harlequin-odbc/compare/0.3.0...HEAD
[0.3.0]: https://github.com/tconbeer/harlequin-odbc/compare/0.2.0...0.3.0
[0.2.0]: https://github.com/tconbeer/harlequin-odbc/compare/0.1.1...0.2.0
[0.1.1]: https://github.com/tconbeer/harlequin-odbc/compare/0.1.0...0.1.1
[0.1.0]: https://github.com/tconbeer/harlequin-odbc/compare/dbe2dbd1da1930117c1572ca751d9cd9d43928b6...0.1.0


@@ -1,24 +0,0 @@
.PHONY: check
check:
	ruff format .
	ruff check . --fix
	mypy
	pytest

.PHONY: init
init:
	docker-compose up -d

.PHONY: clean
clean:
	docker-compose down

.PHONY: serve
serve:
	harlequin -P None -a odbc "${ODBC_CONN_STR}"

.PHONY: lint
lint:
	ruff format .
	ruff check . --fix
	mypy

PKG-INFO (new file)

@@ -0,0 +1,85 @@
Metadata-Version: 2.1
Name: harlequin-odbc
Version: 0.3.1
Summary: A Harlequin adapter for ODBC drivers.
License: MIT
Author: Ted Conbeer
Author-email: tconbeer@users.noreply.github.com
Requires-Python: >=3.9,<3.14
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Requires-Dist: harlequin (>=1.25,<3)
Requires-Dist: pyodbc (>=5.0,<6.0)
Description-Content-Type: text/markdown
# harlequin-odbc
This repo provides the ODBC adapter for Harlequin.
## Installation
`harlequin-odbc` depends on `harlequin`, so installing this package will also install Harlequin.
### Pre-requisites
You will need an ODBC driver manager installed on your OS. Windows has one built-in, but for Unix-based OSes, you will need to download and install one before installing `harlequin-odbc`. You can install unixODBC with `brew install unixodbc` or `sudo apt install unixodbc`. See the [pyodbc docs](https://github.com/mkleehammer/pyodbc/wiki/Install) for more info.
Additionally, you will need to install the ODBC driver for your specific database (e.g., `ODBC Driver 18 for SQL Server` for MS SQL Server). For more information, see the docs for your specific database.
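Before installing, you can confirm that the driver manager and your database's driver are visible to Python; `pyodbc` can enumerate the installed drivers. A minimal check, assuming `pyodbc` is already installed (it is a dependency of `harlequin-odbc`):
```python
# Lists the ODBC drivers that the driver manager currently exposes.
import pyodbc

print(pyodbc.drivers())
# Expect your database's driver to appear, e.g. "ODBC Driver 18 for SQL Server".
```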
### Using pip
To install this adapter into an activated virtual environment:
```bash
pip install harlequin-odbc
```
### Using poetry
```bash
poetry add harlequin-odbc
```
### Using pipx
If you do not already have Harlequin installed:
```bash
pipx install harlequin-odbc
```
If you would like to add the ODBC adapter to an existing Harlequin installation:
```bash
pipx inject harlequin harlequin-odbc
```
### As an Extra
Alternatively, you can install Harlequin with the `odbc` extra:
```bash
pip install harlequin[odbc]
```
```bash
poetry add harlequin[odbc]
```
```bash
pipx install harlequin[odbc]
```
## Usage and Configuration
You can open Harlequin with the ODBC adapter by selecting it with the `-a` option and passing an ODBC connection string:
```bash
harlequin -a odbc 'Driver={ODBC Driver 18 for SQL Server};Server=tcp:harlequin-example.database.windows.net,1433;Database=dev;Uid=harlequin;Pwd=my_secret;Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;'
```
The ODBC adapter does not accept other options.
For more information, see the [Harlequin Docs](https://harlequin.sh/docs/odbc/index).
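Harlequin drives the adapter through the plugin interface exercised in the test suite later in this diff; the same objects can be used directly from Python. A short sketch modeled on those tests, where the connection string is a placeholder you must replace:
```python
# Sketch modeled on this package's tests; the connection string is a placeholder.
from harlequin_odbc.adapter import HarlequinOdbcAdapter

CONN_STR = "Driver={ODBC Driver 18 for SQL Server};Server=tcp:localhost,1433;Database=dev;Uid=me;Pwd=secret;"

conn = HarlequinOdbcAdapter(conn_str=(CONN_STR,)).connect()
cur = conn.execute("select 1 as a")   # DDL statements return None instead of a cursor
if cur is not None:
    print(cur.columns())              # list of (column name, type label) tuples
    print(cur.fetchall())             # data that Harlequin hands to its results table
conn.close()
```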


@@ -1,14 +0,0 @@
services:
  db:
    image: mcr.microsoft.com/mssql/server:2019-latest
    restart: always
    environment:
      ACCEPT_EULA: Y
      MSSQL_SA_PASSWORD: for-testing
    volumes:
      - ./sqlserver_data/data:/var/opt/mssql/data
      - ./sqlserver_data/log:/var/opt/mssql/log
      - ./sqlserver_data/secrets:/var/opt/mssql/secrets
    ports:
      - 1433:1433
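The test fixtures connect to this container through the SQL Server ODBC driver. A quick reachability check along the same lines, assuming the container is up via `docker-compose up -d` and `ODBC Driver 18 for SQL Server` is installed locally:
```python
# Connection string copied from the test fixtures (sa / for-testing are the compose defaults).
import pyodbc

MASTER_DB_CONN = (
    "Driver={ODBC Driver 18 for SQL Server};"
    "Server=tcp:localhost,1433;Database=master;"
    "Uid=sa;Pwd={for-testing};"
    "Encrypt=yes;TrustServerCertificate=yes;Connection Timeout=5;"
)

conn = pyodbc.connect(MASTER_DB_CONN, autocommit=True)
print(conn.cursor().execute("select @@version;").fetchone())
conn.close()
```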

poetry.lock (generated; 1557 changed lines)

File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "harlequin-odbc"
-version = "0.3.0"
+version = "0.3.1"
 description = "A Harlequin adapter for ODBC drivers."
 authors = ["Ted Conbeer <tconbeer@users.noreply.github.com>"]
 license = "MIT"


@@ -148,12 +148,19 @@ class HarlequinOdbcConnection(HarlequinConnection):
         cur = self.aux_conn.cursor()
         catalog: dict[str, dict[str, list[tuple[str, str]]]] = {}
         for db_name, schema_name, rel_name, rel_type, *_ in cur.tables(catalog="%"):
+            if db_name is None:
+                continue
             if db_name not in catalog:
-                catalog[db_name] = {schema_name: [(rel_name, rel_type)]}
-            elif schema_name not in catalog[db_name]:
-                catalog[db_name][schema_name] = [(rel_name, rel_type)]
-            else:
-                catalog[db_name][schema_name].append((rel_name, rel_type))
+                catalog[db_name] = dict()
+            if schema_name is None:
+                continue
+            if schema_name not in catalog[db_name]:
+                catalog[db_name][schema_name] = list()
+            if rel_name is not None:
+                catalog[db_name][schema_name].append((rel_name, rel_type or ""))
         return catalog

     def _list_columns_in_relation(
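The rewritten loop above guards against drivers whose `cursor.tables()` rows contain NULL catalog, schema, relation, or type values. A standalone sketch of the same nesting logic, run against hard-coded rows instead of a live ODBC cursor:
```python
# Mirrors the catalog-building loop above, using fake rows instead of cur.tables(catalog="%").
rows = [
    ("dev", "dbo", "orders", "TABLE"),
    ("dev", "dbo", "orders_v", "VIEW"),
    ("dev", None, None, None),        # driver returned no schema: database still recorded
    (None, "dbo", "ghost", "TABLE"),  # no database name: row skipped entirely
]

catalog: dict[str, dict[str, list[tuple[str, str]]]] = {}
for db_name, schema_name, rel_name, rel_type in rows:
    if db_name is None:
        continue
    if db_name not in catalog:
        catalog[db_name] = dict()
    if schema_name is None:
        continue
    if schema_name not in catalog[db_name]:
        catalog[db_name][schema_name] = list()
    if rel_name is not None:
        catalog[db_name][schema_name].append((rel_name, rel_type or ""))

print(catalog)
# {'dev': {'dbo': [('orders', 'TABLE'), ('orders_v', 'VIEW')]}}
```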

File diff suppressed because it is too large Load diff


@@ -1,27 +0,0 @@
from __future__ import annotations

from typing import Generator

import pyodbc
import pytest

from harlequin_odbc.adapter import (
    HarlequinOdbcAdapter,
    HarlequinOdbcConnection,
)

MASTER_DB_CONN = "Driver={ODBC Driver 18 for SQL Server};Server=tcp:localhost,1433;Database=master;Uid=sa;Pwd={for-testing};Encrypt=yes;TrustServerCertificate=yes;Connection Timeout=5;"  # noqa: E501
TEST_DB_CONN = "Driver={ODBC Driver 18 for SQL Server};Server=tcp:localhost,1433;Database=test;Uid=sa;Pwd={for-testing};Encrypt=yes;TrustServerCertificate=yes;Connection Timeout=5;"  # noqa: E501


@pytest.fixture
def connection() -> Generator[HarlequinOdbcConnection, None, None]:
    master_conn = pyodbc.connect(MASTER_DB_CONN, autocommit=True)
    cur = master_conn.cursor()
    cur.execute("drop database if exists test;")
    cur.execute("create database test;")
    cur.close()
    master_conn.close()
    conn = HarlequinOdbcAdapter(conn_str=(TEST_DB_CONN,)).connect()
    yield conn
    conn.close()


@@ -1,103 +0,0 @@
import os
import sys
from typing import Generator

import pytest
from harlequin.adapter import HarlequinAdapter, HarlequinConnection, HarlequinCursor
from harlequin.catalog import Catalog, CatalogItem
from harlequin.exception import HarlequinConnectionError, HarlequinQueryError
from textual_fastdatatable.backend import create_backend

from harlequin_odbc.adapter import (
    HarlequinOdbcAdapter,
    HarlequinOdbcConnection,
    HarlequinOdbcCursor,
)

if sys.version_info < (3, 10):
    from importlib_metadata import entry_points
else:
    from importlib.metadata import entry_points

CONN_STR = os.environ["ODBC_CONN_STR"]


def test_plugin_discovery() -> None:
    PLUGIN_NAME = "odbc"
    eps = entry_points(group="harlequin.adapter")
    assert eps[PLUGIN_NAME]
    adapter_cls = eps[PLUGIN_NAME].load()
    assert issubclass(adapter_cls, HarlequinAdapter)
    assert adapter_cls == HarlequinOdbcAdapter


def test_connect() -> None:
    conn = HarlequinOdbcAdapter(conn_str=(CONN_STR,)).connect()
    assert isinstance(conn, HarlequinConnection)


def test_init_extra_kwargs() -> None:
    assert HarlequinOdbcAdapter(conn_str=(CONN_STR,), foo=1, bar="baz").connect()


def test_connect_raises_connection_error() -> None:
    with pytest.raises(HarlequinConnectionError):
        _ = HarlequinOdbcAdapter(conn_str=("foo",)).connect()


@pytest.fixture
def connection() -> Generator[HarlequinOdbcConnection, None, None]:
    conn = HarlequinOdbcAdapter(conn_str=(CONN_STR,)).connect()
    conn.execute("drop schema if exists test;")
    conn.execute("create schema test;")
    yield conn
    conn.execute("drop table if exists test.foo;")
    conn.execute("drop schema if exists test;")


def test_get_catalog(connection: HarlequinOdbcConnection) -> None:
    catalog = connection.get_catalog()
    assert isinstance(catalog, Catalog)
    assert catalog.items
    assert isinstance(catalog.items[0], CatalogItem)


def test_execute_ddl(connection: HarlequinOdbcConnection) -> None:
    cur = connection.execute("create table test.foo (a int)")
    assert cur is None


def test_execute_select(connection: HarlequinOdbcConnection) -> None:
    cur = connection.execute("select 1 as a")
    assert isinstance(cur, HarlequinOdbcCursor)
    # assert cur.columns() == [("a", "##")]
    data = cur.fetchall()
    backend = create_backend(data)
    assert backend.column_count == 1
    assert backend.row_count == 1


def test_execute_select_dupe_cols(connection: HarlequinOdbcConnection) -> None:
    cur = connection.execute("select 1 as a, 2 as a, 3 as a")
    assert isinstance(cur, HarlequinCursor)
    assert len(cur.columns()) == 3
    data = cur.fetchall()
    backend = create_backend(data)
    assert backend.column_count == 3
    assert backend.row_count == 1


def test_set_limit(connection: HarlequinOdbcConnection) -> None:
    cur = connection.execute("select 1 as a union all select 2 union all select 3")
    assert isinstance(cur, HarlequinCursor)
    cur = cur.set_limit(2)
    assert isinstance(cur, HarlequinCursor)
    data = cur.fetchall()
    backend = create_backend(data)
    assert backend.column_count == 1
    assert backend.row_count == 2


def test_execute_raises_query_error(connection: HarlequinOdbcConnection) -> None:
    with pytest.raises(HarlequinQueryError):
        _ = connection.execute("selec;")


@@ -1,99 +0,0 @@
from typing import Generator

import pytest
from harlequin.catalog import InteractiveCatalogItem

from harlequin_odbc.adapter import HarlequinOdbcConnection
from harlequin_odbc.catalog import (
    ColumnCatalogItem,
    DatabaseCatalogItem,
    RelationCatalogItem,
    SchemaCatalogItem,
    TableCatalogItem,
    ViewCatalogItem,
)


@pytest.fixture
def connection_with_objects(
    connection: HarlequinOdbcConnection,
) -> Generator[HarlequinOdbcConnection, None, None]:
    connection.execute("create schema one")
    connection.execute("select 1 as a, '2' as b into one.foo")
    connection.execute("select 1 as a, '2' as b into one.bar")
    connection.execute("select 1 as a, '2' as b into one.baz")
    connection.execute("create schema two")
    connection.execute("create view two.qux as select * from one.foo")
    connection.execute("create schema three")
    yield connection
    connection.execute("drop table one.foo")
    connection.execute("drop table one.bar")
    connection.execute("drop table one.baz")
    connection.execute("drop schema one")
    connection.execute("drop view two.qux")
    connection.execute("drop schema two")
    connection.execute("drop schema three")


def test_catalog(connection_with_objects: HarlequinOdbcConnection) -> None:
    conn = connection_with_objects
    catalog = conn.get_catalog()

    # at least two databases, postgres and test
    assert len(catalog.items) >= 2
    [test_db_item] = filter(lambda item: item.label == "test", catalog.items)
    assert isinstance(test_db_item, InteractiveCatalogItem)
    assert isinstance(test_db_item, DatabaseCatalogItem)
    assert test_db_item.children
    assert test_db_item.loaded

    schema_items = test_db_item.children
    assert all(isinstance(item, SchemaCatalogItem) for item in schema_items)
    [schema_one_item] = filter(lambda item: item.label == "one", schema_items)
    assert isinstance(schema_one_item, SchemaCatalogItem)
    assert schema_one_item.children
    assert schema_one_item.loaded

    table_items = schema_one_item.children
    assert all(isinstance(item, RelationCatalogItem) for item in table_items)
    [foo_item] = filter(lambda item: item.label == "foo", table_items)
    assert isinstance(foo_item, TableCatalogItem)
    assert not foo_item.children
    assert not foo_item.loaded
    foo_column_items = foo_item.fetch_children()
    assert all(isinstance(item, ColumnCatalogItem) for item in foo_column_items)

    [schema_two_item] = filter(lambda item: item.label == "two", schema_items)
    assert isinstance(schema_two_item, SchemaCatalogItem)
    assert schema_two_item.children
    assert schema_two_item.loaded

    view_items = schema_two_item.children
    assert all(isinstance(item, ViewCatalogItem) for item in view_items)
    [qux_item] = filter(lambda item: item.label == "qux", view_items)
    assert isinstance(qux_item, ViewCatalogItem)
    assert not qux_item.children
    assert not qux_item.loaded
    qux_column_items = qux_item.fetch_children()
    assert all(isinstance(item, ColumnCatalogItem) for item in qux_column_items)

    assert [item.label for item in foo_column_items] == [
        item.label for item in qux_column_items
    ]

    # ensure calling fetch_children on cols doesn't raise
    children_items = foo_column_items[0].fetch_children()
    assert not children_items

    # empty schemas don't appear in the catalog
    schema_three_items = list(filter(lambda item: item.label == "three", schema_items))
    assert not schema_three_items