
Adding upstream version 1.2+20240521.

Signed-off-by: Daniel Baumann <daniel@debian.org>
Daniel Baumann 2025-02-05 14:24:15 +01:00
parent 6b2864e4b9
commit 8512f66c5a
Signed by: daniel
GPG key ID: FBB4F0E80A80222F
229 changed files with 19561 additions and 0 deletions

2038
src/aristaproto/__init__.py Normal file

File diff suppressed because it is too large

14
src/aristaproto/_types.py Normal file
View file

@@ -0,0 +1,14 @@
from typing import (
TYPE_CHECKING,
TypeVar,
)
if TYPE_CHECKING:
from grpclib._typing import IProtoMessage
from . import Message
# Bound type variable to allow methods to return `self` of subclasses
T = TypeVar("T", bound="Message")
ST = TypeVar("ST", bound="IProtoMessage")
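A minimal sketch (not part of this commit) of what the Message-bound type variable enables: helpers can be typed to return the concrete subclass rather than the base class. The roundtrip helper below is invented for illustration; its parse() call follows the aristaproto Message.parse(data) usage seen in plugin/main.py further down.

from typing import Type, TypeVar

from aristaproto import Message

T = TypeVar("T", bound=Message)

def roundtrip(message_type: Type[T], data: bytes) -> T:
    # parse() is typed to return the instance it was called on, so the
    # type checker infers the concrete subclass here, not just Message.
    return message_type().parse(data)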

View file

@@ -0,0 +1,4 @@
from importlib import metadata
__version__ = metadata.version("aristaproto")

143
src/aristaproto/casing.py Normal file
View file

@@ -0,0 +1,143 @@
import keyword
import re
# Word delimiters and symbols that will not be preserved when re-casing.
# language=PythonRegExp
SYMBOLS = "[^a-zA-Z0-9]*"
# Optionally capitalized word.
# language=PythonRegExp
WORD = "[A-Z]*[a-z]*[0-9]*"
# Uppercase word, not followed by lowercase letters.
# language=PythonRegExp
WORD_UPPER = "[A-Z]+(?![a-z])[0-9]*"
def safe_snake_case(value: str) -> str:
"""Snake case a value taking into account Python keywords."""
value = snake_case(value)
value = sanitize_name(value)
return value
def snake_case(value: str, strict: bool = True) -> str:
"""
Join words with an underscore into lowercase and remove symbols.
Parameters
-----------
value: :class:`str`
The value to convert.
strict: :class:`bool`
Whether or not to force single underscores.
Returns
--------
:class:`str`
The value in snake_case.
"""
def substitute_word(symbols: str, word: str, is_start: bool) -> str:
if not word:
return ""
if strict:
delimiter_count = 0 if is_start else 1 # Single underscore if strict.
elif is_start:
delimiter_count = len(symbols)
elif word.isupper() or word.islower():
delimiter_count = max(
1, len(symbols)
) # Preserve all delimiters if not strict.
else:
delimiter_count = len(symbols) + 1 # Extra underscore for leading capital.
return ("_" * delimiter_count) + word.lower()
snake = re.sub(
f"(^)?({SYMBOLS})({WORD_UPPER}|{WORD})",
lambda groups: substitute_word(groups[2], groups[3], groups[1] is not None),
value,
)
return snake
def pascal_case(value: str, strict: bool = True) -> str:
"""
Capitalize each word and remove symbols.
Parameters
-----------
value: :class:`str`
The value to convert.
strict: :class:`bool`
Whether or not to output only alphanumeric characters.
Returns
--------
:class:`str`
The value in PascalCase.
"""
def substitute_word(symbols, word):
if strict:
return word.capitalize() # Remove all delimiters
if word.islower():
delimiter_length = len(symbols[:-1]) # Lose one delimiter
else:
delimiter_length = len(symbols) # Preserve all delimiters
return ("_" * delimiter_length) + word.capitalize()
return re.sub(
f"({SYMBOLS})({WORD_UPPER}|{WORD})",
lambda groups: substitute_word(groups[1], groups[2]),
value,
)
def camel_case(value: str, strict: bool = True) -> str:
"""
Capitalize all words except first and remove symbols.
Parameters
-----------
value: :class:`str`
The value to convert.
strict: :class:`bool`
Whether or not to output only alphanumeric characters.
Returns
--------
:class:`str`
The value in camelCase.
"""
return lowercase_first(pascal_case(value, strict=strict))
def lowercase_first(value: str) -> str:
"""
Lower cases the first character of the value.
Parameters
----------
value: :class:`str`
The value to lower case.
Returns
-------
:class:`str`
The lower cased string.
"""
return value[0:1].lower() + value[1:]
def sanitize_name(value: str) -> str:
# https://www.python.org/dev/peps/pep-0008/#descriptive-naming-styles
if keyword.iskeyword(value):
return f"{value}_"
if not value.isidentifier():
return f"_{value}"
return value
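A short usage sketch of the helpers above (example values traced from the regular expressions by hand, not taken from this commit):

from aristaproto.casing import camel_case, pascal_case, safe_snake_case, snake_case

assert snake_case("ConnectionID") == "connection_id"   # WORD_UPPER handles the acronym
assert pascal_case("connection_id") == "ConnectionId"  # strict mode drops delimiters
assert camel_case("connection_id") == "connectionId"   # pascal_case, first letter lowered
assert safe_snake_case("class") == "class_"            # keywords get a trailing underscore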

View file

View file

@@ -0,0 +1,176 @@
import os
import re
from typing import (
Dict,
List,
Set,
Tuple,
Type,
)
from ..casing import safe_snake_case
from ..lib.google import protobuf as google_protobuf
from .naming import pythonize_class_name
WRAPPER_TYPES: Dict[str, Type] = {
".google.protobuf.DoubleValue": google_protobuf.DoubleValue,
".google.protobuf.FloatValue": google_protobuf.FloatValue,
".google.protobuf.Int32Value": google_protobuf.Int32Value,
".google.protobuf.Int64Value": google_protobuf.Int64Value,
".google.protobuf.UInt32Value": google_protobuf.UInt32Value,
".google.protobuf.UInt64Value": google_protobuf.UInt64Value,
".google.protobuf.BoolValue": google_protobuf.BoolValue,
".google.protobuf.StringValue": google_protobuf.StringValue,
".google.protobuf.BytesValue": google_protobuf.BytesValue,
}
def parse_source_type_name(field_type_name: str) -> Tuple[str, str]:
"""
Split full source type name into package and type name.
E.g. 'root.package.Message' -> ('root.package', 'Message')
'root.Message.SomeEnum' -> ('root', 'Message.SomeEnum')
"""
package_match = re.match(r"^\.?([^A-Z]+)\.(.+)", field_type_name)
if package_match:
package = package_match.group(1)
name = package_match.group(2)
else:
package = ""
name = field_type_name.lstrip(".")
return package, name
def get_type_reference(
*,
package: str,
imports: set,
source_type: str,
unwrap: bool = True,
pydantic: bool = False,
) -> str:
"""
Return a Python type name for a proto type reference. Adds the import if
necessary. Unwraps well-known types if required.
"""
if unwrap:
if source_type in WRAPPER_TYPES:
wrapped_type = type(WRAPPER_TYPES[source_type]().value)
return f"Optional[{wrapped_type.__name__}]"
if source_type == ".google.protobuf.Duration":
return "timedelta"
elif source_type == ".google.protobuf.Timestamp":
return "datetime"
source_package, source_type = parse_source_type_name(source_type)
current_package: List[str] = package.split(".") if package else []
py_package: List[str] = source_package.split(".") if source_package else []
py_type: str = pythonize_class_name(source_type)
compiling_google_protobuf = current_package == ["google", "protobuf"]
importing_google_protobuf = py_package == ["google", "protobuf"]
if importing_google_protobuf and not compiling_google_protobuf:
py_package = (
["aristaproto", "lib"] + (["pydantic"] if pydantic else []) + py_package
)
if py_package[:1] == ["aristaproto"]:
return reference_absolute(imports, py_package, py_type)
if py_package == current_package:
return reference_sibling(py_type)
if py_package[: len(current_package)] == current_package:
return reference_descendent(current_package, imports, py_package, py_type)
if current_package[: len(py_package)] == py_package:
return reference_ancestor(current_package, imports, py_package, py_type)
return reference_cousin(current_package, imports, py_package, py_type)
def reference_absolute(imports: Set[str], py_package: List[str], py_type: str) -> str:
"""
Returns a reference to a python type located in the root, i.e. sys.path.
"""
string_import = ".".join(py_package)
string_alias = safe_snake_case(string_import)
imports.add(f"import {string_import} as {string_alias}")
return f'"{string_alias}.{py_type}"'
def reference_sibling(py_type: str) -> str:
"""
Returns a reference to a python type within the same package as the current package.
"""
return f'"{py_type}"'
def reference_descendent(
current_package: List[str], imports: Set[str], py_package: List[str], py_type: str
) -> str:
"""
Returns a reference to a python type in a package that is a descendant of the
current package, and adds the required import that is aliased to avoid name
conflicts.
"""
importing_descendent = py_package[len(current_package) :]
string_from = ".".join(importing_descendent[:-1])
string_import = importing_descendent[-1]
if string_from:
string_alias = "_".join(importing_descendent)
imports.add(f"from .{string_from} import {string_import} as {string_alias}")
return f'"{string_alias}.{py_type}"'
else:
imports.add(f"from . import {string_import}")
return f'"{string_import}.{py_type}"'
def reference_ancestor(
current_package: List[str], imports: Set[str], py_package: List[str], py_type: str
) -> str:
"""
Returns a reference to a python type in a package which is an ancestor to the
current package, and adds the required import that is aliased (if possible) to avoid
name conflicts.
Adds trailing __ to avoid name mangling (python.org/dev/peps/pep-0008/#id34).
"""
distance_up = len(current_package) - len(py_package)
if py_package:
string_import = py_package[-1]
string_alias = f"_{'_' * distance_up}{string_import}__"
string_from = f"..{'.' * distance_up}"
imports.add(f"from {string_from} import {string_import} as {string_alias}")
return f'"{string_alias}.{py_type}"'
else:
string_alias = f"{'_' * distance_up}{py_type}__"
imports.add(f"from .{'.' * distance_up} import {py_type} as {string_alias}")
return f'"{string_alias}"'
def reference_cousin(
current_package: List[str], imports: Set[str], py_package: List[str], py_type: str
) -> str:
"""
Returns a reference to a python type in a package that is not a descendant, ancestor,
or sibling, and adds the required import that is aliased to avoid name conflicts.
"""
shared_ancestry = os.path.commonprefix([current_package, py_package]) # type: ignore
distance_up = len(current_package) - len(shared_ancestry)
string_from = f".{'.' * distance_up}" + ".".join(
py_package[len(shared_ancestry) : -1]
)
string_import = py_package[-1]
# Add trailing __ to avoid name mangling (python.org/dev/peps/pep-0008/#id34)
string_alias = (
f"{'_' * distance_up}"
+ safe_snake_case(".".join(py_package[len(shared_ancestry) :]))
+ "__"
)
imports.add(f"from {string_from} import {string_import} as {string_alias}")
return f'"{string_alias}.{py_type}"'

View file

@@ -0,0 +1,21 @@
from aristaproto import casing
def pythonize_class_name(name: str) -> str:
return casing.pascal_case(name)
def pythonize_field_name(name: str) -> str:
return casing.safe_snake_case(name)
def pythonize_method_name(name: str) -> str:
return casing.safe_snake_case(name)
def pythonize_enum_member_name(name: str, enum_name: str) -> str:
enum_name = casing.snake_case(enum_name).upper()
find = name.find(enum_name)
if find != -1:
name = name[find + len(enum_name) :].strip("_")
return casing.sanitize_name(name)
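For illustration (names invented, not from the commit), the enum helper strips the enum's own name prefix that protobuf style puts on members:

assert pythonize_class_name("my_message") == "MyMessage"
assert pythonize_field_name("fieldName") == "field_name"
assert pythonize_enum_member_name("COLOR_RED", "Color") == "RED"
assert pythonize_enum_member_name("RED", "Color") == "RED"  # nothing to strip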

195
src/aristaproto/enum.py Normal file
View file

@@ -0,0 +1,195 @@
from __future__ import annotations
import sys
from enum import (
EnumMeta,
IntEnum,
)
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Dict,
Optional,
Tuple,
)
if TYPE_CHECKING:
from collections.abc import (
Generator,
Mapping,
)
from typing_extensions import (
Never,
Self,
)
def _is_descriptor(obj: object) -> bool:
return (
hasattr(obj, "__get__") or hasattr(obj, "__set__") or hasattr(obj, "__delete__")
)
class EnumType(EnumMeta if TYPE_CHECKING else type):
_value_map_: Mapping[int, Enum]
_member_map_: Mapping[str, Enum]
def __new__(
mcs, name: str, bases: Tuple[type, ...], namespace: Dict[str, Any]
) -> Self:
value_map = {}
member_map = {}
new_mcs = type(
f"{name}Type",
tuple(
dict.fromkeys(
[base.__class__ for base in bases if base.__class__ is not type]
+ [EnumType, type]
)
), # reorder the bases so EnumType and type are last to avoid conflicts
{"_value_map_": value_map, "_member_map_": member_map},
)
members = {
name: value
for name, value in namespace.items()
if not _is_descriptor(value) and not name.startswith("__")
}
cls = type.__new__(
new_mcs,
name,
bases,
{key: value for key, value in namespace.items() if key not in members},
)
# this allows us to disallow member access from other members as
# members become proper class variables
for name, value in members.items():
member = value_map.get(value)
if member is None:
member = cls.__new__(cls, name=name, value=value) # type: ignore
value_map[value] = member
member_map[name] = member
type.__setattr__(new_mcs, name, member)
return cls
if not TYPE_CHECKING:
def __call__(cls, value: int) -> Enum:
try:
return cls._value_map_[value]
except (KeyError, TypeError):
raise ValueError(f"{value!r} is not a valid {cls.__name__}") from None
def __iter__(cls) -> Generator[Enum, None, None]:
yield from cls._member_map_.values()
def __reversed__(cls) -> Generator[Enum, None, None]:
yield from reversed(cls._member_map_.values())
def __getitem__(cls, key: str) -> Enum:
return cls._member_map_[key]
@property
def __members__(cls) -> MappingProxyType[str, Enum]:
return MappingProxyType(cls._member_map_)
def __repr__(cls) -> str:
return f"<enum {cls.__name__!r}>"
def __len__(cls) -> int:
return len(cls._member_map_)
def __setattr__(cls, name: str, value: Any) -> Never:
raise AttributeError(f"{cls.__name__}: cannot reassign Enum members.")
def __delattr__(cls, name: str) -> Never:
raise AttributeError(f"{cls.__name__}: cannot delete Enum members.")
def __contains__(cls, member: object) -> bool:
return isinstance(member, cls) and member.name in cls._member_map_
class Enum(IntEnum if TYPE_CHECKING else int, metaclass=EnumType):
"""
The base class for protobuf enumerations, all generated enumerations will
inherit from this. Emulates `enum.IntEnum`.
"""
name: Optional[str]
value: int
if not TYPE_CHECKING:
def __new__(cls, *, name: Optional[str], value: int) -> Self:
self = super().__new__(cls, value)
super().__setattr__(self, "name", name)
super().__setattr__(self, "value", value)
return self
def __str__(self) -> str:
return self.name or "None"
def __repr__(self) -> str:
return f"{self.__class__.__name__}.{self.name}"
def __setattr__(self, key: str, value: Any) -> Never:
raise AttributeError(
f"{self.__class__.__name__} Cannot reassign a member's attributes."
)
def __delattr__(self, item: Any) -> Never:
raise AttributeError(
f"{self.__class__.__name__} Cannot delete a member's attributes."
)
def __copy__(self) -> Self:
return self
def __deepcopy__(self, memo: Any) -> Self:
return self
@classmethod
def try_value(cls, value: int = 0) -> Self:
"""Return the value which corresponds to the value.
Parameters
-----------
value: :class:`int`
The value of the enum member to get.
Returns
-------
:class:`Enum`
The corresponding member or a new instance of the enum if
``value`` isn't actually a member.
"""
try:
return cls._value_map_[value]
except (KeyError, TypeError):
return cls.__new__(cls, name=None, value=value)
@classmethod
def from_string(cls, name: str) -> Self:
"""Return the value which corresponds to the string name.
Parameters
-----------
name: :class:`str`
The name of the enum member to get.
Raises
-------
:exc:`ValueError`
The member was not found in the Enum.
"""
try:
return cls._member_map_[name]
except KeyError as e:
raise ValueError(f"Unknown value {name} for enum {cls.__name__}") from e
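A minimal sketch of the open-enum behaviour this class provides (the Color enum is hypothetical, not defined in this commit):

class Color(Enum):
    UNSPECIFIED = 0
    RED = 1

assert Color.try_value(1) is Color.RED
assert Color.from_string("RED") is Color.RED
unknown = Color.try_value(42)  # unknown wire values don't raise...
assert unknown.value == 42     # ...they become ad-hoc members with name=None
assert str(unknown) == "None"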

View file

View file

@@ -0,0 +1,177 @@
import asyncio
from abc import ABC
from typing import (
TYPE_CHECKING,
AsyncIterable,
AsyncIterator,
Collection,
Iterable,
Mapping,
Optional,
Tuple,
Type,
Union,
)
import grpclib.const
if TYPE_CHECKING:
from grpclib.client import Channel
from grpclib.metadata import Deadline
from .._types import (
ST,
IProtoMessage,
Message,
T,
)
Value = Union[str, bytes]
MetadataLike = Union[Mapping[str, Value], Collection[Tuple[str, Value]]]
MessageSource = Union[Iterable["IProtoMessage"], AsyncIterable["IProtoMessage"]]
class ServiceStub(ABC):
"""
Base class for async gRPC clients.
"""
def __init__(
self,
channel: "Channel",
*,
timeout: Optional[float] = None,
deadline: Optional["Deadline"] = None,
metadata: Optional[MetadataLike] = None,
) -> None:
self.channel = channel
self.timeout = timeout
self.deadline = deadline
self.metadata = metadata
def __resolve_request_kwargs(
self,
timeout: Optional[float],
deadline: Optional["Deadline"],
metadata: Optional[MetadataLike],
):
return {
"timeout": self.timeout if timeout is None else timeout,
"deadline": self.deadline if deadline is None else deadline,
"metadata": self.metadata if metadata is None else metadata,
}
async def _unary_unary(
self,
route: str,
request: "IProtoMessage",
response_type: Type["T"],
*,
timeout: Optional[float] = None,
deadline: Optional["Deadline"] = None,
metadata: Optional[MetadataLike] = None,
) -> "T":
"""Make a unary request and return the response."""
async with self.channel.request(
route,
grpclib.const.Cardinality.UNARY_UNARY,
type(request),
response_type,
**self.__resolve_request_kwargs(timeout, deadline, metadata),
) as stream:
await stream.send_message(request, end=True)
response = await stream.recv_message()
assert response is not None
return response
async def _unary_stream(
self,
route: str,
request: "IProtoMessage",
response_type: Type["T"],
*,
timeout: Optional[float] = None,
deadline: Optional["Deadline"] = None,
metadata: Optional[MetadataLike] = None,
) -> AsyncIterator["T"]:
"""Make a unary request and return the stream response iterator."""
async with self.channel.request(
route,
grpclib.const.Cardinality.UNARY_STREAM,
type(request),
response_type,
**self.__resolve_request_kwargs(timeout, deadline, metadata),
) as stream:
await stream.send_message(request, end=True)
async for message in stream:
yield message
async def _stream_unary(
self,
route: str,
request_iterator: MessageSource,
request_type: Type["IProtoMessage"],
response_type: Type["T"],
*,
timeout: Optional[float] = None,
deadline: Optional["Deadline"] = None,
metadata: Optional[MetadataLike] = None,
) -> "T":
"""Make a stream request and return the response."""
async with self.channel.request(
route,
grpclib.const.Cardinality.STREAM_UNARY,
request_type,
response_type,
**self.__resolve_request_kwargs(timeout, deadline, metadata),
) as stream:
await stream.send_request()
await self._send_messages(stream, request_iterator)
response = await stream.recv_message()
assert response is not None
return response
async def _stream_stream(
self,
route: str,
request_iterator: MessageSource,
request_type: Type["IProtoMessage"],
response_type: Type["T"],
*,
timeout: Optional[float] = None,
deadline: Optional["Deadline"] = None,
metadata: Optional[MetadataLike] = None,
) -> AsyncIterator["T"]:
"""
Make a stream request and return an AsyncIterator to iterate over response
messages.
"""
async with self.channel.request(
route,
grpclib.const.Cardinality.STREAM_STREAM,
request_type,
response_type,
**self.__resolve_request_kwargs(timeout, deadline, metadata),
) as stream:
await stream.send_request()
sending_task = asyncio.ensure_future(
self._send_messages(stream, request_iterator)
)
try:
async for response in stream:
yield response
except:
sending_task.cancel()
raise
@staticmethod
async def _send_messages(stream, messages: MessageSource):
if isinstance(messages, AsyncIterable):
async for message in messages:
await stream.send_message(message)
else:
for message in messages:
await stream.send_message(message)
await stream.end()
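For context, a generated client stub wires each RPC method through these helpers roughly as below; GreeterStub, HelloRequest and HelloReply are invented names, not emitted by this commit:

class GreeterStub(ServiceStub):
    # Hypothetical hand-written equivalent of plugin output.
    async def say_hello(self, request: "HelloRequest") -> "HelloReply":
        return await self._unary_unary(
            "/greeter.Greeter/SayHello",  # route: /<package>.<Service>/<Method>
            request,
            HelloReply,  # response_type used to decode the reply
        )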

View file

@@ -0,0 +1,33 @@
from abc import ABC
from collections.abc import AsyncIterable
from typing import (
Any,
Callable,
Dict,
)
import grpclib
import grpclib.server
class ServiceBase(ABC):
"""
Base class for async gRPC servers.
"""
async def _call_rpc_handler_server_stream(
self,
handler: Callable,
stream: grpclib.server.Stream,
request: Any,
) -> None:
response_iter = handler(request)
# check if response is actually an AsyncIterator
# this might be false if the method just returns without
# yielding at least once
# in that case, we just interpret it as an empty iterator
if isinstance(response_iter, AsyncIterable):
async for response_message in response_iter:
await stream.send_message(response_message)
else:
response_iter.close()
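A hypothetical pair of handlers (service and message names invented) showing why the AsyncIterable check matters:

class GreeterService(ServiceBase):
    async def stream_greetings(self, request: "HelloRequest"):
        # Contains a yield, so calling it returns an async generator,
        # which the isinstance() check above iterates.
        for greeting in ("hello", "hi"):
            yield HelloReply(message=f"{greeting} {request.name}")

    async def stream_nothing(self, request: "HelloRequest"):
        # No yield anywhere in the body: calling it returns a plain
        # coroutine, so _call_rpc_handler_server_stream() just closes it
        # and sends no messages.
        return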

View file

View file

@@ -0,0 +1,193 @@
import asyncio
from typing import (
AsyncIterable,
AsyncIterator,
Iterable,
Optional,
TypeVar,
Union,
)
T = TypeVar("T")
class ChannelClosed(Exception):
"""
An exception raised on an attempt to send through a closed channel
"""
class ChannelDone(Exception):
"""
An exception raised on an attempt to send receive from a channel that is both closed
and empty.
"""
class AsyncChannel(AsyncIterable[T]):
"""
A buffered async channel for sending items between coroutines with FIFO ordering.
This makes decoupled bidirectional streaming gRPC requests easy if used like:
.. code-block:: python
client = GeneratedStub(grpclib_chan)
request_channel = AsyncChannel()
# We can start by sending all the requests we already have
await request_channel.send_from([RequestObject(...), RequestObject(...)])
async for response in client.rpc_call(request_channel):
# The response iterator will remain active until the connection is closed
...
# More items can be sent at any time
await request_channel.send(RequestObject(...))
...
# The channel must be closed to complete the gRPC connection
request_channel.close()
Items can be sent through the channel by either:
- providing an iterable to the send_from method
- passing them to the send method one at a time
Items can be received from the channel by either:
- iterating over the channel with a for loop to get all items
- calling the receive method to get one item at a time
If the channel is empty then receivers will wait until either an item appears or the
channel is closed.
Once the channel is closed then subsequent attempts to send through the channel will
fail with a ChannelClosed exception.
When the channel is closed and empty then it is done, and further attempts to receive
from it will fail with a ChannelDone exception.
If multiple coroutines receive from the channel concurrently, each item sent will be
received by only one of the receivers.
:param source:
An optional iterable with items that should be sent through the channel
immediately.
:param buffer_limit:
Limit the number of items that can be buffered in the channel. A value less than
1 implies no limit. If the channel is full then attempts to send more items will
result in the sender waiting until an item is received from the channel.
:param close:
If set to True then the channel will automatically close after exhausting source
or immediately if no source is provided.
"""
def __init__(self, *, buffer_limit: int = 0, close: bool = False):
self._queue: asyncio.Queue[T] = asyncio.Queue(buffer_limit)
self._closed = False
self._waiting_receivers: int = 0
# Track whether flush has been invoked so it can only happen once
self._flushed = False
def __aiter__(self) -> AsyncIterator[T]:
return self
async def __anext__(self) -> T:
if self.done():
raise StopAsyncIteration
self._waiting_receivers += 1
try:
result = await self._queue.get()
if result is self.__flush:
raise StopAsyncIteration
return result
finally:
self._waiting_receivers -= 1
self._queue.task_done()
def closed(self) -> bool:
"""
Returns True if this channel is closed and no longer accepting new items
"""
return self._closed
def done(self) -> bool:
"""
Check if this channel is done.
:return: True if this channel is closed and has been drained of items, in
which case any further attempts to receive an item from this channel will raise
a ChannelDone exception.
"""
# After close the channel is not yet done until there is at least one waiting
# receiver per enqueued item.
return self._closed and self._queue.qsize() <= self._waiting_receivers
async def send_from(
self, source: Union[Iterable[T], AsyncIterable[T]], close: bool = False
) -> "AsyncChannel[T]":
"""
Iterates the given [Async]Iterable and sends all the resulting items.
If close is set to True then subsequent send calls will be rejected with a
ChannelClosed exception.
:param source: an iterable of items to send
:param close:
if True then the channel will be closed after the source has been exhausted
"""
if self._closed:
raise ChannelClosed("Cannot send through a closed channel")
if isinstance(source, AsyncIterable):
async for item in source:
await self._queue.put(item)
else:
for item in source:
await self._queue.put(item)
if close:
# Complete the closing process
self.close()
return self
async def send(self, item: T) -> "AsyncChannel[T]":
"""
Send a single item over this channel.
:param item: The item to send
"""
if self._closed:
raise ChannelClosed("Cannot send through a closed channel")
await self._queue.put(item)
return self
async def receive(self) -> Optional[T]:
"""
Returns the next item from this channel when it becomes available,
or None if the channel is closed before another item is sent.
:return: An item from the channel
"""
if self.done():
raise ChannelDone("Cannot receive from a closed channel")
self._waiting_receivers += 1
try:
result = await self._queue.get()
if result is self.__flush:
return None
return result
finally:
self._waiting_receivers -= 1
self._queue.task_done()
def close(self):
"""
Close this channel to new items
"""
self._closed = True
asyncio.ensure_future(self._flush_queue())
async def _flush_queue(self):
"""
To be called after the channel is closed. Pushes a number of self.__flush
objects to the queue to ensure no waiting consumers get deadlocked.
"""
if not self._flushed:
self._flushed = True
deadlocked_receivers = max(0, self._waiting_receivers - self._queue.qsize())
for _ in range(deadlocked_receivers):
await self._queue.put(self.__flush)
# A special signal object for flushing the queue when the channel is closed
__flush = object()
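To complement the send-side example in the class docstring, a small receive-side sketch (hypothetical, not in the commit):

import asyncio

async def demo() -> None:
    channel: AsyncChannel[int] = AsyncChannel()
    await channel.send_from(range(3), close=True)
    async for item in channel:  # yields 0, 1, 2, then stops cleanly
        print(item)
    assert channel.done()  # closed and drained

asyncio.run(demo())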

View file

View file

View file

@@ -0,0 +1 @@
from aristaproto.lib.std.google.protobuf import *

View file

@@ -0,0 +1 @@
from aristaproto.lib.std.google.protobuf.compiler import *

View file

File diff suppressed because it is too large

View file

@@ -0,0 +1,210 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: google/protobuf/compiler/plugin.proto
# plugin: python-aristaproto
# This file has been @generated
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from dataclasses import dataclass
else:
from pydantic.dataclasses import dataclass
from typing import List
import aristaproto
import aristaproto.lib.pydantic.google.protobuf as aristaproto_lib_pydantic_google_protobuf
class CodeGeneratorResponseFeature(aristaproto.Enum):
"""Sync with code_generator.h."""
FEATURE_NONE = 0
FEATURE_PROTO3_OPTIONAL = 1
FEATURE_SUPPORTS_EDITIONS = 2
@dataclass(eq=False, repr=False)
class Version(aristaproto.Message):
"""The version number of protocol compiler."""
major: int = aristaproto.int32_field(1)
minor: int = aristaproto.int32_field(2)
patch: int = aristaproto.int32_field(3)
suffix: str = aristaproto.string_field(4)
"""
A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
be empty for mainline stable releases.
"""
@dataclass(eq=False, repr=False)
class CodeGeneratorRequest(aristaproto.Message):
"""An encoded CodeGeneratorRequest is written to the plugin's stdin."""
file_to_generate: List[str] = aristaproto.string_field(1)
"""
The .proto files that were explicitly listed on the command-line. The
code generator should generate code only for these files. Each file's
descriptor will be included in proto_file, below.
"""
parameter: str = aristaproto.string_field(2)
"""The generator parameter passed on the command-line."""
proto_file: List[
"aristaproto_lib_pydantic_google_protobuf.FileDescriptorProto"
] = aristaproto.message_field(15)
"""
FileDescriptorProtos for all files in files_to_generate and everything
they import. The files will appear in topological order, so each file
appears before any file that imports it.
Note: the files listed in files_to_generate will include runtime-retention
options only, but all other files will include source-retention options.
The source_file_descriptors field below is available in case you need
source-retention options for files_to_generate.
protoc guarantees that all proto_files will be written after
the fields above, even though this is not technically guaranteed by the
protobuf wire format. This theoretically could allow a plugin to stream
in the FileDescriptorProtos and handle them one by one rather than read
the entire set into memory at once. However, as of this writing, this
is not similarly optimized on protoc's end -- it will store all fields in
memory at once before sending them to the plugin.
Type names of fields and extensions in the FileDescriptorProto are always
fully qualified.
"""
source_file_descriptors: List[
"aristaproto_lib_pydantic_google_protobuf.FileDescriptorProto"
] = aristaproto.message_field(17)
"""
File descriptors with all options, including source-retention options.
These descriptors are only provided for the files listed in
files_to_generate.
"""
compiler_version: "Version" = aristaproto.message_field(3)
"""The version number of protocol compiler."""
@dataclass(eq=False, repr=False)
class CodeGeneratorResponse(aristaproto.Message):
"""The plugin writes an encoded CodeGeneratorResponse to stdout."""
error: str = aristaproto.string_field(1)
"""
Error message. If non-empty, code generation failed. The plugin process
should exit with status code zero even if it reports an error in this way.
This should be used to indicate errors in .proto files which prevent the
code generator from generating correct code. Errors which indicate a
problem in protoc itself -- such as the input CodeGeneratorRequest being
unparseable -- should be reported by writing a message to stderr and
exiting with a non-zero status code.
"""
supported_features: int = aristaproto.uint64_field(2)
"""
A bitmask of supported features that the code generator supports.
This is a bitwise "or" of values from the Feature enum.
"""
minimum_edition: int = aristaproto.int32_field(3)
"""
The minimum edition this plugin supports. This will be treated as an
Edition enum, but we want to allow unknown values. It should be specified
according to the edition enum value, *not* the edition number. Only takes
effect for plugins that have FEATURE_SUPPORTS_EDITIONS set.
"""
maximum_edition: int = aristaproto.int32_field(4)
"""
The maximum edition this plugin supports. This will be treated as an
Edition enum, but we want to allow unknown values. It should be specified
according to the edition enum value, *not* the edition number. Only takes
effect for plugins that have FEATURE_SUPPORTS_EDITIONS set.
"""
file: List["CodeGeneratorResponseFile"] = aristaproto.message_field(15)
@dataclass(eq=False, repr=False)
class CodeGeneratorResponseFile(aristaproto.Message):
"""Represents a single generated file."""
name: str = aristaproto.string_field(1)
"""
The file name, relative to the output directory. The name must not
contain "." or ".." components and must be relative, not be absolute (so,
the file cannot lie outside the output directory). "/" must be used as
the path separator, not "\".
If the name is omitted, the content will be appended to the previous
file. This allows the generator to break large files into small chunks,
and allows the generated text to be streamed back to protoc so that large
files need not reside completely in memory at one time. Note that as of
this writing protoc does not optimize for this -- it will read the entire
CodeGeneratorResponse before writing files to disk.
"""
insertion_point: str = aristaproto.string_field(2)
"""
If non-empty, indicates that the named file should already exist, and the
content here is to be inserted into that file at a defined insertion
point. This feature allows a code generator to extend the output
produced by another code generator. The original generator may provide
insertion points by placing special annotations in the file that look
like:
@@protoc_insertion_point(NAME)
The annotation can have arbitrary text before and after it on the line,
which allows it to be placed in a comment. NAME should be replaced with
an identifier naming the point -- this is what other generators will use
as the insertion_point. Code inserted at this point will be placed
immediately above the line containing the insertion point (thus multiple
insertions to the same point will come out in the order they were added).
The double-@ is intended to make it unlikely that the generated code
could contain things that look like insertion points by accident.
For example, the C++ code generator places the following line in the
.pb.h files that it generates:
// @@protoc_insertion_point(namespace_scope)
This line appears within the scope of the file's package namespace, but
outside of any particular class. Another plugin can then specify the
insertion_point "namespace_scope" to generate additional classes or
other declarations that should be placed in this scope.
Note that if the line containing the insertion point begins with
whitespace, the same whitespace will be added to every line of the
inserted text. This is useful for languages like Python, where
indentation matters. In these languages, the insertion point comment
should be indented the same amount as any inserted code will need to be
in order to work correctly in that context.
The code generator that generates the initial file and the one which
inserts into it must both run as part of a single invocation of protoc.
Code generators are executed in the order in which they appear on the
command line.
If |insertion_point| is present, |name| must also be present.
"""
content: str = aristaproto.string_field(15)
"""The file contents."""
generated_code_info: (
"aristaproto_lib_pydantic_google_protobuf.GeneratedCodeInfo"
) = aristaproto.message_field(16)
"""
Information describing the file content being inserted. If an insertion
point is used, this information will be appropriately offset and inserted
into the code generation metadata for the generated files.
"""
CodeGeneratorRequest.__pydantic_model__.update_forward_refs() # type: ignore
CodeGeneratorResponse.__pydantic_model__.update_forward_refs() # type: ignore
CodeGeneratorResponseFile.__pydantic_model__.update_forward_refs() # type: ignore

View file

File diff suppressed because it is too large

View file

@@ -0,0 +1,198 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: google/protobuf/compiler/plugin.proto
# plugin: python-aristaproto
# This file has been @generated
from dataclasses import dataclass
from typing import List
import aristaproto
import aristaproto.lib.google.protobuf as aristaproto_lib_google_protobuf
class CodeGeneratorResponseFeature(aristaproto.Enum):
"""Sync with code_generator.h."""
FEATURE_NONE = 0
FEATURE_PROTO3_OPTIONAL = 1
FEATURE_SUPPORTS_EDITIONS = 2
@dataclass(eq=False, repr=False)
class Version(aristaproto.Message):
"""The version number of protocol compiler."""
major: int = aristaproto.int32_field(1)
minor: int = aristaproto.int32_field(2)
patch: int = aristaproto.int32_field(3)
suffix: str = aristaproto.string_field(4)
"""
A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
be empty for mainline stable releases.
"""
@dataclass(eq=False, repr=False)
class CodeGeneratorRequest(aristaproto.Message):
"""An encoded CodeGeneratorRequest is written to the plugin's stdin."""
file_to_generate: List[str] = aristaproto.string_field(1)
"""
The .proto files that were explicitly listed on the command-line. The
code generator should generate code only for these files. Each file's
descriptor will be included in proto_file, below.
"""
parameter: str = aristaproto.string_field(2)
"""The generator parameter passed on the command-line."""
proto_file: List[
"aristaproto_lib_google_protobuf.FileDescriptorProto"
] = aristaproto.message_field(15)
"""
FileDescriptorProtos for all files in files_to_generate and everything
they import. The files will appear in topological order, so each file
appears before any file that imports it.
Note: the files listed in files_to_generate will include runtime-retention
options only, but all other files will include source-retention options.
The source_file_descriptors field below is available in case you need
source-retention options for files_to_generate.
protoc guarantees that all proto_files will be written after
the fields above, even though this is not technically guaranteed by the
protobuf wire format. This theoretically could allow a plugin to stream
in the FileDescriptorProtos and handle them one by one rather than read
the entire set into memory at once. However, as of this writing, this
is not similarly optimized on protoc's end -- it will store all fields in
memory at once before sending them to the plugin.
Type names of fields and extensions in the FileDescriptorProto are always
fully qualified.
"""
source_file_descriptors: List[
"aristaproto_lib_google_protobuf.FileDescriptorProto"
] = aristaproto.message_field(17)
"""
File descriptors with all options, including source-retention options.
These descriptors are only provided for the files listed in
files_to_generate.
"""
compiler_version: "Version" = aristaproto.message_field(3)
"""The version number of protocol compiler."""
@dataclass(eq=False, repr=False)
class CodeGeneratorResponse(aristaproto.Message):
"""The plugin writes an encoded CodeGeneratorResponse to stdout."""
error: str = aristaproto.string_field(1)
"""
Error message. If non-empty, code generation failed. The plugin process
should exit with status code zero even if it reports an error in this way.
This should be used to indicate errors in .proto files which prevent the
code generator from generating correct code. Errors which indicate a
problem in protoc itself -- such as the input CodeGeneratorRequest being
unparseable -- should be reported by writing a message to stderr and
exiting with a non-zero status code.
"""
supported_features: int = aristaproto.uint64_field(2)
"""
A bitmask of supported features that the code generator supports.
This is a bitwise "or" of values from the Feature enum.
"""
minimum_edition: int = aristaproto.int32_field(3)
"""
The minimum edition this plugin supports. This will be treated as an
Edition enum, but we want to allow unknown values. It should be specified
according to the edition enum value, *not* the edition number. Only takes
effect for plugins that have FEATURE_SUPPORTS_EDITIONS set.
"""
maximum_edition: int = aristaproto.int32_field(4)
"""
The maximum edition this plugin supports. This will be treated as an
Edition enum, but we want to allow unknown values. It should be specified
according to the edition enum value, *not* the edition number. Only takes
effect for plugins that have FEATURE_SUPPORTS_EDITIONS set.
"""
file: List["CodeGeneratorResponseFile"] = aristaproto.message_field(15)
@dataclass(eq=False, repr=False)
class CodeGeneratorResponseFile(aristaproto.Message):
"""Represents a single generated file."""
name: str = aristaproto.string_field(1)
"""
The file name, relative to the output directory. The name must not
contain "." or ".." components and must be relative, not be absolute (so,
the file cannot lie outside the output directory). "/" must be used as
the path separator, not "\".
If the name is omitted, the content will be appended to the previous
file. This allows the generator to break large files into small chunks,
and allows the generated text to be streamed back to protoc so that large
files need not reside completely in memory at one time. Note that as of
this writing protoc does not optimize for this -- it will read the entire
CodeGeneratorResponse before writing files to disk.
"""
insertion_point: str = aristaproto.string_field(2)
"""
If non-empty, indicates that the named file should already exist, and the
content here is to be inserted into that file at a defined insertion
point. This feature allows a code generator to extend the output
produced by another code generator. The original generator may provide
insertion points by placing special annotations in the file that look
like:
@@protoc_insertion_point(NAME)
The annotation can have arbitrary text before and after it on the line,
which allows it to be placed in a comment. NAME should be replaced with
an identifier naming the point -- this is what other generators will use
as the insertion_point. Code inserted at this point will be placed
immediately above the line containing the insertion point (thus multiple
insertions to the same point will come out in the order they were added).
The double-@ is intended to make it unlikely that the generated code
could contain things that look like insertion points by accident.
For example, the C++ code generator places the following line in the
.pb.h files that it generates:
// @@protoc_insertion_point(namespace_scope)
This line appears within the scope of the file's package namespace, but
outside of any particular class. Another plugin can then specify the
insertion_point "namespace_scope" to generate additional classes or
other declarations that should be placed in this scope.
Note that if the line containing the insertion point begins with
whitespace, the same whitespace will be added to every line of the
inserted text. This is useful for languages like Python, where
indentation matters. In these languages, the insertion point comment
should be indented the same amount as any inserted code will need to be
in order to work correctly in that context.
The code generator that generates the initial file and the one which
inserts into it must both run as part of a single invocation of protoc.
Code generators are executed in the order in which they appear on the
command line.
If |insertion_point| is present, |name| must also be present.
"""
content: str = aristaproto.string_field(15)
"""The file contents."""
generated_code_info: "aristaproto_lib_google_protobuf.GeneratedCodeInfo" = (
aristaproto.message_field(16)
)
"""
Information describing the file content being inserted. If an insertion
point is used, this information will be appropriately offset and inserted
into the code generation metadata for the generated files.
"""

View file

@@ -0,0 +1 @@
from .main import main

View file

@@ -0,0 +1,4 @@
from .main import main
main()

View file

@@ -0,0 +1,50 @@
import os.path
try:
# aristaproto[compiler] specific dependencies
import black
import isort.api
import jinja2
except ImportError as err:
print(
"\033[31m"
f"Unable to import `{err.name}` from aristaproto plugin! "
"Please ensure that you've installed aristaproto as "
'`pip install "aristaproto[compiler]"` so that compiler dependencies '
"are included."
"\033[0m"
)
raise SystemExit(1)
from .models import OutputTemplate
def outputfile_compiler(output_file: OutputTemplate) -> str:
templates_folder = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "templates")
)
env = jinja2.Environment(
trim_blocks=True,
lstrip_blocks=True,
loader=jinja2.FileSystemLoader(templates_folder),
)
template = env.get_template("template.py.j2")
code = template.render(output_file=output_file)
code = isort.api.sort_code_string(
code=code,
show_diff=False,
py_version=37,
profile="black",
combine_as_imports=True,
lines_after_imports=2,
quiet=True,
force_grid_wrap=2,
known_third_party=["grpclib", "aristaproto"],
)
return black.format_str(
src_contents=code,
mode=black.Mode(),
)
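The two formatting passes are independent of the template; as a standalone sketch (made-up input string, assuming black and isort are installed):

import black
import isort.api

messy = "from typing import List\nimport os\ndef f(  x ):\n    return [x]\n"
sorted_code = isort.api.sort_code_string(code=messy, profile="black")
print(black.format_str(src_contents=sorted_code, mode=black.Mode()))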

52
src/aristaproto/plugin/main.py Executable file
View file

@@ -0,0 +1,52 @@
#!/usr/bin/env python
import os
import sys
from aristaproto.lib.google.protobuf.compiler import (
CodeGeneratorRequest,
CodeGeneratorResponse,
)
from aristaproto.plugin.models import monkey_patch_oneof_index
from aristaproto.plugin.parser import generate_code
def main() -> None:
"""The plugin's main entry point."""
# Read request message from stdin
data = sys.stdin.buffer.read()
# Apply workaround for proto2/3 difference in protoc messages
monkey_patch_oneof_index()
# Parse request
request = CodeGeneratorRequest()
request.parse(data)
dump_file = os.getenv("ARISTAPROTO_DUMP")
if dump_file:
dump_request(dump_file, request)
# Generate code
response = generate_code(request)
# Serialise response message
output = response.SerializeToString()
# Write to stdout
sys.stdout.buffer.write(output)
def dump_request(dump_file: str, request: CodeGeneratorRequest) -> None:
"""
For developers: Supports running plugin.py standalone so it's possible to debug it.
Run protoc (or generate.py) with ARISTAPROTO_DUMP="yourfile.bin" to write the request to a file.
Then run plugin.py from your IDE in debugging mode, and redirect stdin to the file.
"""
with open(str(dump_file), "wb") as fh:
sys.stderr.write(f"\033[31mWriting input from protoc to: {dump_file}\033[0m\n")
fh.write(request.SerializeToString())
if __name__ == "__main__":
main()
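Following the dump_request docstring, a hypothetical replay harness ("yourfile.bin" is the placeholder from the docstring) steps through code generation without protoc:

from aristaproto.lib.google.protobuf.compiler import CodeGeneratorRequest
from aristaproto.plugin.models import monkey_patch_oneof_index
from aristaproto.plugin.parser import generate_code

with open("yourfile.bin", "rb") as fh:  # written earlier via ARISTAPROTO_DUMP
    data = fh.read()

monkey_patch_oneof_index()  # same workaround main() applies
request = CodeGeneratorRequest()
request.parse(data)
response = generate_code(request)
print(f"{len(response.file)} file(s) generated")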

View file

@@ -0,0 +1,851 @@
"""Plugin model dataclasses.
These classes are meant to be an intermediate representation
of protobuf objects. They are used to organize the data collected during parsing.
The general intention is to create a doubly-linked tree-like structure
with the following types of references:
- Downwards references: from message -> fields, from output package -> messages
or from service -> service methods
- Upwards references: from field -> message, message -> package.
- Input/output message references: from a service method to its corresponding
input/output messages, which may even be in another package.
There are convenience methods to allow climbing up and down this tree, for
example to retrieve the list of all messages that are in the same package as
the current message.
Most of these classes take as inputs:
- proto_obj: A reference to its corresponding protobuf object as
presented by the protoc plugin.
- parent: a reference to the parent object in the tree.
With this information, the class is able to expose attributes,
such as a pythonized name, that will be calculated from proto_obj.
The instantiation should also attach a reference to the new object
into the corresponding place within its parent object. For example,
instantiating field `A` with parent message `B` should add a
reference to `A` to `B`'s `fields` attribute.
"""
import builtins
import re
import textwrap
from dataclasses import (
dataclass,
field,
)
from typing import (
Dict,
Iterable,
Iterator,
List,
Optional,
Set,
Type,
Union,
)
import aristaproto
from aristaproto import which_one_of
from aristaproto.casing import sanitize_name
from aristaproto.compile.importing import (
get_type_reference,
parse_source_type_name,
)
from aristaproto.compile.naming import (
pythonize_class_name,
pythonize_field_name,
pythonize_method_name,
)
from aristaproto.lib.google.protobuf import (
DescriptorProto,
EnumDescriptorProto,
Field,
FieldDescriptorProto,
FieldDescriptorProtoLabel,
FieldDescriptorProtoType,
FileDescriptorProto,
MethodDescriptorProto,
)
from aristaproto.lib.google.protobuf.compiler import CodeGeneratorRequest
from ..compile.importing import (
get_type_reference,
parse_source_type_name,
)
from ..compile.naming import (
pythonize_class_name,
pythonize_enum_member_name,
pythonize_field_name,
pythonize_method_name,
)
# Create a unique placeholder to deal with
# https://stackoverflow.com/questions/51575931/class-inheritance-in-python-3-7-dataclasses
PLACEHOLDER = object()
# Organize proto types into categories
PROTO_FLOAT_TYPES = (
FieldDescriptorProtoType.TYPE_DOUBLE, # 1
FieldDescriptorProtoType.TYPE_FLOAT, # 2
)
PROTO_INT_TYPES = (
FieldDescriptorProtoType.TYPE_INT64, # 3
FieldDescriptorProtoType.TYPE_UINT64, # 4
FieldDescriptorProtoType.TYPE_INT32, # 5
FieldDescriptorProtoType.TYPE_FIXED64, # 6
FieldDescriptorProtoType.TYPE_FIXED32, # 7
FieldDescriptorProtoType.TYPE_UINT32, # 13
FieldDescriptorProtoType.TYPE_SFIXED32, # 15
FieldDescriptorProtoType.TYPE_SFIXED64, # 16
FieldDescriptorProtoType.TYPE_SINT32, # 17
FieldDescriptorProtoType.TYPE_SINT64, # 18
)
PROTO_BOOL_TYPES = (FieldDescriptorProtoType.TYPE_BOOL,) # 8
PROTO_STR_TYPES = (FieldDescriptorProtoType.TYPE_STRING,) # 9
PROTO_BYTES_TYPES = (FieldDescriptorProtoType.TYPE_BYTES,) # 12
PROTO_MESSAGE_TYPES = (
FieldDescriptorProtoType.TYPE_MESSAGE, # 11
FieldDescriptorProtoType.TYPE_ENUM, # 14
)
PROTO_MAP_TYPES = (FieldDescriptorProtoType.TYPE_MESSAGE,) # 11
PROTO_PACKED_TYPES = (
FieldDescriptorProtoType.TYPE_DOUBLE, # 1
FieldDescriptorProtoType.TYPE_FLOAT, # 2
FieldDescriptorProtoType.TYPE_INT64, # 3
FieldDescriptorProtoType.TYPE_UINT64, # 4
FieldDescriptorProtoType.TYPE_INT32, # 5
FieldDescriptorProtoType.TYPE_FIXED64, # 6
FieldDescriptorProtoType.TYPE_FIXED32, # 7
FieldDescriptorProtoType.TYPE_BOOL, # 8
FieldDescriptorProtoType.TYPE_UINT32, # 13
FieldDescriptorProtoType.TYPE_SFIXED32, # 15
FieldDescriptorProtoType.TYPE_SFIXED64, # 16
FieldDescriptorProtoType.TYPE_SINT32, # 17
FieldDescriptorProtoType.TYPE_SINT64, # 18
)
def monkey_patch_oneof_index():
"""
The compiler message types are written for proto2, but we read them as proto3.
For this to work in the case of the oneof_index fields, which depend on being able
to tell whether they were set, we have to treat them as oneof fields. This method
monkey patches the generated classes after the fact to force this behaviour.
"""
object.__setattr__(
FieldDescriptorProto.__dataclass_fields__["oneof_index"].metadata[
"aristaproto"
],
"group",
"oneof_index",
)
object.__setattr__(
Field.__dataclass_fields__["oneof_index"].metadata["aristaproto"],
"group",
"oneof_index",
)
def get_comment(
proto_file: "FileDescriptorProto", path: List[int], indent: int = 4
) -> str:
pad = " " * indent
for sci_loc in proto_file.source_code_info.location:
if list(sci_loc.path) == path and sci_loc.leading_comments:
lines = sci_loc.leading_comments.strip().replace("\t", " ").split("\n")
# This is a field, message, enum, service, or method
if len(lines) == 1 and len(lines[0]) < 79 - indent - 6:
lines[0] = lines[0].strip('"')
# rstrip to remove trailing spaces including whitespaces from empty lines.
return f'{pad}"""{lines[0]}"""'
else:
# rstrip to remove trailing spaces including empty lines.
padded = [f"\n{pad}{line}".rstrip(" ") for line in lines]
joined = "".join(padded)
return f'{pad}"""{joined}\n{pad}"""'
return ""
class ProtoContentBase:
"""Methods common to MessageCompiler, ServiceCompiler and ServiceMethodCompiler."""
source_file: FileDescriptorProto
path: List[int]
comment_indent: int = 4
parent: Union["aristaproto.Message", "OutputTemplate"]
__dataclass_fields__: Dict[str, object]
def __post_init__(self) -> None:
"""Checks that no fake default fields were left as placeholders."""
# Compare the instance values, not the dataclass Field objects.
for field_name in self.__dataclass_fields__:
if getattr(self, field_name) is PLACEHOLDER:
raise ValueError(f"`{field_name}` is a required field.")
@property
def output_file(self) -> "OutputTemplate":
current = self
while not isinstance(current, OutputTemplate):
current = current.parent
return current
@property
def request(self) -> "PluginRequestCompiler":
current = self
while not isinstance(current, OutputTemplate):
current = current.parent
return current.parent_request
@property
def comment(self) -> str:
"""Crawl the proto source code and retrieve comments
for this object.
"""
return get_comment(
proto_file=self.source_file, path=self.path, indent=self.comment_indent
)
@dataclass
class PluginRequestCompiler:
plugin_request_obj: CodeGeneratorRequest
output_packages: Dict[str, "OutputTemplate"] = field(default_factory=dict)
@property
def all_messages(self) -> List["MessageCompiler"]:
"""All of the messages in this request.
Returns
-------
List[MessageCompiler]
List of all of the messages in this request.
"""
return [
msg for output in self.output_packages.values() for msg in output.messages
]
@dataclass
class OutputTemplate:
"""Representation of an output .py file.
Each output file corresponds to a .proto input file,
but may need references to other .proto files to be
built.
"""
parent_request: PluginRequestCompiler
package_proto_obj: FileDescriptorProto
input_files: List[str] = field(default_factory=list)
imports: Set[str] = field(default_factory=set)
datetime_imports: Set[str] = field(default_factory=set)
typing_imports: Set[str] = field(default_factory=set)
pydantic_imports: Set[str] = field(default_factory=set)
builtins_import: bool = False
messages: List["MessageCompiler"] = field(default_factory=list)
enums: List["EnumDefinitionCompiler"] = field(default_factory=list)
services: List["ServiceCompiler"] = field(default_factory=list)
imports_type_checking_only: Set[str] = field(default_factory=set)
pydantic_dataclasses: bool = False
output: bool = True
@property
def package(self) -> str:
"""Name of input package.
Returns
-------
str
Name of input package.
"""
return self.package_proto_obj.package
@property
def input_filenames(self) -> Iterable[str]:
"""Names of the input files used to build this output.
Returns
-------
Iterable[str]
Names of the input files used to build this output.
"""
return sorted(f.name for f in self.input_files)
@property
def python_module_imports(self) -> Set[str]:
imports = set()
if any(x for x in self.messages if any(x.deprecated_fields)):
imports.add("warnings")
if self.builtins_import:
imports.add("builtins")
return imports
@dataclass
class MessageCompiler(ProtoContentBase):
"""Representation of a protobuf message."""
source_file: FileDescriptorProto
parent: Union["MessageCompiler", OutputTemplate] = PLACEHOLDER
proto_obj: DescriptorProto = PLACEHOLDER
path: List[int] = PLACEHOLDER
fields: List[Union["FieldCompiler", "MessageCompiler"]] = field(
default_factory=list
)
deprecated: bool = field(default=False, init=False)
builtins_types: Set[str] = field(default_factory=set)
def __post_init__(self) -> None:
# Add message to output file
if isinstance(self.parent, OutputTemplate):
if isinstance(self, EnumDefinitionCompiler):
self.output_file.enums.append(self)
else:
self.output_file.messages.append(self)
self.deprecated = self.proto_obj.options.deprecated
super().__post_init__()
@property
def proto_name(self) -> str:
return self.proto_obj.name
@property
def py_name(self) -> str:
return pythonize_class_name(self.proto_name)
@property
def annotation(self) -> str:
if self.repeated:
return f"List[{self.py_name}]"
return self.py_name
@property
def deprecated_fields(self) -> Iterator[str]:
for f in self.fields:
if f.deprecated:
yield f.py_name
@property
def has_deprecated_fields(self) -> bool:
return any(self.deprecated_fields)
@property
def has_oneof_fields(self) -> bool:
return any(isinstance(field, OneOfFieldCompiler) for field in self.fields)
@property
def has_message_field(self) -> bool:
return any(
(
field.proto_obj.type in PROTO_MESSAGE_TYPES
for field in self.fields
if isinstance(field.proto_obj, FieldDescriptorProto)
)
)
def is_map(
proto_field_obj: FieldDescriptorProto, parent_message: DescriptorProto
) -> bool:
"""True if proto_field_obj is a map, otherwise False."""
if proto_field_obj.type == FieldDescriptorProtoType.TYPE_MESSAGE:
if not hasattr(parent_message, "nested_type"):
return False
# This might be a map...
message_type = proto_field_obj.type_name.split(".").pop().lower()
map_entry = f"{proto_field_obj.name.replace('_', '').lower()}entry"
if message_type == map_entry:
for nested in parent_message.nested_type: # parent message
if (
nested.name.replace("_", "").lower() == map_entry
and nested.options.map_entry
):
return True
return False
def is_oneof(proto_field_obj: FieldDescriptorProto) -> bool:
"""
True if proto_field_obj is a OneOf, otherwise False.
.. warning::
Because the message from protoc is defined in proto2, and aristaproto works with
proto3, and interpreting the FieldDescriptorProto.oneof_index field requires
distinguishing between default and unset values (which proto3 doesn't support),
we have to hack the generated FieldDescriptorProto class for this to work.
The hack consists of setting group="oneof_index" in the field metadata,
essentially making oneof_index the sole member of a one_of group, which allows
us to tell whether it was set, via the which_one_of interface.
"""
return (
not proto_field_obj.proto3_optional
and which_one_of(proto_field_obj, "oneof_index")[0] == "oneof_index"
)
@dataclass
class FieldCompiler(MessageCompiler):
parent: MessageCompiler = PLACEHOLDER
proto_obj: FieldDescriptorProto = PLACEHOLDER
def __post_init__(self) -> None:
# Add field to message
self.parent.fields.append(self)
# Check for new imports
self.add_imports_to(self.output_file)
super().__post_init__() # call FieldCompiler-> MessageCompiler __post_init__
def get_field_string(self, indent: int = 4) -> str:
"""Construct string representation of this field as a field."""
name = f"{self.py_name}"
annotations = f": {self.annotation}"
field_args = ", ".join(
([""] + self.aristaproto_field_args) if self.aristaproto_field_args else []
)
aristaproto_field_type = (
f"aristaproto.{self.field_type}_field({self.proto_obj.number}{field_args})"
)
if self.py_name in dir(builtins):
self.parent.builtins_types.add(self.py_name)
return f"{name}{annotations} = {aristaproto_field_type}"
@property
def aristaproto_field_args(self) -> List[str]:
args = []
if self.field_wraps:
args.append(f"wraps={self.field_wraps}")
if self.optional:
args.append(f"optional=True")
return args
@property
def datetime_imports(self) -> Set[str]:
imports = set()
annotation = self.annotation
# FIXME: false positives - e.g. `MyDatetimedelta`
if "timedelta" in annotation:
imports.add("timedelta")
if "datetime" in annotation:
imports.add("datetime")
return imports
@property
def typing_imports(self) -> Set[str]:
imports = set()
annotation = self.annotation
if "Optional[" in annotation:
imports.add("Optional")
if "List[" in annotation:
imports.add("List")
if "Dict[" in annotation:
imports.add("Dict")
return imports
@property
def pydantic_imports(self) -> Set[str]:
return set()
@property
def use_builtins(self) -> bool:
return self.py_type in self.parent.builtins_types or (
self.py_type == self.py_name and self.py_name in dir(builtins)
)
def add_imports_to(self, output_file: OutputTemplate) -> None:
output_file.datetime_imports.update(self.datetime_imports)
output_file.typing_imports.update(self.typing_imports)
output_file.pydantic_imports.update(self.pydantic_imports)
output_file.builtins_import = output_file.builtins_import or self.use_builtins
@property
def field_wraps(self) -> Optional[str]:
"""Returns aristaproto wrapped field type or None."""
match_wrapper = re.match(
r"\.google\.protobuf\.(.+)Value$", self.proto_obj.type_name
)
if match_wrapper:
wrapped_type = "TYPE_" + match_wrapper.group(1).upper()
if hasattr(aristaproto, wrapped_type):
return f"aristaproto.{wrapped_type}"
return None
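# Illustrative sketch (hypothetical field): a well-known wrapper type_name such
# as ".google.protobuf.Int32Value" matches with group(1) == "Int32", so this
# returns "aristaproto.TYPE_INT32" (the hasattr guard verifies the constant
# exists); a plain message like ".pkg.MyMessage" returns None.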
@property
def repeated(self) -> bool:
return (
self.proto_obj.label == FieldDescriptorProtoLabel.LABEL_REPEATED
and not is_map(self.proto_obj, self.parent)
)
@property
def optional(self) -> bool:
return self.proto_obj.proto3_optional
@property
def mutable(self) -> bool:
"""True if the field is a mutable type, otherwise False."""
return self.annotation.startswith(("List[", "Dict["))
@property
def field_type(self) -> str:
"""String representation of proto field type."""
return (
FieldDescriptorProtoType(self.proto_obj.type)
.name.lower()
.replace("type_", "")
)
@property
def default_value_string(self) -> str:
"""Python representation of the default proto value."""
if self.repeated:
return "[]"
if self.optional:
return "None"
if self.py_type == "int":
return "0"
if self.py_type == "float":
return "0.0"
elif self.py_type == "bool":
return "False"
elif self.py_type == "str":
return '""'
elif self.py_type == "bytes":
return 'b""'
elif self.field_type == "enum":
enum_proto_obj_name = self.proto_obj.type_name.split(".").pop()
enum = next(
e
for e in self.output_file.enums
if e.proto_obj.name == enum_proto_obj_name
)
return enum.default_value_string
else:
# Message type
return "None"
@property
def packed(self) -> bool:
"""True if the wire representation is a packed format."""
return self.repeated and self.proto_obj.type in PROTO_PACKED_TYPES
@property
def py_name(self) -> str:
"""Pythonized name."""
return pythonize_field_name(self.proto_name)
@property
def proto_name(self) -> str:
"""Original protobuf name."""
return self.proto_obj.name
@property
def py_type(self) -> str:
"""String representation of Python type."""
if self.proto_obj.type in PROTO_FLOAT_TYPES:
return "float"
elif self.proto_obj.type in PROTO_INT_TYPES:
return "int"
elif self.proto_obj.type in PROTO_BOOL_TYPES:
return "bool"
elif self.proto_obj.type in PROTO_STR_TYPES:
return "str"
elif self.proto_obj.type in PROTO_BYTES_TYPES:
return "bytes"
elif self.proto_obj.type in PROTO_MESSAGE_TYPES:
# Type referencing another defined Message or a named enum
return get_type_reference(
package=self.output_file.package,
imports=self.output_file.imports,
source_type=self.proto_obj.type_name,
pydantic=self.output_file.pydantic_dataclasses,
)
else:
raise NotImplementedError(f"Unknown type {self.proto_obj.type}")
@property
def annotation(self) -> str:
py_type = self.py_type
if self.use_builtins:
py_type = f"builtins.{py_type}"
if self.repeated:
return f"List[{py_type}]"
if self.optional:
return f"Optional[{py_type}]"
return py_type
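# Illustrative sketch (hypothetical fields): `repeated int32 ids = 1;` maps to
# "List[int]", `optional string note = 2;` to "Optional[str]", and `bool ok = 3;`
# to "bool" (spelled "builtins.bool" when a sibling field shadows the builtin).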
@dataclass
class OneOfFieldCompiler(FieldCompiler):
@property
def aristaproto_field_args(self) -> List[str]:
args = super().aristaproto_field_args
group = self.parent.proto_obj.oneof_decl[self.proto_obj.oneof_index].name
args.append(f'group="{group}"')
return args
@dataclass
class PydanticOneOfFieldCompiler(OneOfFieldCompiler):
@property
def optional(self) -> bool:
# Force the optional to be True. This will allow the pydantic dataclass
# to validate the object correctly by allowing the field to be left empty.
# We add a pydantic validator later to ensure exactly one field is defined.
return True
@property
def pydantic_imports(self) -> Set[str]:
return {"root_validator"}
@dataclass
class MapEntryCompiler(FieldCompiler):
py_k_type: Type = PLACEHOLDER
py_v_type: Type = PLACEHOLDER
proto_k_type: str = PLACEHOLDER
proto_v_type: str = PLACEHOLDER
def __post_init__(self) -> None:
"""Explore nested types and set k_type and v_type if unset."""
map_entry = f"{self.proto_obj.name.replace('_', '').lower()}entry"
for nested in self.parent.proto_obj.nested_type:
if (
nested.name.replace("_", "").lower() == map_entry
and nested.options.map_entry
):
# Get Python types
self.py_k_type = FieldCompiler(
source_file=self.source_file,
parent=self,
proto_obj=nested.field[0], # key
).py_type
self.py_v_type = FieldCompiler(
source_file=self.source_file,
parent=self,
proto_obj=nested.field[1], # value
).py_type
# Get proto types
self.proto_k_type = FieldDescriptorProtoType(nested.field[0].type).name
self.proto_v_type = FieldDescriptorProtoType(nested.field[1].type).name
super().__post_init__()  # call FieldCompiler -> MessageCompiler __post_init__
@property
def aristaproto_field_args(self) -> List[str]:
return [f"aristaproto.{self.proto_k_type}", f"aristaproto.{self.proto_v_type}"]
@property
def field_type(self) -> str:
return "map"
@property
def annotation(self) -> str:
return f"Dict[{self.py_k_type}, {self.py_v_type}]"
@property
def repeated(self) -> bool:
return False # maps cannot be repeated
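# Illustrative sketch (hypothetical field): `map<string, int32> counts = 1;`
# resolves py_k_type/py_v_type to str/int, so the generated field line is roughly
#
#     counts: Dict[str, int] = aristaproto.map_field(1, aristaproto.TYPE_STRING, aristaproto.TYPE_INT32)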
@dataclass
class EnumDefinitionCompiler(MessageCompiler):
"""Representation of a proto Enum definition."""
proto_obj: EnumDescriptorProto = PLACEHOLDER
entries: List["EnumDefinitionCompiler.EnumEntry"] = PLACEHOLDER
@dataclass(unsafe_hash=True)
class EnumEntry:
"""Representation of an Enum entry."""
name: str
value: int
comment: str
def __post_init__(self) -> None:
# Get entries/allowed values for this Enum
self.entries = [
self.EnumEntry(
name=pythonize_enum_member_name(
entry_proto_value.name, self.proto_obj.name
),
value=entry_proto_value.number,
comment=get_comment(
proto_file=self.source_file, path=self.path + [2, entry_number]
),
)
for entry_number, entry_proto_value in enumerate(self.proto_obj.value)
]
super().__post_init__() # call MessageCompiler __post_init__
@property
def default_value_string(self) -> str:
"""Python representation of the default value for Enums.
As per the spec, this is the first value of the Enum.
"""
return str(self.entries[0].value) # ideally, should ALWAYS be int(0)!
@dataclass
class ServiceCompiler(ProtoContentBase):
parent: OutputTemplate = PLACEHOLDER
proto_obj: DescriptorProto = PLACEHOLDER
path: List[int] = PLACEHOLDER
methods: List["ServiceMethodCompiler"] = field(default_factory=list)
def __post_init__(self) -> None:
# Add service to output file
self.output_file.services.append(self)
self.output_file.typing_imports.add("Dict")
super().__post_init__() # check for unset fields
@property
def proto_name(self) -> str:
return self.proto_obj.name
@property
def py_name(self) -> str:
return pythonize_class_name(self.proto_name)
@dataclass
class ServiceMethodCompiler(ProtoContentBase):
parent: ServiceCompiler
proto_obj: MethodDescriptorProto
path: List[int] = PLACEHOLDER
comment_indent: int = 8
def __post_init__(self) -> None:
# Add method to service
self.parent.methods.append(self)
# Check for imports
if "Optional" in self.py_output_message_type:
self.output_file.typing_imports.add("Optional")
# Check for Async imports
if self.client_streaming:
self.output_file.typing_imports.add("AsyncIterable")
self.output_file.typing_imports.add("Iterable")
self.output_file.typing_imports.add("Union")
# Required by both client and server
if self.client_streaming or self.server_streaming:
self.output_file.typing_imports.add("AsyncIterator")
# add imports required for request arguments timeout, deadline and metadata
self.output_file.typing_imports.add("Optional")
self.output_file.imports_type_checking_only.add("import grpclib.server")
self.output_file.imports_type_checking_only.add(
"from aristaproto.grpc.grpclib_client import MetadataLike"
)
self.output_file.imports_type_checking_only.add(
"from grpclib.metadata import Deadline"
)
super().__post_init__() # check for unset fields
@property
def py_name(self) -> str:
"""Pythonized method name."""
return pythonize_method_name(self.proto_obj.name)
@property
def proto_name(self) -> str:
"""Original protobuf name."""
return self.proto_obj.name
@property
def route(self) -> str:
package_part = (
f"{self.output_file.package}." if self.output_file.package else ""
)
return f"/{package_part}{self.parent.proto_name}/{self.proto_name}"
@property
def py_input_message(self) -> Optional[MessageCompiler]:
"""Find the input message object.
Returns
-------
Optional[MessageCompiler]
MessageCompiler instance representing the input message.
If no input message could be found, None is returned.
"""
package, name = parse_source_type_name(self.proto_obj.input_type)
# Nested types are currently flattened without dots.
# Todo: keep a fully qualified name in types that is
# comparable with method.input_type
for msg in self.request.all_messages:
if (
msg.py_name == pythonize_class_name(name.replace(".", ""))
and msg.output_file.package == package
):
return msg
return None
@property
def py_input_message_type(self) -> str:
"""String representation of the Python type corresponding to the
input message.
Returns
-------
str
String representation of the Python type corresponding to the input message.
"""
return get_type_reference(
package=self.output_file.package,
imports=self.output_file.imports,
source_type=self.proto_obj.input_type,
unwrap=False,
pydantic=self.output_file.pydantic_dataclasses,
).strip('"')
@property
def py_input_message_param(self) -> str:
"""Param name corresponding to py_input_message_type.
Returns
-------
str
Param name corresponding to py_input_message_type.
"""
return pythonize_field_name(self.py_input_message_type)
@property
def py_output_message_type(self) -> str:
"""String representation of the Python type corresponding to the
output message.
Returns
-------
str
String representation of the Python type corresponding to the output message.
"""
return get_type_reference(
package=self.output_file.package,
imports=self.output_file.imports,
source_type=self.proto_obj.output_type,
unwrap=False,
pydantic=self.output_file.pydantic_dataclasses,
).strip('"')
@property
def client_streaming(self) -> bool:
return self.proto_obj.client_streaming
@property
def server_streaming(self) -> bool:
return self.proto_obj.server_streaming

View file

@ -0,0 +1,221 @@
import pathlib
import sys
from typing import (
Generator,
List,
Set,
Tuple,
Union,
)
from aristaproto.lib.google.protobuf import (
DescriptorProto,
EnumDescriptorProto,
FieldDescriptorProto,
FileDescriptorProto,
ServiceDescriptorProto,
)
from aristaproto.lib.google.protobuf.compiler import (
CodeGeneratorRequest,
CodeGeneratorResponse,
CodeGeneratorResponseFeature,
CodeGeneratorResponseFile,
)
from .compiler import outputfile_compiler
from .models import (
EnumDefinitionCompiler,
FieldCompiler,
MapEntryCompiler,
MessageCompiler,
OneOfFieldCompiler,
OutputTemplate,
PluginRequestCompiler,
PydanticOneOfFieldCompiler,
ServiceCompiler,
ServiceMethodCompiler,
is_map,
is_oneof,
)
def traverse(
proto_file: FileDescriptorProto,
) -> Generator[
Tuple[Union[EnumDescriptorProto, DescriptorProto], List[int]], None, None
]:
# Todo: Keep information about nested hierarchy
def _traverse(
path: List[int],
items: Union[List[EnumDescriptorProto], List[DescriptorProto]],
prefix: str = "",
) -> Generator[
Tuple[Union[EnumDescriptorProto, DescriptorProto], List[int]], None, None
]:
for i, item in enumerate(items):
# Adjust the name since we flatten the hierarchy.
# Todo: don't change the name, but include full name in returned tuple
item.name = next_prefix = f"{prefix}_{item.name}"
yield item, [*path, i]
if isinstance(item, DescriptorProto):
# Get nested types.
yield from _traverse([*path, i, 4], item.enum_type, next_prefix)
yield from _traverse([*path, i, 3], item.nested_type, next_prefix)
yield from _traverse([5], proto_file.enum_type)
yield from _traverse([4], proto_file.message_type)
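# Illustrative note: the numeric path entries mirror descriptor.proto field
# numbers (4 = message_type, 5 = enum_type on a file; 3 = nested_type,
# 4 = enum_type on a message), so the second nested message of the first
# top-level message is reached via the path [4, 0, 3, 1].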
def generate_code(request: CodeGeneratorRequest) -> CodeGeneratorResponse:
response = CodeGeneratorResponse()
plugin_options = request.parameter.split(",") if request.parameter else []
response.supported_features = CodeGeneratorResponseFeature.FEATURE_PROTO3_OPTIONAL
request_data = PluginRequestCompiler(plugin_request_obj=request)
# Gather output packages
for proto_file in request.proto_file:
output_package_name = proto_file.package
if output_package_name not in request_data.output_packages:
# Create a new output if there is no output for this package
request_data.output_packages[output_package_name] = OutputTemplate(
parent_request=request_data, package_proto_obj=proto_file
)
# Add this input file to the output corresponding to this package
request_data.output_packages[output_package_name].input_files.append(proto_file)
if (
proto_file.package == "google.protobuf"
and "INCLUDE_GOOGLE" not in plugin_options
):
# If not INCLUDE_GOOGLE,
# skip outputting Google's well-known types
request_data.output_packages[output_package_name].output = False
if "pydantic_dataclasses" in plugin_options:
request_data.output_packages[
output_package_name
].pydantic_dataclasses = True
# Read Messages and Enums
# We need to read Messages before Services so that we can
# get the references to input/output messages for each service
for output_package_name, output_package in request_data.output_packages.items():
for proto_input_file in output_package.input_files:
for item, path in traverse(proto_input_file):
read_protobuf_type(
source_file=proto_input_file,
item=item,
path=path,
output_package=output_package,
)
# Read Services
for output_package_name, output_package in request_data.output_packages.items():
for proto_input_file in output_package.input_files:
for index, service in enumerate(proto_input_file.service):
read_protobuf_service(service, index, output_package)
# Generate output files
output_paths: Set[pathlib.Path] = set()
for output_package_name, output_package in request_data.output_packages.items():
if not output_package.output:
continue
# Add files to the response object
output_path = pathlib.Path(*output_package_name.split("."), "__init__.py")
output_paths.add(output_path)
response.file.append(
CodeGeneratorResponseFile(
name=str(output_path),
# Render and then format the output file
content=outputfile_compiler(output_file=output_package),
)
)
# Make each output directory a package with __init__ file
init_files = {
directory.joinpath("__init__.py")
for path in output_paths
for directory in path.parents
if not directory.joinpath("__init__.py").exists()
} - output_paths
for init_file in init_files:
response.file.append(CodeGeneratorResponseFile(name=str(init_file)))
for output_package_name in sorted(output_paths.union(init_files)):
print(f"Writing {output_package_name}", file=sys.stderr)
return response
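# Minimal entrypoint sketch (assumed wiring, not the plugin's actual main):
# protoc passes a serialized CodeGeneratorRequest on stdin and expects the
# serialized CodeGeneratorResponse on stdout:
#
#     import sys
#     request = CodeGeneratorRequest().parse(sys.stdin.buffer.read())
#     sys.stdout.buffer.write(bytes(generate_code(request)))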
def _make_one_of_field_compiler(
output_package: OutputTemplate,
source_file: "FileDescriptorProto",
parent: MessageCompiler,
proto_obj: "FieldDescriptorProto",
path: List[int],
) -> FieldCompiler:
pydantic = output_package.pydantic_dataclasses
Cls = PydanticOneOfFieldCompiler if pydantic else OneOfFieldCompiler
return Cls(
source_file=source_file,
parent=parent,
proto_obj=proto_obj,
path=path,
)
def read_protobuf_type(
item: DescriptorProto,
path: List[int],
source_file: "FileDescriptorProto",
output_package: OutputTemplate,
) -> None:
if isinstance(item, DescriptorProto):
if item.options.map_entry:
# Skip generated map entry messages since we just use dicts
return
# Process Message
message_data = MessageCompiler(
source_file=source_file, parent=output_package, proto_obj=item, path=path
)
for index, field in enumerate(item.field):
if is_map(field, item):
MapEntryCompiler(
source_file=source_file,
parent=message_data,
proto_obj=field,
path=path + [2, index],
)
elif is_oneof(field):
_make_one_of_field_compiler(
output_package, source_file, message_data, field, path + [2, index]
)
else:
FieldCompiler(
source_file=source_file,
parent=message_data,
proto_obj=field,
path=path + [2, index],
)
elif isinstance(item, EnumDescriptorProto):
# Enum
EnumDefinitionCompiler(
source_file=source_file, parent=output_package, proto_obj=item, path=path
)
def read_protobuf_service(
service: ServiceDescriptorProto, index: int, output_package: OutputTemplate
) -> None:
service_data = ServiceCompiler(
parent=output_package, proto_obj=service, path=[6, index]
)
for j, method in enumerate(service.method):
ServiceMethodCompiler(
parent=service_data, proto_obj=method, path=[6, index, 2, j]
)

View file

@ -0,0 +1,2 @@
@SET plugin_dir=%~dp0
@python -m %plugin_dir% %*

0
src/aristaproto/py.typed Normal file
View file

View file

@ -0,0 +1,257 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: {{ ', '.join(output_file.input_filenames) }}
# plugin: python-aristaproto
# This file has been @generated
{% for i in output_file.python_module_imports|sort %}
import {{ i }}
{% endfor %}
{% if output_file.pydantic_dataclasses %}
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from dataclasses import dataclass
else:
from pydantic.dataclasses import dataclass
{%- else -%}
from dataclasses import dataclass
{% endif %}
{% if output_file.datetime_imports %}
from datetime import {% for i in output_file.datetime_imports|sort %}{{ i }}{% if not loop.last %}, {% endif %}{% endfor %}
{% endif %}
{% if output_file.typing_imports %}
from typing import {% for i in output_file.typing_imports|sort %}{{ i }}{% if not loop.last %}, {% endif %}{% endfor %}
{% endif %}
{% if output_file.pydantic_imports %}
from pydantic import {% for i in output_file.pydantic_imports|sort %}{{ i }}{% if not loop.last %}, {% endif %}{% endfor %}
{% endif %}
import aristaproto
{% if output_file.services %}
from aristaproto.grpc.grpclib_server import ServiceBase
import grpclib
{% endif %}
{% for i in output_file.imports|sort %}
{{ i }}
{% endfor %}
{% if output_file.imports_type_checking_only %}
from typing import TYPE_CHECKING
if TYPE_CHECKING:
{% for i in output_file.imports_type_checking_only|sort %} {{ i }}
{% endfor %}
{% endif %}
{% if output_file.enums %}{% for enum in output_file.enums %}
class {{ enum.py_name }}(aristaproto.Enum):
{% if enum.comment %}
{{ enum.comment }}
{% endif %}
{% for entry in enum.entries %}
{{ entry.name }} = {{ entry.value }}
{% if entry.comment %}
{{ entry.comment }}
{% endif %}
{% endfor %}
{% endfor %}
{% endif %}
{% for message in output_file.messages %}
@dataclass(eq=False, repr=False)
class {{ message.py_name }}(aristaproto.Message):
{% if message.comment %}
{{ message.comment }}
{% endif %}
{% for field in message.fields %}
{{ field.get_field_string() }}
{% if field.comment %}
{{ field.comment }}
{% endif %}
{% endfor %}
{% if not message.fields %}
pass
{% endif %}
{% if message.deprecated or message.has_deprecated_fields %}
def __post_init__(self) -> None:
{% if message.deprecated %}
warnings.warn("{{ message.py_name }} is deprecated", DeprecationWarning)
{% endif %}
super().__post_init__()
{% for field in message.deprecated_fields %}
if self.is_set("{{ field }}"):
warnings.warn("{{ message.py_name }}.{{ field }} is deprecated", DeprecationWarning)
{% endfor %}
{% endif %}
{% if output_file.pydantic_dataclasses and message.has_oneof_fields %}
@root_validator()
def check_oneof(cls, values):
return cls._validate_field_groups(values)
{% endif %}
{% endfor %}
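{# Illustrative only (hypothetical schema): a message `Greeting { string text = 1; }`
   renders roughly as:
       @dataclass(eq=False, repr=False)
       class Greeting(aristaproto.Message):
           text: str = aristaproto.string_field(1)
#}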
{% for service in output_file.services %}
class {{ service.py_name }}Stub(aristaproto.ServiceStub):
{% if service.comment %}
{{ service.comment }}
{% elif not service.methods %}
pass
{% endif %}
{% for method in service.methods %}
async def {{ method.py_name }}(self
{%- if not method.client_streaming -%}
{%- if method.py_input_message -%}, {{ method.py_input_message_param }}: "{{ method.py_input_message_type }}"{%- endif -%}
{%- else -%}
{# Client streaming: need a request iterator instead #}
, {{ method.py_input_message_param }}_iterator: Union[AsyncIterable["{{ method.py_input_message_type }}"], Iterable["{{ method.py_input_message_type }}"]]
{%- endif -%}
,
*
, timeout: Optional[float] = None
, deadline: Optional["Deadline"] = None
, metadata: Optional["MetadataLike"] = None
) -> {% if method.server_streaming %}AsyncIterator["{{ method.py_output_message_type }}"]{% else %}"{{ method.py_output_message_type }}"{% endif %}:
{% if method.comment %}
{{ method.comment }}
{% endif %}
{% if method.server_streaming %}
{% if method.client_streaming %}
async for response in self._stream_stream(
"{{ method.route }}",
{{ method.py_input_message_param }}_iterator,
{{ method.py_input_message_type }},
{{ method.py_output_message_type.strip('"') }},
timeout=timeout,
deadline=deadline,
metadata=metadata,
):
yield response
{% else %}{# i.e. not client streaming #}
async for response in self._unary_stream(
"{{ method.route }}",
{{ method.py_input_message_param }},
{{ method.py_output_message_type.strip('"') }},
timeout=timeout,
deadline=deadline,
metadata=metadata,
):
yield response
{% endif %}{# if client streaming #}
{% else %}{# i.e. not server streaming #}
{% if method.client_streaming %}
return await self._stream_unary(
"{{ method.route }}",
{{ method.py_input_message_param }}_iterator,
{{ method.py_input_message_type }},
{{ method.py_output_message_type.strip('"') }},
timeout=timeout,
deadline=deadline,
metadata=metadata,
)
{% else %}{# i.e. not client streaming #}
return await self._unary_unary(
"{{ method.route }}",
{{ method.py_input_message_param }},
{{ method.py_output_message_type.strip('"') }},
timeout=timeout,
deadline=deadline,
metadata=metadata,
)
{% endif %}{# client streaming #}
{% endif %}
{% endfor %}
{% endfor %}
{% for service in output_file.services %}
class {{ service.py_name }}Base(ServiceBase):
{% if service.comment %}
{{ service.comment }}
{% endif %}
{% for method in service.methods %}
async def {{ method.py_name }}(self
{%- if not method.client_streaming -%}
{%- if method.py_input_message -%}, {{ method.py_input_message_param }}: "{{ method.py_input_message_type }}"{%- endif -%}
{%- else -%}
{# Client streaming: need a request iterator instead #}
, {{ method.py_input_message_param }}_iterator: AsyncIterator["{{ method.py_input_message_type }}"]
{%- endif -%}
) -> {% if method.server_streaming %}AsyncIterator["{{ method.py_output_message_type }}"]{% else %}"{{ method.py_output_message_type }}"{% endif %}:
{% if method.comment %}
{{ method.comment }}
{% endif %}
raise grpclib.GRPCError(grpclib.const.Status.UNIMPLEMENTED)
{% if method.server_streaming %}
{# Commented out to avoid unreachable code. #}
{# yield {{ method.py_output_message_type }}() #}
{% endif %}
{% endfor %}
{% for method in service.methods %}
async def __rpc_{{ method.py_name }}(self, stream: "grpclib.server.Stream[{{ method.py_input_message_type }}, {{ method.py_output_message_type }}]") -> None:
{% if not method.client_streaming %}
request = await stream.recv_message()
{% else %}
request = stream.__aiter__()
{% endif %}
{% if not method.server_streaming %}
response = await self.{{ method.py_name }}(request)
await stream.send_message(response)
{% else %}
await self._call_rpc_handler_server_stream(
self.{{ method.py_name }},
stream,
request,
)
{% endif %}
{% endfor %}
def __mapping__(self) -> Dict[str, grpclib.const.Handler]:
return {
{% for method in service.methods %}
"{{ method.route }}": grpclib.const.Handler(
self.__rpc_{{ method.py_name }},
{% if not method.client_streaming and not method.server_streaming %}
grpclib.const.Cardinality.UNARY_UNARY,
{% elif not method.client_streaming and method.server_streaming %}
grpclib.const.Cardinality.UNARY_STREAM,
{% elif method.client_streaming and not method.server_streaming %}
grpclib.const.Cardinality.STREAM_UNARY,
{% else %}
grpclib.const.Cardinality.STREAM_STREAM,
{% endif %}
{{ method.py_input_message_type }},
{{ method.py_output_message_type }},
),
{% endfor %}
}
{% endfor %}
{% if output_file.pydantic_dataclasses %}
{% for message in output_file.messages %}
{% if message.has_message_field %}
{{ message.py_name }}.__pydantic_model__.update_forward_refs() # type: ignore
{% endif %}
{% endfor %}
{% endif %}

56
src/aristaproto/utils.py Normal file
View file

@ -0,0 +1,56 @@
from __future__ import annotations
from typing import (
Any,
Callable,
Generic,
Optional,
Type,
TypeVar,
)
from typing_extensions import (
Concatenate,
ParamSpec,
Self,
)
SelfT = TypeVar("SelfT")
P = ParamSpec("P")
HybridT = TypeVar("HybridT", covariant=True)
class hybridmethod(Generic[SelfT, P, HybridT]):
def __init__(
self,
func: Callable[
Concatenate[type[SelfT], P], HybridT
], # Must be the classmethod version
):
self.cls_func = func
self.instance_func: Optional[Callable[Concatenate[SelfT, P], HybridT]] = None
self.__doc__ = func.__doc__
def instancemethod(self, func: Callable[Concatenate[SelfT, P], HybridT]) -> Self:
self.instance_func = func
return self
def __get__(
self, instance: Optional[SelfT], owner: Type[SelfT]
) -> Callable[P, HybridT]:
if instance is None or self.instance_func is None:
# either bound to the class, or no instance method available
return self.cls_func.__get__(owner, None)
return self.instance_func.__get__(instance, owner)
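# Usage sketch (hypothetical class, not from this module):
#
#     class Config:
#         @hybridmethod
#         def describe(cls) -> str:
#             return "class-level view"
#
#         @describe.instancemethod
#         def describe(self) -> str:
#             return "instance-level view"
#
#     Config.describe()    # -> "class-level view"
#     Config().describe()  # -> "instance-level view"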
T_co = TypeVar("T_co")
TT_co = TypeVar("TT_co", bound="type[Any]")
class classproperty(Generic[TT_co, T_co]):
def __init__(self, func: Callable[[TT_co], T_co]):
self.__func__ = func
def __get__(self, instance: Any, type: TT_co) -> T_co:
return self.__func__(type)
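# Usage sketch (hypothetical class, not from this module):
#
#     class Palette:
#         _default = "red"
#
#         @classproperty
#         def default(cls) -> str:
#             return cls._default
#
#     Palette.default  # -> "red", computed on attribute access without a call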