Skip to content

waku

WakuApplication

WakuApplication(
    *, container, registry, lifespan, extension_registry
)
Source code in src/waku/application.py
def __init__(
    self,
    *,
    container: AsyncContainer,
    registry: ModuleRegistry,
    lifespan: Sequence[LifespanFunc | LifespanWrapper],
    extension_registry: ExtensionRegistry,
) -> None:
    """Assemble the application from its container, registry and hooks."""
    self._container = container
    self._registry = registry
    self._extension_registry = extension_registry
    # Normalize every entry to a LifespanWrapper exactly once, up front.
    self._lifespan = tuple(
        fn if isinstance(fn, LifespanWrapper) else LifespanWrapper(fn)
        for fn in lifespan
    )
    self._exit_stack = AsyncExitStack()
    self._initialized = False

container property

container

registry property

registry

initialize async

initialize()
Source code in src/waku/application.py
async def initialize(self) -> None:
    """Run extension init hooks; calling again after success is a no-op."""
    if not self._initialized:
        await self._call_on_init_extensions()
        # Flip the flag between the two hook phases, mirroring close().
        self._initialized = True
        await self._call_after_init_extensions()

close async

close()
Source code in src/waku/application.py
async def close(self) -> None:
    """Run shutdown hooks; a no-op unless initialize() has completed."""
    if self._initialized:
        await self._call_on_shutdown_extensions()
        self._initialized = False

WakuFactory

WakuFactory(
    root_module_type,
    /,
    context=None,
    lifespan=(),
    extensions=DEFAULT_EXTENSIONS,
    container_config=None,
)
Source code in src/waku/factory.py
def __init__(
    self,
    root_module_type: ModuleType,
    /,
    context: dict[Any, Any] | None = None,
    lifespan: Sequence[LifespanFunc] = (),
    extensions: Sequence[ApplicationExtension] = DEFAULT_EXTENSIONS,
    container_config: ContainerConfig | None = None,
) -> None:
    """Capture the factory configuration; the application is built by create().

    Args:
        root_module_type: Module type used as the application's root.
        context: Optional context values passed to the registry builder.
        lifespan: Lifespan functions forwarded to the application.
        extensions: Application extensions (defaults to DEFAULT_EXTENSIONS).
        container_config: DI container configuration; a fresh ContainerConfig
            is used when omitted.
    """
    self._root_module_type = root_module_type

    self._context = context
    self._lifespan = lifespan
    self._extensions = extensions
    # `or` also replaces an explicitly-passed falsy config with the default.
    self._container_config = container_config or ContainerConfig()

create

create()
Source code in src/waku/factory.py
def create(self) -> WakuApplication:
    """Build the module registry and DI container, then assemble the app."""
    builder = ModuleRegistryBuilder(
        self._root_module_type,
        context=self._context,
        app_extensions=self._extensions,
    )
    registry = builder.build()

    container = self._build_container(registry.providers)
    extension_registry = self._build_extension_registry(registry.modules)
    return WakuApplication(
        container=container,
        registry=registry,
        lifespan=self._lifespan,
        extension_registry=extension_registry,
    )

DynamicModule dataclass

DynamicModule(
    *,
    providers=list(),
    imports=list(),
    exports=list(),
    extensions=list(),
    is_global=False,
    id=uuid4(),
    parent_module,
)

Bases: ModuleMetadata

providers class-attribute instance-attribute

providers = field(default_factory=list)

List of providers for dependency injection.

imports class-attribute instance-attribute

imports = field(default_factory=list)

List of modules imported by this module.

exports class-attribute instance-attribute

exports = field(default_factory=list)

List of types or modules exported by this module.

extensions class-attribute instance-attribute

extensions = field(default_factory=list)

List of module extensions for lifecycle hooks.

is_global class-attribute instance-attribute

is_global = False

Whether this module is global or not.

id class-attribute instance-attribute

id = field(default_factory=uuid4)

parent_module instance-attribute

parent_module

Module

Module(module_type, metadata)
Source code in src/waku/modules/_module.py
def __init__(self, module_type: ModuleType, metadata: ModuleMetadata) -> None:
    """Materialize an immutable module view from its type and metadata."""
    self.target: Final[ModuleType] = module_type
    self.id: Final[UUID] = metadata.id
    self.is_global: Final[bool] = metadata.is_global

    self.providers: Final[Sequence[Provider]] = metadata.providers
    self.imports: Final[Sequence[ModuleType | DynamicModule]] = metadata.imports
    self.exports: Final[Sequence[type[object] | ModuleType | DynamicModule]] = metadata.exports
    self.extensions: Final[Sequence[ModuleExtension]] = metadata.extensions

    # Populated lazily by create_provider().
    self._provider: BaseProvider | None = None

id instance-attribute

id = id

target instance-attribute

target = module_type

providers instance-attribute

providers = providers

imports instance-attribute

imports = imports

exports instance-attribute

exports = exports

extensions instance-attribute

extensions = extensions

is_global instance-attribute

is_global = is_global

name property

name

provider property

provider

create_provider

create_provider()
Source code in src/waku/modules/_module.py
def create_provider(self) -> BaseProvider:
    """Create a module-specific provider subclass instance and remember it."""
    # A dynamically named subclass makes the provider identifiable in debugging.
    provider_cls = cast('type[_ModuleProvider]', type(f'{self.name}Provider', (_ModuleProvider,), {}))
    self._provider = provider_cls(self.providers)
    return self._provider

module

module(
    *,
    providers=(),
    imports=(),
    exports=(),
    extensions=(),
    is_global=False,
)

Decorator to define a module.

PARAMETER DESCRIPTION
providers

Sequence of providers for dependency injection.

TYPE: Sequence[Provider] DEFAULT: ()

imports

Sequence of modules imported by this module.

TYPE: Sequence[ModuleType | DynamicModule] DEFAULT: ()

exports

Sequence of types or modules exported by this module.

TYPE: Sequence[type[object] | ModuleType | DynamicModule] DEFAULT: ()

extensions

Sequence of module extensions for lifecycle hooks.

TYPE: Sequence[ModuleExtension] DEFAULT: ()

is_global

Whether this module is global or not.

TYPE: bool DEFAULT: False

Source code in src/waku/modules/_metadata.py
def module(
    *,
    providers: Sequence[Provider] = (),
    imports: Sequence[ModuleType | DynamicModule] = (),
    exports: Sequence[type[object] | ModuleType | DynamicModule] = (),
    extensions: Sequence[ModuleExtension] = (),
    is_global: bool = False,
) -> Callable[[type[_T]], type[_T]]:
    """Class decorator that attaches module metadata to the decorated class.

    Args:
        providers: Providers this module contributes for dependency injection.
        imports: Modules this module imports.
        exports: Types or modules this module exports.
        extensions: Module extensions participating in lifecycle hooks.
        is_global: Whether the module is registered globally.
    """

    def decorator(cls: type[_T]) -> type[_T]:
        # Copy inputs into lists so the metadata owns mutable collections.
        metadata = ModuleMetadata(
            providers=list(providers),
            imports=list(imports),
            exports=list(exports),
            extensions=list(extensions),
            is_global=is_global,
        )
        # Let OnModuleConfigure extensions adjust the metadata before it
        # is attached to the class.
        for ext in metadata.extensions:
            if isinstance(ext, OnModuleConfigure):
                ext.on_module_configure(metadata)

        setattr(cls, _MODULE_METADATA_KEY, metadata)
        return cls

    return decorator

application

WakuApplication

WakuApplication(
    *, container, registry, lifespan, extension_registry
)
Source code in src/waku/application.py
def __init__(
    self,
    *,
    container: AsyncContainer,
    registry: ModuleRegistry,
    lifespan: Sequence[LifespanFunc | LifespanWrapper],
    extension_registry: ExtensionRegistry,
) -> None:
    """Assemble the application from its container, registry and hooks."""
    self._container = container
    self._registry = registry
    self._extension_registry = extension_registry
    # Normalize every entry to a LifespanWrapper exactly once, up front.
    self._lifespan = tuple(
        fn if isinstance(fn, LifespanWrapper) else LifespanWrapper(fn)
        for fn in lifespan
    )
    self._exit_stack = AsyncExitStack()
    self._initialized = False

container property

container

registry property

registry

initialize async

initialize()
Source code in src/waku/application.py
async def initialize(self) -> None:
    """Run extension init hooks; calling again after success is a no-op."""
    if not self._initialized:
        await self._call_on_init_extensions()
        # Flip the flag between the two hook phases, mirroring close().
        self._initialized = True
        await self._call_after_init_extensions()

close async

close()
Source code in src/waku/application.py
async def close(self) -> None:
    """Run shutdown hooks; a no-op unless initialize() has completed."""
    if self._initialized:
        await self._call_on_shutdown_extensions()
        self._initialized = False

di

activator

activator(fn, *markers)

Create a Provider with an activator for simple cases.

PARAMETER DESCRIPTION
fn

Callable that returns bool to determine marker activation.

TYPE: Callable[..., bool]

*markers

Marker instances or types to activate.

TYPE: Any DEFAULT: ()

RETURNS DESCRIPTION
Provider

Provider with the activator registered.

Source code in src/waku/di/_providers.py
def activator(fn: Callable[..., bool], *markers: Any) -> Provider:
    """Shortcut: build a Provider and register an activator on it.

    Args:
        fn: Predicate deciding whether the given markers are active.
        *markers: Marker instances or types gated by ``fn``.

    Returns:
        The freshly created Provider with the activator registered.
    """
    result = Provider()
    result.activate(fn, *markers)
    return result

contextual

contextual(provided_type, *, scope=REQUEST)

Provide a dependency from the current context (e.g., app/request).

PARAMETER DESCRIPTION
provided_type

The type to resolve from context.

TYPE: Any

scope

Scope of the context variable (default: Scope.REQUEST).

TYPE: Scope DEFAULT: REQUEST

RETURNS DESCRIPTION
Provider

Provider configured for context resolution.

Source code in src/waku/di/_providers.py
def contextual(
    provided_type: Any,
    *,
    scope: Scope = Scope.REQUEST,
) -> Provider:
    """Build a Provider that pulls ``provided_type`` out of the active context.

    Args:
        provided_type: Type to be supplied via context (e.g. app/request).
        scope: Context scope the value lives in (default: Scope.REQUEST).

    Returns:
        A Provider wired for context-based resolution.
    """
    result = Provider()
    result.from_context(provided_type, scope=scope)
    return result

many

many(
    interface,
    *implementations,
    scope=REQUEST,
    cache=True,
    when=None,
    collect=True,
)

Register multiple implementations as a collection.

PARAMETER DESCRIPTION
interface

Interface type for the collection.

TYPE: Any

*implementations

Implementation types or factory functions to include in collection.

TYPE: Any DEFAULT: ()

scope

Scope of the collection (default: Scope.REQUEST).

TYPE: Scope DEFAULT: REQUEST

cache

Whether to cache the resolve results within scope.

TYPE: bool DEFAULT: True

when

Optional marker to conditionally activate the provider.

TYPE: BaseMarker | None DEFAULT: None

collect

Whether to include collect+alias for Sequence/list resolution. Set to False to only register implementations without the collector.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
Provider

Provider configured for collection resolution.

RAISES DESCRIPTION
ValueError

If no implementations and collect is False.

Source code in src/waku/di/_providers.py
def many(
    interface: Any,
    *implementations: Any,
    scope: Scope = Scope.REQUEST,
    cache: bool = True,
    when: BaseMarker | None = None,
    collect: bool = True,
) -> Provider:
    """Bind several implementations of ``interface`` as one resolvable group.

    Args:
        interface: The common interface the implementations provide.
        *implementations: Implementation classes/factories for the group.
        scope: Lifetime of the bindings (default: Scope.REQUEST).
        cache: Cache resolved instances within their scope.
        when: Optional marker gating activation of the bindings.
        collect: Also register a collector plus a ``list`` alias so the whole
            group resolves as ``Sequence[interface]`` / ``list[interface]``.
            Set to False to register the implementations alone.

    Returns:
        The configured Provider.

    Raises:
        ValueError: When called with no implementations and ``collect=False``.
    """
    if not collect and not implementations:
        msg = 'At least one implementation must be provided when collect=False'
        raise ValueError(msg)

    group = Provider(scope=scope)
    for implementation in implementations:
        group.provide(implementation, provides=interface, cache=cache, when=when)
    if collect:
        # Collector gathers all bindings; the alias lets callers ask for a
        # plain list as well as a Sequence.
        group.collect(interface, scope=scope, cache=cache, provides=Sequence[interface])
        group.alias(Sequence[interface], provides=list[interface], cache=cache)
    return group

object_

object_(obj, *, provided_type=None, when=None)

Provide the exact object passed at creation time as a singleton dependency.

PARAMETER DESCRIPTION
obj

The instance to provide as-is.

TYPE: Any

provided_type

Explicit type to provide (default: inferred).

TYPE: Any | None DEFAULT: None

when

Optional marker to conditionally activate the provider.

TYPE: BaseMarker | None DEFAULT: None

RETURNS DESCRIPTION
Provider

Provider configured to return the given object.

Source code in src/waku/di/_providers.py
def object_(
    obj: Any,
    *,
    provided_type: Any | None = None,
    when: BaseMarker | None = None,
) -> Provider:
    """Expose an existing instance as an app-scoped (singleton) dependency.

    Args:
        obj: The instance that will be handed out verbatim.
        provided_type: Type to register it under (defaults to ``type(obj)``).
        when: Optional marker gating activation of the provider.

    Returns:
        A Provider that always yields ``obj``.
    """
    registered_as = type(obj) if provided_type is None else provided_type
    # The closure hands back the exact object; app scope + cache keeps it a singleton.
    return provider(lambda: obj, scope=Scope.APP, provided_type=registered_as, cache=True, when=when)

provider

provider(
    source,
    *,
    scope=REQUEST,
    provided_type=None,
    cache=True,
    when=None,
)
Source code in src/waku/di/_providers.py
def provider(
    source: Callable[..., Any] | type[Any],
    *,
    scope: Scope = Scope.REQUEST,
    provided_type: Any | None = None,
    cache: bool = True,
    when: BaseMarker | None = None,
) -> Provider:
    """Create a single-binding Provider for ``source`` in the given scope.

    Args:
        source: Class or factory callable producing the dependency.
        scope: Lifetime of the binding (default: Scope.REQUEST).
        provided_type: Type to register the binding under (default: inferred).
        cache: Cache resolved instances within the scope.
        when: Optional marker gating activation of the binding.

    Returns:
        The configured Provider.
    """
    result = Provider(scope=scope)
    result.provide(source, provides=provided_type, cache=cache, when=when)
    return result

scoped

scoped(
    interface_or_source,
    implementation=None,
    /,
    *,
    when=None,
)

Create a scoped provider (lifetime: request).

PARAMETER DESCRIPTION
interface_or_source

Interface type or source if no separate implementation.

TYPE: type[Any] | Callable[..., Any]

implementation

Implementation type if interface is provided.

TYPE: type[Any] | Callable[..., Any] | None DEFAULT: None

when

Optional marker to conditionally activate the provider.

TYPE: BaseMarker | None DEFAULT: None

RETURNS DESCRIPTION
Provider

Provider configured for request scope.

Source code in src/waku/di/_providers.py
def scoped(
    interface_or_source: type[Any] | Callable[..., Any],
    implementation: type[Any] | Callable[..., Any] | None = None,
    /,
    *,
    when: BaseMarker | None = None,
) -> Provider:
    """Build a request-scoped provider.

    Args:
        interface_or_source: Interface, or the source itself when no separate
            implementation is given.
        implementation: Implementation bound to the interface, if any.
        when: Optional marker gating activation of the provider.

    Returns:
        Provider configured for request scope.
    """
    if implementation is None:
        return provider(interface_or_source, scope=Scope.REQUEST, when=when)
    return provider(implementation, scope=Scope.REQUEST, provided_type=interface_or_source, when=when)

singleton

singleton(
    interface_or_source,
    implementation=None,
    /,
    *,
    when=None,
)

Create a singleton provider (lifetime: app).

PARAMETER DESCRIPTION
interface_or_source

Interface type or source if no separate implementation.

TYPE: type[Any] | Callable[..., Any]

implementation

Implementation type if interface is provided.

TYPE: type[Any] | Callable[..., Any] | None DEFAULT: None

when

Optional marker to conditionally activate the provider.

TYPE: BaseMarker | None DEFAULT: None

RETURNS DESCRIPTION
Provider

Provider configured for singleton scope.

Source code in src/waku/di/_providers.py
def singleton(
    interface_or_source: type[Any] | Callable[..., Any],
    implementation: type[Any] | Callable[..., Any] | None = None,
    /,
    *,
    when: BaseMarker | None = None,
) -> Provider:
    """Build an app-scoped (singleton) provider.

    Args:
        interface_or_source: Interface, or the source itself when no separate
            implementation is given.
        implementation: Implementation bound to the interface, if any.
        when: Optional marker gating activation of the provider.

    Returns:
        Provider configured for app (singleton) scope.
    """
    if implementation is None:
        return provider(interface_or_source, scope=Scope.APP, when=when)
    return provider(implementation, scope=Scope.APP, provided_type=interface_or_source, when=when)

transient

transient(
    interface_or_source,
    implementation=None,
    /,
    *,
    when=None,
)

Create a transient provider (new instance per injection).

PARAMETER DESCRIPTION
interface_or_source

Interface type or source if no separate implementation.

TYPE: type[Any] | Callable[..., Any]

implementation

Implementation type if interface is provided.

TYPE: type[Any] | Callable[..., Any] | None DEFAULT: None

when

Optional marker to conditionally activate the provider.

TYPE: BaseMarker | None DEFAULT: None

RETURNS DESCRIPTION
Provider

Provider configured for transient (no cache) scope.

Source code in src/waku/di/_providers.py
def transient(
    interface_or_source: type[Any] | Callable[..., Any],
    implementation: type[Any] | Callable[..., Any] | None = None,
    /,
    *,
    when: BaseMarker | None = None,
) -> Provider:
    """Build a transient provider: a new instance on every injection.

    Args:
        interface_or_source: Interface, or the source itself when no separate
            implementation is given.
        implementation: Implementation bound to the interface, if any.
        when: Optional marker gating activation of the provider.

    Returns:
        Provider configured with caching disabled (transient lifetime).
    """
    if implementation is None:
        return provider(interface_or_source, scope=Scope.REQUEST, cache=False, when=when)
    return provider(implementation, scope=Scope.REQUEST, provided_type=interface_or_source, cache=False, when=when)

eventsourcing

AggregateT module-attribute

AggregateT = TypeVar(
    'AggregateT', bound=EventSourcedAggregate
)

CommandT module-attribute

CommandT = TypeVar('CommandT')

EventT module-attribute

EventT = TypeVar('EventT', bound=IEvent)

StateT module-attribute

StateT = TypeVar('StateT')

DataT module-attribute

DataT = TypeVar('DataT', bound='IEvent', default='IEvent')

ExpectedVersion module-attribute

ExpectedVersion = (
    Exact | NoStream | StreamExists | AnyVersion
)

EventTypeSpec module-attribute

EventTypeSpec = 'type[IEvent] | EventType'

EventSourcedAggregate

EventSourcedAggregate()

Bases: ABC

Source code in src/waku/eventsourcing/contracts/aggregate.py
def __init__(self) -> None:
    """Start a brand-new aggregate with no persisted history."""
    # Events raised since the last persistence, drained by collect_events().
    self._pending_events: list[IEvent] = []
    # -1 marks "never persisted"; load_from_history/mark_persisted update it.
    self._version = -1

version property

version

collect_events

collect_events()
Source code in src/waku/eventsourcing/contracts/aggregate.py
def collect_events(self) -> list[IEvent]:
    """Drain the pending events, returning them in the order they were raised."""
    drained = self._pending_events[:]
    self._pending_events.clear()
    return drained

mark_persisted

mark_persisted(version)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def mark_persisted(self, version: int) -> None:
    """Record that the aggregate's events were stored at ``version``."""
    self._version = version

load_from_history

load_from_history(events, version)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def load_from_history(self, events: Sequence[IEvent], version: int) -> None:
    """Rebuild state by replaying ``events`` in order, then pin the version."""
    for evt in events:
        self._apply(evt)
    self._version = version

IDecider

Bases: Protocol[StateT, _CommandT_contra, EventT]

initial_state

initial_state()
Source code in src/waku/eventsourcing/contracts/aggregate.py
def initial_state(self) -> StateT: ...  # starting state fed to evolve() before any events are applied

decide

decide(command, state)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def decide(self, command: _CommandT_contra, state: StateT) -> Sequence[EventT]: ...  # map (command, state) to resulting events

evolve

evolve(state, event)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def evolve(self, state: StateT, event: EventT) -> StateT: ...  # next state after applying a single event

EventEnvelope dataclass

EventEnvelope(
    *,
    domain_event,
    idempotency_key,
    metadata=EventMetadata(),
)

domain_event instance-attribute

domain_event

idempotency_key instance-attribute

idempotency_key

metadata class-attribute instance-attribute

metadata = field(default_factory=EventMetadata)

EventMetadata dataclass

EventMetadata(
    *, correlation_id=None, causation_id=None, extra=dict()
)

correlation_id class-attribute instance-attribute

correlation_id = None

causation_id class-attribute instance-attribute

causation_id = None

extra class-attribute instance-attribute

extra = field(default_factory=dict)

IMetadataEnricher

Bases: ABC

Enriches event metadata before persistence.

enrich abstractmethod

enrich(metadata)
Source code in src/waku/eventsourcing/contracts/event.py
@abc.abstractmethod
def enrich(self, metadata: EventMetadata, /) -> EventMetadata: ...  # transform metadata prior to persistence

StoredEvent dataclass

StoredEvent(
    *,
    event_id,
    stream_id,
    event_type,
    position,
    global_position,
    timestamp,
    data,
    metadata,
    idempotency_key,
    schema_version=1,
)

Bases: Generic[DataT]

event_id instance-attribute

event_id

stream_id instance-attribute

stream_id

event_type instance-attribute

event_type

position instance-attribute

position

global_position instance-attribute

global_position

timestamp instance-attribute

timestamp

data instance-attribute

data

metadata instance-attribute

metadata

idempotency_key instance-attribute

idempotency_key

schema_version class-attribute instance-attribute

schema_version = 1

AnyVersion dataclass

AnyVersion()

Exact dataclass

Exact(version)

version instance-attribute

version

NoStream dataclass

NoStream()

StreamExists dataclass

StreamExists()

StreamId dataclass

StreamId(stream_type, stream_key)

stream_type instance-attribute

stream_type

stream_key instance-attribute

stream_key

value property

value

for_aggregate classmethod

for_aggregate(aggregate_type, aggregate_id)
Source code in src/waku/eventsourcing/contracts/stream.py
@classmethod
def for_aggregate(cls, aggregate_type: str, aggregate_id: str) -> StreamId:
    """Build the stream identity for a single aggregate instance."""
    return cls(stream_key=aggregate_id, stream_type=aggregate_type)

from_value classmethod

from_value(value)
Source code in src/waku/eventsourcing/contracts/stream.py
@classmethod
def from_value(cls, value: str) -> StreamId:
    """Parse a ``'{stream_type}-{stream_key}'`` string back into a StreamId."""
    # partition() splits at the FIRST dash, so keys may contain dashes.
    stream_type, sep, stream_key = value.partition('-')
    if not (sep and stream_type and stream_key):
        msg = f"Invalid stream ID format: {value!r}. Expected '{{stream_type}}-{{stream_key}}'"
        raise ValueError(msg)
    return cls(stream_type=stream_type, stream_key=stream_key)

StreamPosition

Bases: Enum

START class-attribute instance-attribute

START = 'start'

END class-attribute instance-attribute

END = 'end'

DeciderCommandHandler

DeciderCommandHandler(repository, decider, publisher)

Bases: RequestHandler[RequestT, ResponseT], ABC, Generic[RequestT, StateT, CommandT, EventT, ResponseT]

Source code in src/waku/eventsourcing/decider/handler.py
def __init__(
    self,
    repository: DeciderRepository[StateT, CommandT, EventT],
    decider: IDecider[StateT, CommandT, EventT],
    publisher: IPublisher,
) -> None:
    """Store the collaborators used by handle().

    Args:
        repository: Loads and saves decider state for an aggregate.
        decider: Decision logic (decide/evolve) applied to commands.
        publisher: Publishes events after they have been saved.
    """
    self._repository = repository
    self._decider = decider
    self._publisher = publisher

max_attempts class-attribute

max_attempts = 3

handle async

handle(request)
Source code in src/waku/eventsourcing/decider/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    """Translate the request into a command, run the decider, persist, publish.

    The whole load→decide→save cycle is wrapped in
    execute_with_optimistic_retry with up to ``self.max_attempts`` attempts.
    """
    aggregate_id: str = self._aggregate_id(request)
    command: CommandT = self._to_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        # State is re-loaded on every attempt so a retry sees the latest version.
        state, version = await self._repository.load(aggregate_id)
        idempotency_key = self._idempotency_key(request, version)

        events = self._decider.decide(command, state)
        # Fold the new events into state so save() receives the current snapshot.
        for event in events:
            state = self._decider.evolve(state, event)

        new_version: int = await self._repository.save(
            aggregate_id,
            events,
            version,
            current_state=state,
            idempotency_key=idempotency_key,
        )

        # Publish only after the save succeeded, so subscribers never see
        # events that were rejected by the store.
        for event in events:
            await self._publisher.publish(event)

        return self._to_response(state, new_version)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

DeciderRepository

DeciderRepository(decider, event_store)

Bases: ABC, Generic[StateT, CommandT, EventT]

Source code in src/waku/eventsourcing/decider/repository.py
def __init__(
    self,
    decider: IDecider[StateT, CommandT, EventT],
    event_store: IEventStore,
) -> None:
    """Store the decider and event store used by load()/save().

    Args:
        decider: Supplies initial_state/evolve for rebuilding state.
        event_store: Store the aggregate stream is read from and appended to.
    """
    self._decider = decider
    self._event_store = event_store

aggregate_name class-attribute

aggregate_name

max_stream_length class-attribute

max_stream_length = None

load async

load(aggregate_id)
Source code in src/waku/eventsourcing/decider/repository.py
async def load(self, aggregate_id: str) -> tuple[StateT, int]:
    """Fold the aggregate's event stream into state; return (state, version)."""
    stream_id = self._stream_id(aggregate_id)
    history = await read_aggregate_stream(
        self._event_store,
        stream_id,
        max_stream_length=self.max_stream_length,
    )
    state = self._decider.initial_state()
    for record in cast('list[StoredEvent[EventT]]', history):
        state = self._decider.evolve(state, record.data)
    # -1 signals an empty stream (aggregate has never been persisted).
    version = history[-1].position if history else -1
    logger.debug('Loaded %d events for %s/%s', len(history), self.aggregate_name, aggregate_id)
    return state, version

save async

save(
    aggregate_id,
    events,
    expected_version,
    *,
    current_state=None,
    idempotency_key=None,
)
Source code in src/waku/eventsourcing/decider/repository.py
async def save(
    self,
    aggregate_id: str,
    events: typing.Sequence[EventT],
    expected_version: int,
    *,
    current_state: StateT | None = None,  # noqa: ARG002
    idempotency_key: str | None = None,
) -> int:
    """Append ``events`` to the aggregate's stream with optimistic locking.

    Returns the stream version after the append, or ``expected_version``
    unchanged when there is nothing to write.
    """
    if not events:
        # Nothing to persist; report the version the caller already holds.
        return expected_version
    stream_id = self._stream_id(aggregate_id)
    envelopes = [
        EventEnvelope(
            domain_event=e,
            # Derive a per-event key from the caller's key when provided;
            # otherwise fall back to a random UUID per event.
            idempotency_key=f'{idempotency_key}:{i}' if idempotency_key else str(uuid.uuid4()),
        )
        for i, e in enumerate(events)
    ]
    # expected_version < 0 means the stream must not exist yet.
    expected = Exact(version=expected_version) if expected_version >= 0 else NoStream()
    new_version = await self._event_store.append_to_stream(stream_id, envelopes, expected_version=expected)
    logger.debug(
        'Saved %d events to %s/%s, version %d',
        len(events),
        self.aggregate_name,
        aggregate_id,
        new_version,
    )
    return new_version

DeciderVoidCommandHandler

DeciderVoidCommandHandler(repository, decider, publisher)

Bases: DeciderCommandHandler[RequestT, StateT, CommandT, EventT, None], ABC, Generic[RequestT, StateT, CommandT, EventT]

Source code in src/waku/eventsourcing/decider/handler.py
def __init__(
    self,
    repository: DeciderRepository[StateT, CommandT, EventT],
    decider: IDecider[StateT, CommandT, EventT],
    publisher: IPublisher,
) -> None:
    """Store the collaborators used by handle().

    Args:
        repository: Loads and saves decider state for an aggregate.
        decider: Decision logic (decide/evolve) applied to commands.
        publisher: Publishes events after they have been saved.
    """
    self._repository = repository
    self._decider = decider
    self._publisher = publisher

max_attempts class-attribute

max_attempts = 3

handle async

handle(request)
Source code in src/waku/eventsourcing/decider/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    """Translate the request into a command, run the decider, persist, publish.

    The whole load→decide→save cycle is wrapped in
    execute_with_optimistic_retry with up to ``self.max_attempts`` attempts.
    """
    aggregate_id: str = self._aggregate_id(request)
    command: CommandT = self._to_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        # State is re-loaded on every attempt so a retry sees the latest version.
        state, version = await self._repository.load(aggregate_id)
        idempotency_key = self._idempotency_key(request, version)

        events = self._decider.decide(command, state)
        # Fold the new events into state so save() receives the current snapshot.
        for event in events:
            state = self._decider.evolve(state, event)

        new_version: int = await self._repository.save(
            aggregate_id,
            events,
            version,
            current_state=state,
            idempotency_key=idempotency_key,
        )

        # Publish only after the save succeeded, so subscribers never see
        # events that were rejected by the store.
        for event in events:
            await self._publisher.publish(event)

        return self._to_response(state, new_version)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

SnapshotDeciderRepository

SnapshotDeciderRepository(
    decider,
    event_store,
    snapshot_store,
    snapshot_config_registry,
    state_serializer,
)

Bases: DeciderRepository[StateT, CommandT, EventT], ABC

Source code in src/waku/eventsourcing/decider/repository.py
def __init__(
    self,
    decider: IDecider[StateT, CommandT, EventT],
    event_store: IEventStore,
    snapshot_store: ISnapshotStore,
    snapshot_config_registry: SnapshotConfigRegistry,
    state_serializer: ISnapshotStateSerializer,
) -> None:
    """Repository that accelerates loads with snapshots of decider state.

    Args:
        decider: Supplies initial_state/evolve for rebuilding state.
        event_store: Store the aggregate stream is read from and appended to.
        snapshot_store: Backing store for state snapshots.
        snapshot_config_registry: Per-aggregate snapshot configuration lookup.
        state_serializer: Serializes/deserializes snapshot state payloads.
    """
    super().__init__(decider, event_store)
    self._state_serializer = state_serializer
    # The concrete state type comes from the decider's initial state; it is
    # needed to deserialize snapshot payloads back into StateT in load().
    self._state_type: type[StateT] = type(self._decider.initial_state())
    config = snapshot_config_registry.get(self.aggregate_name)
    self._snapshot_manager = SnapshotManager(
        store=snapshot_store,
        config=config,
        state_type_name=self.snapshot_state_type or self._state_type.__name__,
    )

aggregate_name class-attribute

aggregate_name

max_stream_length class-attribute

max_stream_length = None

snapshot_state_type class-attribute

snapshot_state_type = None

load async

load(aggregate_id)
Source code in src/waku/eventsourcing/decider/repository.py
async def load(self, aggregate_id: str) -> tuple[StateT, int]:
    """Load state, using a snapshot as the starting point when one exists.

    Falls back to a full event replay (the parent implementation) when no
    snapshot is available for the aggregate.
    """
    stream_id = self._stream_id(aggregate_id)
    snapshot = await self._snapshot_manager.load_snapshot(stream_id, aggregate_id)

    if snapshot is not None:
        logger.debug('Loaded snapshot for %s/%s at version %d', self.aggregate_name, aggregate_id, snapshot.version)
        state = self._state_serializer.deserialize(snapshot.state, self._state_type)
        # Replay only the events appended after the snapshot was taken.
        stored_events = await read_aggregate_stream(
            self._event_store,
            stream_id,
            start=snapshot.version + 1,
            max_stream_length=self.max_stream_length,
        )
        for stored in cast('list[StoredEvent[EventT]]', stored_events):
            state = self._decider.evolve(state, stored.data)
        # Version is the last replayed position, or the snapshot's version
        # when no newer events exist.
        version = stored_events[-1].position if stored_events else snapshot.version
        return state, version

    logger.debug('No snapshot for %s/%s, loading from events', self.aggregate_name, aggregate_id)
    return await super().load(aggregate_id)

save async

save(
    aggregate_id,
    events,
    expected_version,
    *,
    current_state=None,
    idempotency_key=None,
)
Source code in src/waku/eventsourcing/decider/repository.py
async def save(
    self,
    aggregate_id: str,
    events: typing.Sequence[EventT],
    expected_version: int,
    *,
    current_state: StateT | None = None,
    idempotency_key: str | None = None,
) -> int:
    """Append events via the parent save(), then snapshot state when due.

    Returns the new stream version reported by the parent save().
    """
    new_version = await super().save(
        aggregate_id,
        events,
        expected_version,
        current_state=current_state,
        idempotency_key=idempotency_key,
    )

    if events and self._snapshot_manager.should_save(aggregate_id, new_version):
        if current_state is not None:
            # Caller already has the up-to-date state; avoid a reload.
            state = current_state
        else:
            # Otherwise rebuild it (load() may itself use an older snapshot).
            state, _ = await self.load(aggregate_id)
        state_data = self._state_serializer.serialize(state)
        stream_id = self._stream_id(aggregate_id)
        await self._snapshot_manager.save_snapshot(stream_id, aggregate_id, state_data, new_version)

    return new_version

AggregateNotFoundError

AggregateNotFoundError(aggregate_type, aggregate_id)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, aggregate_type: str, aggregate_id: str) -> None:
    """Record which aggregate was missing and build the error message."""
    self.aggregate_type = aggregate_type
    self.aggregate_id = aggregate_id
    message = f'{aggregate_type} with id {aggregate_id!r} not found'
    super().__init__(message)

aggregate_type instance-attribute

aggregate_type = aggregate_type

aggregate_id instance-attribute

aggregate_id = aggregate_id

ConcurrencyConflictError

ConcurrencyConflictError(
    stream_id, expected_version, actual_version
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, expected_version: int, actual_version: int) -> None:
    """Record the stream and the expected/actual versions of the failed append."""
    self.stream_id = stream_id
    self.expected_version = expected_version
    self.actual_version = actual_version
    detail = (
        f'Concurrency conflict on stream {stream_id}: '
        f'expected version {expected_version}, actual {actual_version}'
    )
    super().__init__(detail)

stream_id instance-attribute

stream_id = stream_id

expected_version instance-attribute

expected_version = expected_version

actual_version instance-attribute

actual_version = actual_version

ConflictingEventTypeError

ConflictingEventTypeError(
    event_type_name,
    existing_name,
    existing_version,
    attempted_name,
    attempted_version,
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(
    self,
    event_type_name: str,
    existing_name: str,
    existing_version: int,
    attempted_name: str,
    attempted_version: int,
) -> None:
    """Build a message describing the conflicting event-type registration.

    Stores all constructor arguments on the instance so callers can inspect
    the conflict programmatically — every other exception in this module
    does so; this one previously discarded them.
    """
    self.event_type_name = event_type_name
    self.existing_name = existing_name
    self.existing_version = existing_version
    self.attempted_name = attempted_name
    self.attempted_version = attempted_version
    if existing_name != attempted_name:
        detail = f'name {existing_name!r} → {attempted_name!r}'
    else:
        detail = f'version v{existing_version} → v{attempted_version}'
    super().__init__(f'Conflicting registration for event type {event_type_name!r}: {detail}')

DuplicateAggregateNameError

DuplicateAggregateNameError(
    aggregate_name, repository_names
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, aggregate_name: str, repository_names: list[str]) -> None:
    """Record the clashing aggregate name and the repositories claiming it."""
    self.aggregate_name = aggregate_name
    self.repository_names = repository_names
    joined = ', '.join(repository_names)
    super().__init__(f'Duplicate aggregate name {aggregate_name!r} used by multiple repositories: {joined}')

aggregate_name instance-attribute

aggregate_name = aggregate_name

repository_names instance-attribute

repository_names = repository_names

DuplicateEventTypeError

DuplicateEventTypeError(event_type_name)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, event_type_name: str) -> None:
    """Record the event-type name that was registered twice."""
    self.event_type_name = event_type_name
    super().__init__(f'Event type {event_type_name!r} is already registered')

event_type_name instance-attribute

event_type_name = event_type_name

DuplicateIdempotencyKeyError

DuplicateIdempotencyKeyError(stream_id, *, reason)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, *, reason: str) -> None:
    """Record the stream and the reason text embedded in the message."""
    self.reason = reason
    self.stream_id = stream_id
    message = f'Duplicate idempotency keys ({reason}) on stream {stream_id}'
    super().__init__(message)

stream_id instance-attribute

stream_id = stream_id

reason instance-attribute

reason = reason

EventSourcingConfigError

EventSourcingError

Bases: WakuError

PartialDuplicateAppendError

PartialDuplicateAppendError(
    stream_id, existing_count, total_count
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, existing_count: int, total_count: int) -> None:
    """Record how many of the appended idempotency keys already existed."""
    self.stream_id = stream_id
    self.existing_count = existing_count
    self.total_count = total_count
    message = (
        f'Partial duplicate append on stream {stream_id}: '
        f'{existing_count} of {total_count} idempotency keys already exist'
    )
    super().__init__(message)

stream_id instance-attribute

stream_id = stream_id

existing_count instance-attribute

existing_count = existing_count

total_count instance-attribute

total_count = total_count

ProjectionError

ProjectionStoppedError

ProjectionStoppedError(projection_name, cause)

Bases: ProjectionError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, projection_name: str, cause: Exception) -> None:
    """Record the stopped projection and the exception that stopped it."""
    self.cause = cause
    self.projection_name = projection_name
    super().__init__(f'Projection {projection_name!r} stopped due to error: {cause}')

projection_name instance-attribute

projection_name = projection_name

cause instance-attribute

cause = cause

RegistryFrozenError

RegistryFrozenError()

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self) -> None:
    """Raised when event types are registered after the registry was frozen."""
    super().__init__('Cannot register event types after registry is frozen')

SnapshotConfigNotFoundError

SnapshotConfigNotFoundError(aggregate_name)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, aggregate_name: str) -> None:
    """Record the aggregate that has no snapshot configuration."""
    self.aggregate_name = aggregate_name
    message = (
        f'No snapshot config found for aggregate {aggregate_name!r}. '
        f'Provide snapshot=SnapshotOptions(...) via bind_aggregate() or bind_decider().'
    )
    super().__init__(message)

aggregate_name instance-attribute

aggregate_name = aggregate_name

SnapshotMigrationChainError

SnapshotTypeMismatchError

SnapshotTypeMismatchError(
    stream_id, expected_type, actual_type
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, expected_type: str, actual_type: str) -> None:
    """Record the stream plus the expected/actual snapshot state type names."""
    self.stream_id = stream_id
    self.expected_type = expected_type
    self.actual_type = actual_type
    message = f'Snapshot type mismatch on stream {stream_id}: expected {expected_type!r}, got {actual_type!r}'
    super().__init__(message)

stream_id instance-attribute

stream_id = stream_id

expected_type instance-attribute

expected_type = expected_type

actual_type instance-attribute

actual_type = actual_type

StreamDeletedError

StreamDeletedError(stream_id)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId) -> None:
    """Record the id of the deleted stream."""
    self.stream_id = stream_id
    super().__init__(f'Stream {stream_id} is deleted')

stream_id instance-attribute

stream_id = stream_id

StreamNotFoundError

StreamNotFoundError(stream_id)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId) -> None:
    """Record the id of the missing stream."""
    self.stream_id = stream_id
    super().__init__(f'Stream {stream_id} not found')

stream_id instance-attribute

stream_id = stream_id

StreamTooLargeError

StreamTooLargeError(stream_id, max_length)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, max_length: int) -> None:
    """Record the stream that exceeded the configured maximum replay length."""
    self.max_length = max_length
    self.stream_id = stream_id
    message = (
        f'Stream {stream_id} exceeds maximum length of {max_length} events. '
        f'Configure snapshots to reduce stream replay size.'
    )
    super().__init__(message)

stream_id instance-attribute

stream_id = stream_id

max_length instance-attribute

max_length = max_length

UnknownEventTypeError

UnknownEventTypeError(event_type_name)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, event_type_name: str) -> None:
    """Record the event-type name that has no registration."""
    self.event_type_name = event_type_name
    super().__init__(f'Unknown event type: {event_type_name!r}')

event_type_name instance-attribute

event_type_name = event_type_name

UpcasterChainError

EventSourcedCommandHandler

EventSourcedCommandHandler(repository, publisher)

Bases: RequestHandler[RequestT, ResponseT], ABC, Generic[RequestT, AggregateT, ResponseT]

Source code in src/waku/eventsourcing/handler.py
def __init__(
    self,
    repository: EventSourcedRepository[AggregateT],
    publisher: IPublisher,
) -> None:
    """Inject the aggregate repository and the event publisher."""
    self._publisher = publisher
    self._repository = repository

max_attempts class-attribute

max_attempts = 3

handle async

handle(request)
Source code in src/waku/eventsourcing/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    """Load (or create) the aggregate, execute the command, save, then publish.

    The whole attempt runs under execute_with_optimistic_retry — presumably
    re-running _attempt on optimistic-concurrency conflicts up to
    max_attempts times; confirm against that helper's implementation.
    """
    aggregate_id: str = self._aggregate_id(request)
    is_creation: bool = self._is_creation_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        # Creation commands start from a fresh aggregate; others replay history.
        if is_creation:
            aggregate = self._repository.create_aggregate()
        else:
            aggregate = await self._repository.load(aggregate_id)

        # The key is derived per attempt from the freshly loaded version.
        idempotency_key = self._idempotency_key(request, aggregate.version)
        await self._execute(request, aggregate)

        _, events = await self._repository.save(
            aggregate_id,
            aggregate,
            idempotency_key=idempotency_key,
        )

        # Publish only events that were actually persisted by save().
        for event in events:
            await self._publisher.publish(event)

        return self._to_response(aggregate)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        is_creation=is_creation,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

EventSourcedVoidCommandHandler

EventSourcedVoidCommandHandler(repository, publisher)

Bases: EventSourcedCommandHandler[RequestT, AggregateT, None], ABC, Generic[RequestT, AggregateT]

Source code in src/waku/eventsourcing/handler.py
def __init__(
    self,
    repository: EventSourcedRepository[AggregateT],
    publisher: IPublisher,
) -> None:
    """Inject the aggregate repository and the event publisher."""
    self._publisher = publisher
    self._repository = repository

max_attempts class-attribute

max_attempts = 3

handle async

handle(request)
Source code in src/waku/eventsourcing/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    """Load (or create) the aggregate, execute the command, save, then publish.

    Same flow as the base command handler; the response type is None for
    void handlers. Runs under execute_with_optimistic_retry — presumably
    retrying on optimistic-concurrency conflicts; confirm in that helper.
    """
    aggregate_id: str = self._aggregate_id(request)
    is_creation: bool = self._is_creation_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        # Creation commands start from a fresh aggregate; others replay history.
        if is_creation:
            aggregate = self._repository.create_aggregate()
        else:
            aggregate = await self._repository.load(aggregate_id)

        idempotency_key = self._idempotency_key(request, aggregate.version)
        await self._execute(request, aggregate)

        _, events = await self._repository.save(
            aggregate_id,
            aggregate,
            idempotency_key=idempotency_key,
        )

        # Publish only events that were actually persisted by save().
        for event in events:
            await self._publisher.publish(event)

        return self._to_response(aggregate)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        is_creation=is_creation,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

EventSourcingConfig dataclass

EventSourcingConfig(
    *,
    store,
    event_serializer=None,
    snapshot_store=None,
    snapshot_state_serializer=None,
    checkpoint_store=None,
    enrichers=(),
)

store instance-attribute

store

event_serializer class-attribute instance-attribute

event_serializer = None

snapshot_store class-attribute instance-attribute

snapshot_store = None

snapshot_state_serializer class-attribute instance-attribute

snapshot_state_serializer = None

checkpoint_store class-attribute instance-attribute

checkpoint_store = None

enrichers class-attribute instance-attribute

enrichers = ()

EventSourcingExtension dataclass

EventSourcingExtension()

Bases: OnModuleConfigure

catch_up_bindings property

catch_up_bindings

registry property

registry

bind_aggregate

bind_aggregate(
    repository,
    event_types=(),
    projections=(),
    snapshot=None,
)
Source code in src/waku/eventsourcing/modules.py
def bind_aggregate(
    self,
    repository: type[EventSourcedRepository[Any]],
    event_types: Sequence[EventTypeSpec] = (),
    projections: Sequence[type[IProjection]] = (),
    snapshot: SnapshotOptions | None = None,
) -> Self:
    """Register an aggregate repository with its events, projections and
    optional snapshot options. Returns self so calls can be chained."""
    binding = AggregateBinding(
        repository=repository,
        event_types=event_types,
        projections=projections,
        snapshot=snapshot,
    )
    self._bindings.append(binding)
    self._registry.projection_types.extend(projections)
    self._registry.event_type_bindings.extend(event_types)
    return self

bind_decider

bind_decider(
    repository,
    decider,
    event_types=(),
    projections=(),
    snapshot=None,
)
Source code in src/waku/eventsourcing/modules.py
def bind_decider(
    self,
    repository: type[DeciderRepository[Any, Any, Any]],
    decider: type[IDecider[Any, Any, Any]],
    event_types: Sequence[EventTypeSpec] = (),
    projections: Sequence[type[IProjection]] = (),
    snapshot: SnapshotOptions | None = None,
) -> Self:
    """Register a decider-style repository with its decider, events,
    projections and optional snapshot options. Returns self for chaining."""
    binding = DeciderBinding(
        repository=repository,
        decider=decider,
        event_types=event_types,
        projections=projections,
        snapshot=snapshot,
    )
    self._decider_bindings.append(binding)
    self._registry.projection_types.extend(projections)
    self._registry.event_type_bindings.extend(event_types)
    return self

bind_catch_up_projection

bind_catch_up_projection(
    projection,
    *,
    error_policy=STOP,
    max_retry_attempts=0,
    base_retry_delay_seconds=10.0,
    max_retry_delay_seconds=300.0,
    batch_size=100,
    gap_detection_enabled=False,
    gap_timeout_seconds=10.0,
)
Source code in src/waku/eventsourcing/modules.py
def bind_catch_up_projection(  # noqa: PLR0913
    self,
    projection: type[ICatchUpProjection],
    *,
    error_policy: ErrorPolicy = ErrorPolicy.STOP,
    max_retry_attempts: int = 0,
    base_retry_delay_seconds: float = 10.0,
    max_retry_delay_seconds: float = 300.0,
    batch_size: int = 100,
    gap_detection_enabled: bool = False,
    gap_timeout_seconds: float = 10.0,
) -> Self:
    """Register a catch-up (polling) projection with its error/retry policy.

    Returns self so registrations can be chained.
    """
    self._registry.catch_up_projection_types.append(projection)
    binding = CatchUpProjectionBinding(
        projection=projection,
        error_policy=error_policy,
        max_retry_attempts=max_retry_attempts,
        base_retry_delay_seconds=base_retry_delay_seconds,
        max_retry_delay_seconds=max_retry_delay_seconds,
        batch_size=batch_size,
        gap_detection_enabled=gap_detection_enabled,
        gap_timeout_seconds=gap_timeout_seconds,
    )
    self._catch_up_bindings.append(binding)
    return self

aggregate_names

aggregate_names()
Source code in src/waku/eventsourcing/modules.py
def aggregate_names(self) -> Iterator[tuple[str, type]]:
    """Yield (aggregate_name, repository_type) for every aggregate and
    decider binding, in registration order."""
    for binding in (*self._bindings, *self._decider_bindings):
        yield binding.repository.aggregate_name, binding.repository

snapshot_bindings

snapshot_bindings()
Source code in src/waku/eventsourcing/modules.py
def snapshot_bindings(self) -> Iterator[tuple[str, SnapshotOptions]]:
    """Yield (aggregate_name, SnapshotOptions) for bindings that opted into
    snapshots; bindings with snapshot=None are skipped."""
    for binding in (*self._bindings, *self._decider_bindings):
        if binding.snapshot is not None:
            yield binding.repository.aggregate_name, binding.snapshot

on_module_configure

on_module_configure(metadata)
Source code in src/waku/eventsourcing/modules.py
def on_module_configure(self, metadata: ModuleMetadata) -> None:
    """Contribute DI providers for every bound repository and decider."""
    for binding in self._bindings:
        repo_type = binding.repository
        # WithParents[...] registers the concrete repo under its base classes too.
        metadata.providers.append(scoped(WithParents[repo_type], repo_type))  # type: ignore[misc,valid-type]

    for binding in self._decider_bindings:
        repo_type = binding.repository
        metadata.providers.append(scoped(WithParents[repo_type], repo_type))  # type: ignore[misc,valid-type]
        # Deciders are additionally provided under their resolved interface.
        decider_iface = self._resolve_decider_interface(repo_type)
        metadata.providers.append(scoped(decider_iface, binding.decider))

EventSourcingModule

register classmethod

register(config)
Source code in src/waku/eventsourcing/modules.py
@classmethod
def register(cls, config: EventSourcingConfig, /) -> DynamicModule:
    """Build the global event-sourcing module from *config*.

    The event store is always provided; serializer, snapshot store/serializer
    and checkpoint store are added only when configured.
    """
    providers: list[Provider] = [scoped(IEventStore, config.store)]

    optional_bindings = (
        (IEventSerializer, config.event_serializer),
        (ISnapshotStore, config.snapshot_store),
        (ISnapshotStateSerializer, config.snapshot_state_serializer),
        (ICheckpointStore, config.checkpoint_store),
    )
    for interface, implementation in optional_bindings:
        if implementation is not None:
            providers.append(scoped(interface, implementation))

    providers.append(many(IMetadataEnricher, *config.enrichers))

    return DynamicModule(
        parent_module=cls,
        providers=providers,
        extensions=[
            EventSourcingRegistryAggregator(has_serializer=config.event_serializer is not None),
        ],
        is_global=True,
    )

EventType dataclass

EventType(
    event_type,
    *,
    name=None,
    aliases=(),
    version=1,
    upcasters=(),
)

event_type instance-attribute

event_type

name class-attribute instance-attribute

name = field(default=None, kw_only=True)

aliases class-attribute instance-attribute

aliases = field(default=(), kw_only=True)

version class-attribute instance-attribute

version = field(default=1, kw_only=True)

upcasters class-attribute instance-attribute

upcasters = field(default=(), kw_only=True)

SnapshotOptions dataclass

SnapshotOptions(
    *, strategy, schema_version=1, migrations=()
)

strategy instance-attribute

strategy

schema_version class-attribute instance-attribute

schema_version = 1

migrations class-attribute instance-attribute

migrations = ()

CatchUpProjectionBinding dataclass

CatchUpProjectionBinding(
    *,
    projection,
    error_policy=STOP,
    max_retry_attempts=0,
    base_retry_delay_seconds=10.0,
    max_retry_delay_seconds=300.0,
    batch_size=100,
    event_type_names=None,
    gap_detection_enabled=False,
    gap_timeout_seconds=10.0,
)

projection instance-attribute

projection

error_policy class-attribute instance-attribute

error_policy = STOP

max_retry_attempts class-attribute instance-attribute

max_retry_attempts = 0

base_retry_delay_seconds class-attribute instance-attribute

base_retry_delay_seconds = 10.0

max_retry_delay_seconds class-attribute instance-attribute

max_retry_delay_seconds = 300.0

batch_size class-attribute instance-attribute

batch_size = 100

event_type_names class-attribute instance-attribute

event_type_names = None

gap_detection_enabled class-attribute instance-attribute

gap_detection_enabled = False

gap_timeout_seconds class-attribute instance-attribute

gap_timeout_seconds = 10.0

ErrorPolicy

Bases: StrEnum

SKIP class-attribute instance-attribute

SKIP = auto()

STOP class-attribute instance-attribute

STOP = auto()

ICatchUpProjection

Bases: IProjection, ABC

Projection that processes events asynchronously via polling.

At-least-once delivery: the checkpoint is saved after project() processes a batch, so a crash before checkpoint save causes re-delivery on restart. project() must be idempotent.

Set event_types to filter which event types this projection receives. When None (default), all events are delivered.

projection_name class-attribute

projection_name

event_types class-attribute

event_types = None

project abstractmethod async

project(events)
Source code in src/waku/eventsourcing/projection/interfaces.py
# Abstract hook: apply a batch of stored events to the read model.
# Per the class contract, delivery is at-least-once, so implementations
# must be idempotent.
@abc.abstractmethod
async def project(self, events: Sequence[StoredEvent], /) -> None: ...

on_skip async

on_skip(events, error)
Source code in src/waku/eventsourcing/projection/interfaces.py
async def on_skip(self, events: Sequence[StoredEvent], error: Exception) -> None:
    """Hook called when a batch is skipped after *error*; default is a no-op."""
    pass

teardown async

teardown()
Source code in src/waku/eventsourcing/projection/interfaces.py
async def teardown(self) -> None:
    """Hook for clearing projection state (used by rebuild); default is a no-op."""
    pass

ICheckpointStore

Bases: ABC

load abstractmethod async

load(projection_name)
Source code in src/waku/eventsourcing/projection/interfaces.py
# Abstract hook: return the stored checkpoint for the projection, or None
# when it has never saved one.
@abc.abstractmethod
async def load(self, projection_name: str, /) -> Checkpoint | None: ...

save abstractmethod async

save(checkpoint)
Source code in src/waku/eventsourcing/projection/interfaces.py
# Abstract hook: persist the checkpoint for its projection.
@abc.abstractmethod
async def save(self, checkpoint: Checkpoint, /) -> None: ...

IProjection

Bases: ABC

projection_name class-attribute

projection_name

project abstractmethod async

project(events)
Source code in src/waku/eventsourcing/projection/interfaces.py
# Abstract hook: apply a batch of stored events to the read model.
@abc.abstractmethod
async def project(self, events: Sequence[StoredEvent], /) -> None: ...

CatchUpProjectionRegistry

CatchUpProjectionRegistry(bindings)
Source code in src/waku/eventsourcing/projection/registry.py
def __init__(self, bindings: tuple[CatchUpProjectionBinding, ...]) -> None:
    """Index bindings by projection name, rejecting duplicate names."""
    self._bindings = bindings
    indexed: dict[str, CatchUpProjectionBinding] = {}
    for binding in self._bindings:
        projection_name = binding.projection.projection_name
        if projection_name in indexed:
            msg = f'Duplicate projection name {projection_name!r}'
            raise ValueError(msg)
        indexed[projection_name] = binding
    self._by_name = indexed

get

get(projection_name)
Source code in src/waku/eventsourcing/projection/registry.py
def get(self, projection_name: str) -> CatchUpProjectionBinding:
    """Return the binding registered under *projection_name*.

    Raises ValueError when no such projection is registered.
    """
    binding = self._by_name.get(projection_name)
    if binding is None:
        msg = f'Projection {projection_name!r} not found'
        raise ValueError(msg) from None
    return binding

CatchUpProjectionRunner

CatchUpProjectionRunner(
    container, lock, bindings, polling=_DEFAULT_POLLING
)
Source code in src/waku/eventsourcing/projection/runner.py
def __init__(
    self,
    container: AsyncContainer,
    lock: IProjectionLock,
    bindings: Sequence[CatchUpProjectionBinding],
    polling: PollingConfig = _DEFAULT_POLLING,
) -> None:
    """Store collaborators; *bindings* is frozen into a tuple."""
    self._container = container
    self._lock = lock
    self._polling = polling
    self._bindings = tuple(bindings)
    # Set by request_shutdown() to stop run() cooperatively.
    self._shutdown_event = anyio.Event()

create async classmethod

create(
    container,
    lock,
    projections=None,
    polling=_DEFAULT_POLLING,
)
Source code in src/waku/eventsourcing/projection/runner.py
@classmethod
async def create(
    cls,
    container: AsyncContainer,
    lock: IProjectionLock,
    projections: Sequence[type[ICatchUpProjection]] | None = None,
    polling: PollingConfig = _DEFAULT_POLLING,
) -> CatchUpProjectionRunner:
    """Build a runner from the CatchUpProjectionRegistry held by *container*.

    When *projections* is given, only bindings whose projection type is in
    that sequence are kept; otherwise every registered binding runs.
    """
    # A short-lived scope is enough here: only the registry is resolved.
    async with container() as scope:
        projection_registry = await scope.get(CatchUpProjectionRegistry)
    if projections is not None:
        projection_set = set(projections)
        bindings = [b for b in projection_registry if b.projection in projection_set]
    else:
        bindings = list(projection_registry)
    return cls(
        container=container,
        lock=lock,
        bindings=bindings,
        polling=polling,
    )

run async

run()
Source code in src/waku/eventsourcing/projection/runner.py
async def run(self) -> None:
    """Run all configured catch-up projections concurrently.

    Returns immediately (with a warning) when nothing is registered.
    """
    if not self._bindings:
        logger.warning('No catch-up projections registered, exiting')
        return

    async with anyio.create_task_group() as tg:
        # Both tasks get the group's cancel scope so either can stop the other.
        tg.start_soon(self._signal_listener, tg.cancel_scope)
        tg.start_soon(self._run_all_projections, tg.cancel_scope)

rebuild async

rebuild(projection_name)
Source code in src/waku/eventsourcing/projection/runner.py
async def rebuild(self, projection_name: str) -> None:
    """Tear down a projection's state and replay its events from the start.

    Raises RuntimeError when another instance currently holds the
    projection lock.
    """
    binding = self._find_binding(projection_name)

    async with self._lock.acquire(projection_name) as acquired:
        if not acquired:
            msg = f'Projection {projection_name!r} is locked by another instance'
            raise RuntimeError(msg)

        # 1. Let the projection clear its own read-model state.
        async with self._container() as scope:
            projection = await scope.get(binding.projection)
            await projection.teardown()

        processor = ProjectionProcessor(binding)

        # 2. Reset the checkpoint so replay starts from the beginning.
        async with self._container() as scope:
            checkpoint_store = await scope.get(ICheckpointStore)
            await processor.reset_checkpoint(checkpoint_store)

        # 3. Replay in batches, one fresh DI scope per pass; stop when a
        #    pass processes zero events.
        while True:
            async with self._container() as scope:
                projection = await scope.get(binding.projection)
                reader = await scope.get(IEventReader)
                checkpoint_store = await scope.get(ICheckpointStore)
                processed = await processor.run_once(projection, reader, checkpoint_store)

            if processed == 0:
                break

request_shutdown

request_shutdown()
Source code in src/waku/eventsourcing/projection/runner.py
def request_shutdown(self) -> None:
    """Signal the runner to stop; run() observes the event and exits."""
    self._shutdown_event.set()

EventSourcedRepository

EventSourcedRepository(event_store)

Bases: ABC, Generic[AggregateT]

Source code in src/waku/eventsourcing/repository.py
def __init__(self, event_store: IEventStore) -> None:
    # The event store is the repository's only required collaborator.
    self._event_store = event_store

aggregate_name class-attribute

aggregate_name

max_stream_length class-attribute

max_stream_length = None

load async

load(aggregate_id)
Source code in src/waku/eventsourcing/repository.py
async def load(self, aggregate_id: str) -> AggregateT:
    """Rehydrate an aggregate by replaying its full event stream.

    Raises AggregateNotFoundError when the stream contains no events.
    """
    stream_id = self._stream_id(aggregate_id)
    stored_events = await read_aggregate_stream(
        self._event_store,
        stream_id,
        max_stream_length=self.max_stream_length,
    )
    if not stored_events:
        raise AggregateNotFoundError(
            aggregate_type=self.aggregate_name,
            aggregate_id=aggregate_id,
        )
    aggregate = self.create_aggregate()
    history = [stored.data for stored in stored_events]
    latest_version = stored_events[-1].position
    logger.debug('Loaded %d events for %s/%s', len(stored_events), self.aggregate_name, aggregate_id)
    aggregate.load_from_history(history, latest_version)
    return aggregate

save async

save(aggregate_id, aggregate, *, idempotency_key=None)
Source code in src/waku/eventsourcing/repository.py
async def save(
    self,
    aggregate_id: str,
    aggregate: AggregateT,
    *,
    idempotency_key: str | None = None,
) -> tuple[int, list[IEvent]]:
    """Append the aggregate's pending events to its stream.

    Returns (new_version, persisted_events); an aggregate with no pending
    events is a no-op that reports its current version.
    """
    stream_id = self._stream_id(aggregate_id)
    events = aggregate.collect_events()
    if not events:
        return aggregate.version, []

    def _key_for(index: int) -> str:
        # Derive one key per event from the caller's key, or fall back to a random one.
        return f'{idempotency_key}:{index}' if idempotency_key else str(uuid.uuid4())

    envelopes = [
        EventEnvelope(domain_event=event, idempotency_key=_key_for(i)) for i, event in enumerate(events)
    ]
    if aggregate.version >= 0:
        expected = Exact(version=aggregate.version)
    else:
        # A negative version marks a brand-new aggregate: the stream must not exist yet.
        expected = NoStream()
    new_version = await self._event_store.append_to_stream(stream_id, envelopes, expected_version=expected)
    aggregate.mark_persisted(new_version)
    logger.debug(
        'Saved %d events to %s/%s, version %d',
        len(events),
        self.aggregate_name,
        aggregate_id,
        new_version,
    )
    return new_version, events

create_aggregate

create_aggregate()
Source code in src/waku/eventsourcing/repository.py
def create_aggregate(self) -> AggregateT:
    """Instantiate a fresh, empty aggregate via the resolved aggregate type.

    Raises TypeError when the type cannot be resolved automatically.
    """
    aggregate_cls = self._resolve_aggregate_type()
    if aggregate_cls is not None:
        return aggregate_cls()
    msg = f'{type(self).__name__}: cannot auto-create aggregate, override create_aggregate()'
    raise TypeError(msg)

SnapshotEventSourcedRepository

SnapshotEventSourcedRepository(
    event_store,
    snapshot_store,
    snapshot_config_registry,
    state_serializer,
)

Bases: EventSourcedRepository[AggregateT], ABC, Generic[AggregateT]

Source code in src/waku/eventsourcing/snapshot/repository.py
def __init__(
    self,
    event_store: IEventStore,
    snapshot_store: ISnapshotStore,
    snapshot_config_registry: SnapshotConfigRegistry,
    state_serializer: ISnapshotStateSerializer,
) -> None:
    """Wire a snapshot manager on top of the plain event-sourced repository."""
    super().__init__(event_store)
    self._state_serializer = state_serializer
    # The registry resolves the per-aggregate snapshot configuration.
    aggregate_config = snapshot_config_registry.get(self.aggregate_name)
    self._snapshot_manager = SnapshotManager(
        store=snapshot_store,
        config=aggregate_config,
        state_type_name=self.snapshot_state_type or self.aggregate_name,
    )

aggregate_name class-attribute

aggregate_name

max_stream_length class-attribute

max_stream_length = None

snapshot_state_type class-attribute

snapshot_state_type = None

create_aggregate

create_aggregate()
Source code in src/waku/eventsourcing/repository.py
def create_aggregate(self) -> AggregateT:
    """Instantiate a fresh, empty aggregate via the resolved aggregate type.

    Raises TypeError when the type cannot be resolved automatically.
    """
    aggregate_cls = self._resolve_aggregate_type()
    if aggregate_cls is not None:
        return aggregate_cls()
    msg = f'{type(self).__name__}: cannot auto-create aggregate, override create_aggregate()'
    raise TypeError(msg)

load async

load(aggregate_id)
Source code in src/waku/eventsourcing/snapshot/repository.py
async def load(self, aggregate_id: str) -> AggregateT:
    """Rehydrate from the latest snapshot plus newer events, falling back to
    a full replay when no snapshot exists."""
    stream_id = self._stream_id(aggregate_id)
    snapshot = await self._snapshot_manager.load_snapshot(stream_id, aggregate_id)

    if snapshot is not None:
        logger.debug('Loaded snapshot for %s/%s at version %d', self.aggregate_name, aggregate_id, snapshot.version)
        aggregate = self._restore_from_snapshot(snapshot)
        # Only replay events appended after the snapshot was taken.
        stored_events = await read_aggregate_stream(
            self._event_store,
            stream_id,
            start=snapshot.version + 1,
            max_stream_length=self.max_stream_length,
        )
        domain_events = [e.data for e in stored_events]
        version = stored_events[-1].position if stored_events else snapshot.version
        if domain_events:
            aggregate.load_from_history(domain_events, version)
        else:
            # Snapshot is already current; just record the version.
            aggregate.mark_persisted(version)
        return aggregate

    logger.debug('No snapshot for %s/%s, loading from events', self.aggregate_name, aggregate_id)
    return await super().load(aggregate_id)

save async

save(aggregate_id, aggregate, *, idempotency_key=None)
Source code in src/waku/eventsourcing/snapshot/repository.py
async def save(
    self,
    aggregate_id: str,
    aggregate: AggregateT,
    *,
    idempotency_key: str | None = None,
) -> tuple[int, list[IEvent]]:
    """Persist pending events, then write a snapshot when the policy says so.

    Returns (new_version, persisted_events) from the base save.
    """
    new_version, events = await super().save(aggregate_id, aggregate, idempotency_key=idempotency_key)

    if events and self._snapshot_manager.should_save(aggregate_id, new_version):
        stream = self._stream_id(aggregate_id)
        serialized = self._state_serializer.serialize(self._snapshot_state(aggregate))
        await self._snapshot_manager.save_snapshot(stream, aggregate_id, serialized, new_version)

    return new_version, events

FnUpcaster

FnUpcaster(from_version, fn)

Bases: IEventUpcaster

Source code in src/waku/eventsourcing/upcasting/fn.py
def __init__(self, from_version: int, fn: Callable[[dict[str, Any]], dict[str, Any]]) -> None:
    """Wrap a plain payload-transform function as an upcaster applying from *from_version*."""
    self._fn = fn
    self.from_version = from_version

from_version instance-attribute

from_version = from_version

upcast

upcast(data)
Source code in src/waku/eventsourcing/upcasting/fn.py
def upcast(self, data: dict[str, Any], /) -> dict[str, Any]:
    """Delegate the payload transformation to the wrapped function."""
    return self._fn(data)

IEventUpcaster

Bases: ABC

from_version instance-attribute

from_version

upcast abstractmethod

upcast(data)
Source code in src/waku/eventsourcing/upcasting/interfaces.py
# Abstract hook: transform an event payload one schema step forward.
@abc.abstractmethod
def upcast(self, data: dict[str, Any], /) -> dict[str, Any]: ...

UpcasterChain

UpcasterChain(upcasters_by_type)
Source code in src/waku/eventsourcing/upcasting/chain.py
def __init__(self, upcasters_by_type: Mapping[str, Sequence[IEventUpcaster]]) -> None:
    chains: dict[str, tuple[IEventUpcaster, ...]] = {}
    for event_type, upcasters in upcasters_by_type.items():
        sorted_upcasters = sorted(upcasters, key=lambda u: u.from_version)
        seen: set[int] = set()
        for u in sorted_upcasters:
            if u.from_version < 1:
                msg = f'Invalid from_version {u.from_version} for event type {event_type!r}: must be >= 1'
                raise UpcasterChainError(msg)
            if u.from_version in seen:
                msg = f'Duplicate upcaster for event type {event_type!r} at from_version {u.from_version}'
                raise UpcasterChainError(msg)
            seen.add(u.from_version)
        chains[event_type] = tuple(sorted_upcasters)
    self._chains = chains

upcast

upcast(event_type, data, schema_version)
Source code in src/waku/eventsourcing/upcasting/chain.py
def upcast(self, event_type: str, data: dict[str, Any], schema_version: int) -> dict[str, Any]:
    upcasters = self._chains.get(event_type)
    if not upcasters:
        return data
    if schema_version > upcasters[-1].from_version:
        return data
    for u in upcasters:
        if u.from_version >= schema_version:
            data = u.upcast(data)
    return data

add_field

add_field(from_version, *, field, default)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def add_field(from_version: int, *, field: str, default: Any) -> IEventUpcaster:
    def _add(data: dict[str, Any]) -> dict[str, Any]:
        result = dict(data)
        if field not in result:
            result[field] = copy.copy(default)
        return result

    return FnUpcaster(from_version, fn=_add)

noop

noop(from_version)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def noop(from_version: int) -> IEventUpcaster:
    return FnUpcaster(from_version, fn=dict)

remove_field

remove_field(from_version, *, field)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def remove_field(from_version: int, *, field: str) -> IEventUpcaster:
    return FnUpcaster(from_version, fn=lambda data: {k: v for k, v in data.items() if k != field})

rename_field

rename_field(from_version, *, old, new)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def rename_field(from_version: int, *, old: str, new: str) -> IEventUpcaster:
    def _rename(data: dict[str, Any]) -> dict[str, Any]:
        result = {k: v for k, v in data.items() if k != old}
        if old in data:
            result[new] = data[old]
        return result

    return FnUpcaster(from_version, fn=_rename)

upcast

upcast(from_version, fn)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def upcast(from_version: int, fn: Callable[[dict[str, Any]], dict[str, Any]]) -> IEventUpcaster:
    return FnUpcaster(from_version, fn=fn)

contracts

AggregateT module-attribute

AggregateT = TypeVar(
    'AggregateT', bound=EventSourcedAggregate
)

CommandT module-attribute

CommandT = TypeVar('CommandT')

EventT module-attribute

EventT = TypeVar('EventT', bound=IEvent)

StateT module-attribute

StateT = TypeVar('StateT')

DataT module-attribute

DataT = TypeVar('DataT', bound='IEvent', default='IEvent')

ExpectedVersion module-attribute

ExpectedVersion = (
    Exact | NoStream | StreamExists | AnyVersion
)

EventSourcedAggregate

EventSourcedAggregate()

Bases: ABC

Source code in src/waku/eventsourcing/contracts/aggregate.py
def __init__(self) -> None:
    self._version = -1
    self._pending_events = []
version property
version
collect_events
collect_events()
Source code in src/waku/eventsourcing/contracts/aggregate.py
def collect_events(self) -> list[IEvent]:
    events = list(self._pending_events)
    self._pending_events.clear()
    return events
mark_persisted
mark_persisted(version)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def mark_persisted(self, version: int) -> None:
    self._version = version
load_from_history
load_from_history(events, version)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def load_from_history(self, events: Sequence[IEvent], version: int) -> None:
    for event in events:
        self._apply(event)
    self._version = version

EventEnvelope dataclass

EventEnvelope(
    *,
    domain_event,
    idempotency_key,
    metadata=EventMetadata(),
)
domain_event instance-attribute
domain_event
idempotency_key instance-attribute
idempotency_key
metadata class-attribute instance-attribute
metadata = field(default_factory=EventMetadata)

EventMetadata dataclass

EventMetadata(
    *, correlation_id=None, causation_id=None, extra=dict()
)
correlation_id class-attribute instance-attribute
correlation_id = None
causation_id class-attribute instance-attribute
causation_id = None
extra class-attribute instance-attribute
extra = field(default_factory=dict)

IMetadataEnricher

Bases: ABC

Enriches event metadata before persistence.

enrich abstractmethod
enrich(metadata)
Source code in src/waku/eventsourcing/contracts/event.py
@abc.abstractmethod
def enrich(self, metadata: EventMetadata, /) -> EventMetadata: ...

StoredEvent dataclass

StoredEvent(
    *,
    event_id,
    stream_id,
    event_type,
    position,
    global_position,
    timestamp,
    data,
    metadata,
    idempotency_key,
    schema_version=1,
)

Bases: Generic[DataT]

event_id instance-attribute
event_id
stream_id instance-attribute
stream_id
event_type instance-attribute
event_type
position instance-attribute
position
global_position instance-attribute
global_position
timestamp instance-attribute
timestamp
data instance-attribute
data
metadata instance-attribute
metadata
idempotency_key instance-attribute
idempotency_key
schema_version class-attribute instance-attribute
schema_version = 1

AnyVersion dataclass

AnyVersion()

Exact dataclass

Exact(version)
version instance-attribute
version

NoStream dataclass

NoStream()

StreamExists dataclass

StreamExists()

StreamId dataclass

StreamId(stream_type, stream_key)
stream_type instance-attribute
stream_type
stream_key instance-attribute
stream_key
value property
value
for_aggregate classmethod
for_aggregate(aggregate_type, aggregate_id)
Source code in src/waku/eventsourcing/contracts/stream.py
@classmethod
def for_aggregate(cls, aggregate_type: str, aggregate_id: str) -> StreamId:
    return cls(stream_type=aggregate_type, stream_key=aggregate_id)
from_value classmethod
from_value(value)
Source code in src/waku/eventsourcing/contracts/stream.py
@classmethod
def from_value(cls, value: str) -> StreamId:
    stream_type, sep, stream_key = value.partition('-')
    if not sep or not stream_type or not stream_key:
        msg = f"Invalid stream ID format: {value!r}. Expected '{{stream_type}}-{{stream_key}}'"
        raise ValueError(msg)
    return cls(stream_type=stream_type, stream_key=stream_key)

StreamPosition

Bases: Enum

START class-attribute instance-attribute
START = 'start'
END class-attribute instance-attribute
END = 'end'

aggregate

StateT module-attribute
StateT = TypeVar('StateT')
CommandT module-attribute
CommandT = TypeVar('CommandT')
EventT module-attribute
EventT = TypeVar('EventT', bound=IEvent)
AggregateT module-attribute
AggregateT = TypeVar(
    'AggregateT', bound=EventSourcedAggregate
)
IDecider

Bases: Protocol[StateT, _CommandT_contra, EventT]

initial_state
initial_state()
Source code in src/waku/eventsourcing/contracts/aggregate.py
def initial_state(self) -> StateT: ...
decide
decide(command, state)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def decide(self, command: _CommandT_contra, state: StateT) -> Sequence[EventT]: ...
evolve
evolve(state, event)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def evolve(self, state: StateT, event: EventT) -> StateT: ...
EventSourcedAggregate
EventSourcedAggregate()

Bases: ABC

Source code in src/waku/eventsourcing/contracts/aggregate.py
def __init__(self) -> None:
    self._version = -1
    self._pending_events = []
version property
version
collect_events
collect_events()
Source code in src/waku/eventsourcing/contracts/aggregate.py
def collect_events(self) -> list[IEvent]:
    events = list(self._pending_events)
    self._pending_events.clear()
    return events
mark_persisted
mark_persisted(version)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def mark_persisted(self, version: int) -> None:
    self._version = version
load_from_history
load_from_history(events, version)
Source code in src/waku/eventsourcing/contracts/aggregate.py
def load_from_history(self, events: Sequence[IEvent], version: int) -> None:
    for event in events:
        self._apply(event)
    self._version = version

event

DataT module-attribute
DataT = TypeVar('DataT', bound='IEvent', default='IEvent')
EventMetadata dataclass
EventMetadata(
    *, correlation_id=None, causation_id=None, extra=dict()
)
correlation_id class-attribute instance-attribute
correlation_id = None
causation_id class-attribute instance-attribute
causation_id = None
extra class-attribute instance-attribute
extra = field(default_factory=dict)
IMetadataEnricher

Bases: ABC

Enriches event metadata before persistence.

enrich abstractmethod
enrich(metadata)
Source code in src/waku/eventsourcing/contracts/event.py
@abc.abstractmethod
def enrich(self, metadata: EventMetadata, /) -> EventMetadata: ...
EventEnvelope dataclass
EventEnvelope(
    *,
    domain_event,
    idempotency_key,
    metadata=EventMetadata(),
)
domain_event instance-attribute
domain_event
idempotency_key instance-attribute
idempotency_key
metadata class-attribute instance-attribute
metadata = field(default_factory=EventMetadata)
StoredEvent dataclass
StoredEvent(
    *,
    event_id,
    stream_id,
    event_type,
    position,
    global_position,
    timestamp,
    data,
    metadata,
    idempotency_key,
    schema_version=1,
)

Bases: Generic[DataT]

event_id instance-attribute
event_id
stream_id instance-attribute
stream_id
event_type instance-attribute
event_type
position instance-attribute
position
global_position instance-attribute
global_position
timestamp instance-attribute
timestamp
data instance-attribute
data
metadata instance-attribute
metadata
idempotency_key instance-attribute
idempotency_key
schema_version class-attribute instance-attribute
schema_version = 1

stream

ExpectedVersion module-attribute
ExpectedVersion = (
    Exact | NoStream | StreamExists | AnyVersion
)
StreamId dataclass
StreamId(stream_type, stream_key)
stream_type instance-attribute
stream_type
stream_key instance-attribute
stream_key
value property
value
for_aggregate classmethod
for_aggregate(aggregate_type, aggregate_id)
Source code in src/waku/eventsourcing/contracts/stream.py
@classmethod
def for_aggregate(cls, aggregate_type: str, aggregate_id: str) -> StreamId:
    return cls(stream_type=aggregate_type, stream_key=aggregate_id)
from_value classmethod
from_value(value)
Source code in src/waku/eventsourcing/contracts/stream.py
@classmethod
def from_value(cls, value: str) -> StreamId:
    stream_type, sep, stream_key = value.partition('-')
    if not sep or not stream_type or not stream_key:
        msg = f"Invalid stream ID format: {value!r}. Expected '{{stream_type}}-{{stream_key}}'"
        raise ValueError(msg)
    return cls(stream_type=stream_type, stream_key=stream_key)
Exact dataclass
Exact(version)
version instance-attribute
version
NoStream dataclass
NoStream()
StreamExists dataclass
StreamExists()
AnyVersion dataclass
AnyVersion()
StreamPosition

Bases: Enum

START class-attribute instance-attribute
START = 'start'
END class-attribute instance-attribute
END = 'end'

decider

DeciderCommandHandler

DeciderCommandHandler(repository, decider, publisher)

Bases: RequestHandler[RequestT, ResponseT], ABC, Generic[RequestT, StateT, CommandT, EventT, ResponseT]

Source code in src/waku/eventsourcing/decider/handler.py
def __init__(
    self,
    repository: DeciderRepository[StateT, CommandT, EventT],
    decider: IDecider[StateT, CommandT, EventT],
    publisher: IPublisher,
) -> None:
    self._repository = repository
    self._decider = decider
    self._publisher = publisher
max_attempts class-attribute
max_attempts = 3
handle async
handle(request)
Source code in src/waku/eventsourcing/decider/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    aggregate_id: str = self._aggregate_id(request)
    command: CommandT = self._to_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        state, version = await self._repository.load(aggregate_id)
        idempotency_key = self._idempotency_key(request, version)

        events = self._decider.decide(command, state)
        for event in events:
            state = self._decider.evolve(state, event)

        new_version: int = await self._repository.save(
            aggregate_id,
            events,
            version,
            current_state=state,
            idempotency_key=idempotency_key,
        )

        for event in events:
            await self._publisher.publish(event)

        return self._to_response(state, new_version)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

DeciderVoidCommandHandler

DeciderVoidCommandHandler(repository, decider, publisher)

Bases: DeciderCommandHandler[RequestT, StateT, CommandT, EventT, None], ABC, Generic[RequestT, StateT, CommandT, EventT]

Source code in src/waku/eventsourcing/decider/handler.py
def __init__(
    self,
    repository: DeciderRepository[StateT, CommandT, EventT],
    decider: IDecider[StateT, CommandT, EventT],
    publisher: IPublisher,
) -> None:
    self._repository = repository
    self._decider = decider
    self._publisher = publisher
max_attempts class-attribute
max_attempts = 3
handle async
handle(request)
Source code in src/waku/eventsourcing/decider/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    aggregate_id: str = self._aggregate_id(request)
    command: CommandT = self._to_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        state, version = await self._repository.load(aggregate_id)
        idempotency_key = self._idempotency_key(request, version)

        events = self._decider.decide(command, state)
        for event in events:
            state = self._decider.evolve(state, event)

        new_version: int = await self._repository.save(
            aggregate_id,
            events,
            version,
            current_state=state,
            idempotency_key=idempotency_key,
        )

        for event in events:
            await self._publisher.publish(event)

        return self._to_response(state, new_version)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

DeciderRepository

DeciderRepository(decider, event_store)

Bases: ABC, Generic[StateT, CommandT, EventT]

Source code in src/waku/eventsourcing/decider/repository.py
def __init__(
    self,
    decider: IDecider[StateT, CommandT, EventT],
    event_store: IEventStore,
) -> None:
    self._decider = decider
    self._event_store = event_store
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/decider/repository.py
async def load(self, aggregate_id: str) -> tuple[StateT, int]:
    stream_id = self._stream_id(aggregate_id)
    stored_events = await read_aggregate_stream(
        self._event_store,
        stream_id,
        max_stream_length=self.max_stream_length,
    )
    state = self._decider.initial_state()
    for stored in cast('list[StoredEvent[EventT]]', stored_events):
        state = self._decider.evolve(state, stored.data)
    version = stored_events[-1].position if stored_events else -1
    logger.debug('Loaded %d events for %s/%s', len(stored_events), self.aggregate_name, aggregate_id)
    return state, version
save async
save(
    aggregate_id,
    events,
    expected_version,
    *,
    current_state=None,
    idempotency_key=None,
)
Source code in src/waku/eventsourcing/decider/repository.py
async def save(
    self,
    aggregate_id: str,
    events: typing.Sequence[EventT],
    expected_version: int,
    *,
    current_state: StateT | None = None,  # noqa: ARG002
    idempotency_key: str | None = None,
) -> int:
    if not events:
        return expected_version
    stream_id = self._stream_id(aggregate_id)
    envelopes = [
        EventEnvelope(
            domain_event=e,
            idempotency_key=f'{idempotency_key}:{i}' if idempotency_key else str(uuid.uuid4()),
        )
        for i, e in enumerate(events)
    ]
    expected = Exact(version=expected_version) if expected_version >= 0 else NoStream()
    new_version = await self._event_store.append_to_stream(stream_id, envelopes, expected_version=expected)
    logger.debug(
        'Saved %d events to %s/%s, version %d',
        len(events),
        self.aggregate_name,
        aggregate_id,
        new_version,
    )
    return new_version

SnapshotDeciderRepository

SnapshotDeciderRepository(
    decider,
    event_store,
    snapshot_store,
    snapshot_config_registry,
    state_serializer,
)

Bases: DeciderRepository[StateT, CommandT, EventT], ABC

Source code in src/waku/eventsourcing/decider/repository.py
def __init__(
    self,
    decider: IDecider[StateT, CommandT, EventT],
    event_store: IEventStore,
    snapshot_store: ISnapshotStore,
    snapshot_config_registry: SnapshotConfigRegistry,
    state_serializer: ISnapshotStateSerializer,
) -> None:
    super().__init__(decider, event_store)
    self._state_serializer = state_serializer
    self._state_type: type[StateT] = type(self._decider.initial_state())
    config = snapshot_config_registry.get(self.aggregate_name)
    self._snapshot_manager = SnapshotManager(
        store=snapshot_store,
        config=config,
        state_type_name=self.snapshot_state_type or self._state_type.__name__,
    )
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
snapshot_state_type class-attribute
snapshot_state_type = None
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/decider/repository.py
async def load(self, aggregate_id: str) -> tuple[StateT, int]:
    stream_id = self._stream_id(aggregate_id)
    snapshot = await self._snapshot_manager.load_snapshot(stream_id, aggregate_id)

    if snapshot is not None:
        logger.debug('Loaded snapshot for %s/%s at version %d', self.aggregate_name, aggregate_id, snapshot.version)
        state = self._state_serializer.deserialize(snapshot.state, self._state_type)
        stored_events = await read_aggregate_stream(
            self._event_store,
            stream_id,
            start=snapshot.version + 1,
            max_stream_length=self.max_stream_length,
        )
        for stored in cast('list[StoredEvent[EventT]]', stored_events):
            state = self._decider.evolve(state, stored.data)
        version = stored_events[-1].position if stored_events else snapshot.version
        return state, version

    logger.debug('No snapshot for %s/%s, loading from events', self.aggregate_name, aggregate_id)
    return await super().load(aggregate_id)
save async
save(
    aggregate_id,
    events,
    expected_version,
    *,
    current_state=None,
    idempotency_key=None,
)
Source code in src/waku/eventsourcing/decider/repository.py
async def save(
    self,
    aggregate_id: str,
    events: typing.Sequence[EventT],
    expected_version: int,
    *,
    current_state: StateT | None = None,
    idempotency_key: str | None = None,
) -> int:
    new_version = await super().save(
        aggregate_id,
        events,
        expected_version,
        current_state=current_state,
        idempotency_key=idempotency_key,
    )

    if events and self._snapshot_manager.should_save(aggregate_id, new_version):
        if current_state is not None:
            state = current_state
        else:
            state, _ = await self.load(aggregate_id)
        state_data = self._state_serializer.serialize(state)
        stream_id = self._stream_id(aggregate_id)
        await self._snapshot_manager.save_snapshot(stream_id, aggregate_id, state_data, new_version)

    return new_version

handler

logger module-attribute
logger = getLogger(__name__)
DeciderCommandHandler
DeciderCommandHandler(repository, decider, publisher)

Bases: RequestHandler[RequestT, ResponseT], ABC, Generic[RequestT, StateT, CommandT, EventT, ResponseT]

Source code in src/waku/eventsourcing/decider/handler.py
def __init__(
    self,
    repository: DeciderRepository[StateT, CommandT, EventT],
    decider: IDecider[StateT, CommandT, EventT],
    publisher: IPublisher,
) -> None:
    self._repository = repository
    self._decider = decider
    self._publisher = publisher
max_attempts class-attribute
max_attempts = 3
handle async
handle(request)
Source code in src/waku/eventsourcing/decider/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    aggregate_id: str = self._aggregate_id(request)
    command: CommandT = self._to_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        state, version = await self._repository.load(aggregate_id)
        idempotency_key = self._idempotency_key(request, version)

        events = self._decider.decide(command, state)
        for event in events:
            state = self._decider.evolve(state, event)

        new_version: int = await self._repository.save(
            aggregate_id,
            events,
            version,
            current_state=state,
            idempotency_key=idempotency_key,
        )

        for event in events:
            await self._publisher.publish(event)

        return self._to_response(state, new_version)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )
DeciderVoidCommandHandler
DeciderVoidCommandHandler(repository, decider, publisher)

Bases: DeciderCommandHandler[RequestT, StateT, CommandT, EventT, None], ABC, Generic[RequestT, StateT, CommandT, EventT]

Source code in src/waku/eventsourcing/decider/handler.py
def __init__(
    self,
    repository: DeciderRepository[StateT, CommandT, EventT],
    decider: IDecider[StateT, CommandT, EventT],
    publisher: IPublisher,
) -> None:
    self._repository = repository
    self._decider = decider
    self._publisher = publisher
max_attempts class-attribute
max_attempts = 3
handle async
handle(request)
Source code in src/waku/eventsourcing/decider/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    aggregate_id: str = self._aggregate_id(request)
    command: CommandT = self._to_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        state, version = await self._repository.load(aggregate_id)
        idempotency_key = self._idempotency_key(request, version)

        events = self._decider.decide(command, state)
        for event in events:
            state = self._decider.evolve(state, event)

        new_version: int = await self._repository.save(
            aggregate_id,
            events,
            version,
            current_state=state,
            idempotency_key=idempotency_key,
        )

        for event in events:
            await self._publisher.publish(event)

        return self._to_response(state, new_version)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

repository

logger module-attribute
logger = getLogger(__name__)
DeciderRepository
DeciderRepository(decider, event_store)

Bases: ABC, Generic[StateT, CommandT, EventT]

Source code in src/waku/eventsourcing/decider/repository.py
def __init__(
    self,
    decider: IDecider[StateT, CommandT, EventT],
    event_store: IEventStore,
) -> None:
    self._decider = decider
    self._event_store = event_store
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/decider/repository.py
async def load(self, aggregate_id: str) -> tuple[StateT, int]:
    stream_id = self._stream_id(aggregate_id)
    stored_events = await read_aggregate_stream(
        self._event_store,
        stream_id,
        max_stream_length=self.max_stream_length,
    )
    state = self._decider.initial_state()
    for stored in cast('list[StoredEvent[EventT]]', stored_events):
        state = self._decider.evolve(state, stored.data)
    version = stored_events[-1].position if stored_events else -1
    logger.debug('Loaded %d events for %s/%s', len(stored_events), self.aggregate_name, aggregate_id)
    return state, version
save async
save(
    aggregate_id,
    events,
    expected_version,
    *,
    current_state=None,
    idempotency_key=None,
)
Source code in src/waku/eventsourcing/decider/repository.py
async def save(
    self,
    aggregate_id: str,
    events: typing.Sequence[EventT],
    expected_version: int,
    *,
    current_state: StateT | None = None,  # noqa: ARG002
    idempotency_key: str | None = None,
) -> int:
    if not events:
        return expected_version
    stream_id = self._stream_id(aggregate_id)
    envelopes = [
        EventEnvelope(
            domain_event=e,
            idempotency_key=f'{idempotency_key}:{i}' if idempotency_key else str(uuid.uuid4()),
        )
        for i, e in enumerate(events)
    ]
    expected = Exact(version=expected_version) if expected_version >= 0 else NoStream()
    new_version = await self._event_store.append_to_stream(stream_id, envelopes, expected_version=expected)
    logger.debug(
        'Saved %d events to %s/%s, version %d',
        len(events),
        self.aggregate_name,
        aggregate_id,
        new_version,
    )
    return new_version
SnapshotDeciderRepository
SnapshotDeciderRepository(
    decider,
    event_store,
    snapshot_store,
    snapshot_config_registry,
    state_serializer,
)

Bases: DeciderRepository[StateT, CommandT, EventT], ABC

Source code in src/waku/eventsourcing/decider/repository.py
def __init__(
    self,
    decider: IDecider[StateT, CommandT, EventT],
    event_store: IEventStore,
    snapshot_store: ISnapshotStore,
    snapshot_config_registry: SnapshotConfigRegistry,
    state_serializer: ISnapshotStateSerializer,
) -> None:
    super().__init__(decider, event_store)
    self._state_serializer = state_serializer
    self._state_type: type[StateT] = type(self._decider.initial_state())
    config = snapshot_config_registry.get(self.aggregate_name)
    self._snapshot_manager = SnapshotManager(
        store=snapshot_store,
        config=config,
        state_type_name=self.snapshot_state_type or self._state_type.__name__,
    )
snapshot_state_type class-attribute
snapshot_state_type = None
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/decider/repository.py
async def load(self, aggregate_id: str) -> tuple[StateT, int]:
    stream_id = self._stream_id(aggregate_id)
    snapshot = await self._snapshot_manager.load_snapshot(stream_id, aggregate_id)

    if snapshot is not None:
        logger.debug('Loaded snapshot for %s/%s at version %d', self.aggregate_name, aggregate_id, snapshot.version)
        state = self._state_serializer.deserialize(snapshot.state, self._state_type)
        stored_events = await read_aggregate_stream(
            self._event_store,
            stream_id,
            start=snapshot.version + 1,
            max_stream_length=self.max_stream_length,
        )
        for stored in cast('list[StoredEvent[EventT]]', stored_events):
            state = self._decider.evolve(state, stored.data)
        version = stored_events[-1].position if stored_events else snapshot.version
        return state, version

    logger.debug('No snapshot for %s/%s, loading from events', self.aggregate_name, aggregate_id)
    return await super().load(aggregate_id)
save async
save(
    aggregate_id,
    events,
    expected_version,
    *,
    current_state=None,
    idempotency_key=None,
)
Source code in src/waku/eventsourcing/decider/repository.py
async def save(
    self,
    aggregate_id: str,
    events: typing.Sequence[EventT],
    expected_version: int,
    *,
    current_state: StateT | None = None,
    idempotency_key: str | None = None,
) -> int:
    new_version = await super().save(
        aggregate_id,
        events,
        expected_version,
        current_state=current_state,
        idempotency_key=idempotency_key,
    )

    if events and self._snapshot_manager.should_save(aggregate_id, new_version):
        if current_state is not None:
            state = current_state
        else:
            state, _ = await self.load(aggregate_id)
        state_data = self._state_serializer.serialize(state)
        stream_id = self._stream_id(aggregate_id)
        await self._snapshot_manager.save_snapshot(stream_id, aggregate_id, state_data, new_version)

    return new_version

exceptions

EventSourcingError

Bases: WakuError

EventSourcingConfigError

StreamNotFoundError

StreamNotFoundError(stream_id)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId) -> None:
    self.stream_id = stream_id
    super().__init__(f'Stream {stream_id} not found')
stream_id instance-attribute
stream_id = stream_id

StreamDeletedError

StreamDeletedError(stream_id)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId) -> None:
    self.stream_id = stream_id
    super().__init__(f'Stream {stream_id} is deleted')
stream_id instance-attribute
stream_id = stream_id

AggregateNotFoundError

AggregateNotFoundError(aggregate_type, aggregate_id)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, aggregate_type: str, aggregate_id: str) -> None:
    self.aggregate_type = aggregate_type
    self.aggregate_id = aggregate_id
    super().__init__(f'{aggregate_type} with id {aggregate_id!r} not found')
aggregate_type instance-attribute
aggregate_type = aggregate_type
aggregate_id instance-attribute
aggregate_id = aggregate_id

ConcurrencyConflictError

ConcurrencyConflictError(
    stream_id, expected_version, actual_version
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, expected_version: int, actual_version: int) -> None:
    self.stream_id = stream_id
    self.expected_version = expected_version
    self.actual_version = actual_version
    super().__init__(
        f'Concurrency conflict on stream {stream_id}: expected version {expected_version}, actual {actual_version}'
    )
stream_id instance-attribute
stream_id = stream_id
expected_version instance-attribute
expected_version = expected_version
actual_version instance-attribute
actual_version = actual_version

DuplicateAggregateNameError

DuplicateAggregateNameError(
    aggregate_name, repository_names
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, aggregate_name: str, repository_names: list[str]) -> None:
    self.aggregate_name = aggregate_name
    self.repository_names = repository_names
    super().__init__(
        f'Duplicate aggregate name {aggregate_name!r} used by multiple repositories: {", ".join(repository_names)}'
    )
aggregate_name instance-attribute
aggregate_name = aggregate_name
repository_names instance-attribute
repository_names = repository_names

UnknownEventTypeError

UnknownEventTypeError(event_type_name)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, event_type_name: str) -> None:
    self.event_type_name = event_type_name
    super().__init__(f'Unknown event type: {event_type_name!r}')
event_type_name instance-attribute
event_type_name = event_type_name

DuplicateEventTypeError

DuplicateEventTypeError(event_type_name)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, event_type_name: str) -> None:
    self.event_type_name = event_type_name
    super().__init__(f'Event type {event_type_name!r} is already registered')
event_type_name instance-attribute
event_type_name = event_type_name

ConflictingEventTypeError

ConflictingEventTypeError(
    event_type_name,
    existing_name,
    existing_version,
    attempted_name,
    attempted_version,
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(
    self,
    event_type_name: str,
    existing_name: str,
    existing_version: int,
    attempted_name: str,
    attempted_version: int,
) -> None:
    if existing_name != attempted_name:
        detail = f'name {existing_name!r} → {attempted_name!r}'
    else:
        detail = f'version v{existing_version} → v{attempted_version}'
    super().__init__(f'Conflicting registration for event type {event_type_name!r}: {detail}')

SnapshotTypeMismatchError

SnapshotTypeMismatchError(
    stream_id, expected_type, actual_type
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, expected_type: str, actual_type: str) -> None:
    self.stream_id = stream_id
    self.expected_type = expected_type
    self.actual_type = actual_type
    super().__init__(
        f'Snapshot type mismatch on stream {stream_id}: expected {expected_type!r}, got {actual_type!r}'
    )
stream_id instance-attribute
stream_id = stream_id
expected_type instance-attribute
expected_type = expected_type
actual_type instance-attribute
actual_type = actual_type

StreamTooLargeError

StreamTooLargeError(stream_id, max_length)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, max_length: int) -> None:
    self.stream_id = stream_id
    self.max_length = max_length
    super().__init__(
        f'Stream {stream_id} exceeds maximum length of {max_length} events. '
        f'Configure snapshots to reduce stream replay size.'
    )
stream_id instance-attribute
stream_id = stream_id
max_length instance-attribute
max_length = max_length

RegistryFrozenError

RegistryFrozenError()

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self) -> None:
    super().__init__('Cannot register event types after registry is frozen')

ProjectionError

ProjectionStoppedError

ProjectionStoppedError(projection_name, cause)

Bases: ProjectionError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, projection_name: str, cause: Exception) -> None:
    self.projection_name = projection_name
    self.cause = cause
    super().__init__(f'Projection {projection_name!r} stopped due to error: {cause}')
projection_name instance-attribute
projection_name = projection_name
cause instance-attribute
cause = cause

DuplicateIdempotencyKeyError

DuplicateIdempotencyKeyError(stream_id, *, reason)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, *, reason: str) -> None:
    self.stream_id = stream_id
    self.reason = reason
    super().__init__(f'Duplicate idempotency keys ({reason}) on stream {stream_id}')
stream_id instance-attribute
stream_id = stream_id
reason instance-attribute
reason = reason

PartialDuplicateAppendError

PartialDuplicateAppendError(
    stream_id, existing_count, total_count
)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, stream_id: StreamId, existing_count: int, total_count: int) -> None:
    self.stream_id = stream_id
    self.existing_count = existing_count
    self.total_count = total_count
    super().__init__(
        f'Partial duplicate append on stream {stream_id}: '
        f'{existing_count} of {total_count} idempotency keys already exist'
    )
stream_id instance-attribute
stream_id = stream_id
existing_count instance-attribute
existing_count = existing_count
total_count instance-attribute
total_count = total_count

SnapshotConfigNotFoundError

SnapshotConfigNotFoundError(aggregate_name)

Bases: EventSourcingError

Source code in src/waku/eventsourcing/exceptions.py
def __init__(self, aggregate_name: str) -> None:
    self.aggregate_name = aggregate_name
    super().__init__(
        f'No snapshot config found for aggregate {aggregate_name!r}. '
        f'Provide snapshot=SnapshotOptions(...) via bind_aggregate() or bind_decider().'
    )
aggregate_name instance-attribute
aggregate_name = aggregate_name

SnapshotMigrationChainError

UpcasterChainError

handler

logger module-attribute

logger = getLogger(__name__)

EventSourcedCommandHandler

EventSourcedCommandHandler(repository, publisher)

Bases: RequestHandler[RequestT, ResponseT], ABC, Generic[RequestT, AggregateT, ResponseT]

Source code in src/waku/eventsourcing/handler.py
def __init__(
    self,
    repository: EventSourcedRepository[AggregateT],
    publisher: IPublisher,
) -> None:
    self._repository = repository
    self._publisher = publisher
max_attempts class-attribute
max_attempts = 3
handle async
handle(request)
Source code in src/waku/eventsourcing/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    aggregate_id: str = self._aggregate_id(request)
    is_creation: bool = self._is_creation_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        if is_creation:
            aggregate = self._repository.create_aggregate()
        else:
            aggregate = await self._repository.load(aggregate_id)

        idempotency_key = self._idempotency_key(request, aggregate.version)
        await self._execute(request, aggregate)

        _, events = await self._repository.save(
            aggregate_id,
            aggregate,
            idempotency_key=idempotency_key,
        )

        for event in events:
            await self._publisher.publish(event)

        return self._to_response(aggregate)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        is_creation=is_creation,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

EventSourcedVoidCommandHandler

EventSourcedVoidCommandHandler(repository, publisher)

Bases: EventSourcedCommandHandler[RequestT, AggregateT, None], ABC, Generic[RequestT, AggregateT]

Source code in src/waku/eventsourcing/handler.py
def __init__(
    self,
    repository: EventSourcedRepository[AggregateT],
    publisher: IPublisher,
) -> None:
    self._repository = repository
    self._publisher = publisher
max_attempts class-attribute
max_attempts = 3
handle async
handle(request)
Source code in src/waku/eventsourcing/handler.py
async def handle(self, request: RequestT, /) -> ResponseT:
    aggregate_id: str = self._aggregate_id(request)
    is_creation: bool = self._is_creation_command(request)
    logger.debug('Handling %s for %s', type(request).__name__, aggregate_id)

    async def _attempt() -> ResponseT:
        if is_creation:
            aggregate = self._repository.create_aggregate()
        else:
            aggregate = await self._repository.load(aggregate_id)

        idempotency_key = self._idempotency_key(request, aggregate.version)
        await self._execute(request, aggregate)

        _, events = await self._repository.save(
            aggregate_id,
            aggregate,
            idempotency_key=idempotency_key,
        )

        for event in events:
            await self._publisher.publish(event)

        return self._to_response(aggregate)

    return await execute_with_optimistic_retry(
        _attempt,
        max_attempts=self.max_attempts,
        is_creation=is_creation,
        request_name=type(request).__name__,
        aggregate_id=aggregate_id,
        attempt_context=self._create_attempt_context,
    )

modules

EventTypeSpec module-attribute

EventTypeSpec = 'type[IEvent] | EventType'

EventType dataclass

EventType(
    event_type,
    *,
    name=None,
    aliases=(),
    version=1,
    upcasters=(),
)
event_type instance-attribute
event_type
name class-attribute instance-attribute
name = field(default=None, kw_only=True)
aliases class-attribute instance-attribute
aliases = field(default=(), kw_only=True)
version class-attribute instance-attribute
version = field(default=1, kw_only=True)
upcasters class-attribute instance-attribute
upcasters = field(default=(), kw_only=True)

SnapshotOptions dataclass

SnapshotOptions(
    *, strategy, schema_version=1, migrations=()
)
strategy instance-attribute
strategy
schema_version class-attribute instance-attribute
schema_version = 1
migrations class-attribute instance-attribute
migrations = ()

AggregateBinding dataclass

AggregateBinding(
    *, repository, event_types, projections, snapshot
)
repository instance-attribute
repository
event_types instance-attribute
event_types
projections instance-attribute
projections
snapshot instance-attribute
snapshot

DeciderBinding dataclass

DeciderBinding(
    *,
    repository,
    decider,
    event_types,
    projections,
    snapshot,
)
repository instance-attribute
repository
decider instance-attribute
decider
event_types instance-attribute
event_types
projections instance-attribute
projections
snapshot instance-attribute
snapshot

EventSourcingConfig dataclass

EventSourcingConfig(
    *,
    store,
    event_serializer=None,
    snapshot_store=None,
    snapshot_state_serializer=None,
    checkpoint_store=None,
    enrichers=(),
)
store instance-attribute
store
event_serializer class-attribute instance-attribute
event_serializer = None
snapshot_store class-attribute instance-attribute
snapshot_store = None
snapshot_state_serializer class-attribute instance-attribute
snapshot_state_serializer = None
checkpoint_store class-attribute instance-attribute
checkpoint_store = None
enrichers class-attribute instance-attribute
enrichers = ()

EventSourcingRegistry dataclass

EventSourcingRegistry(
    projection_types=list(),
    catch_up_projection_types=list(),
    event_type_bindings=list(),
)
projection_types class-attribute instance-attribute
projection_types = field(default_factory=list)
catch_up_projection_types class-attribute instance-attribute
catch_up_projection_types = field(default_factory=list)
event_type_bindings class-attribute instance-attribute
event_type_bindings = field(default_factory=list)
merge
merge(other)
Source code in src/waku/eventsourcing/modules.py
def merge(self, other: EventSourcingRegistry) -> None:
    self._check_not_frozen()
    self.projection_types.extend(other.projection_types)
    self.catch_up_projection_types.extend(other.catch_up_projection_types)
    self.event_type_bindings.extend(other.event_type_bindings)
freeze
freeze()
Source code in src/waku/eventsourcing/modules.py
def freeze(self) -> None:
    self._frozen = True
handler_providers
handler_providers()
Source code in src/waku/eventsourcing/modules.py
def handler_providers(self) -> Iterator[Provider]:
    if self.projection_types:
        yield many(IProjection, *self.projection_types, collect=False)
    if self.catch_up_projection_types:
        yield many(ICatchUpProjection, *self.catch_up_projection_types, collect=False)
collector_providers staticmethod
collector_providers()
Source code in src/waku/eventsourcing/modules.py
@staticmethod
def collector_providers() -> Iterator[Provider]:
    yield many(IProjection, collect=True)
    yield many(ICatchUpProjection, collect=True)

EventSourcingModule

register classmethod
register(config)
Source code in src/waku/eventsourcing/modules.py
@classmethod
def register(cls, config: EventSourcingConfig, /) -> DynamicModule:
    providers: list[Provider] = [
        scoped(IEventStore, config.store),
    ]

    if config.event_serializer is not None:
        providers.append(scoped(IEventSerializer, config.event_serializer))

    if config.snapshot_store is not None:
        providers.append(scoped(ISnapshotStore, config.snapshot_store))

    if config.snapshot_state_serializer is not None:
        providers.append(scoped(ISnapshotStateSerializer, config.snapshot_state_serializer))

    if config.checkpoint_store is not None:
        providers.append(scoped(ICheckpointStore, config.checkpoint_store))

    providers.append(many(IMetadataEnricher, *config.enrichers))

    return DynamicModule(
        parent_module=cls,
        providers=providers,
        extensions=[
            EventSourcingRegistryAggregator(has_serializer=config.event_serializer is not None),
        ],
        is_global=True,
    )

EventSourcingExtension dataclass

EventSourcingExtension()

Bases: OnModuleConfigure

catch_up_bindings property
catch_up_bindings
registry property
registry
bind_aggregate
bind_aggregate(
    repository,
    event_types=(),
    projections=(),
    snapshot=None,
)
Source code in src/waku/eventsourcing/modules.py
def bind_aggregate(
    self,
    repository: type[EventSourcedRepository[Any]],
    event_types: Sequence[EventTypeSpec] = (),
    projections: Sequence[type[IProjection]] = (),
    snapshot: SnapshotOptions | None = None,
) -> Self:
    self._bindings.append(
        AggregateBinding(
            repository=repository,
            event_types=event_types,
            projections=projections,
            snapshot=snapshot,
        )
    )
    self._registry.projection_types.extend(projections)
    self._registry.event_type_bindings.extend(event_types)
    return self
bind_decider
bind_decider(
    repository,
    decider,
    event_types=(),
    projections=(),
    snapshot=None,
)
Source code in src/waku/eventsourcing/modules.py
def bind_decider(
    self,
    repository: type[DeciderRepository[Any, Any, Any]],
    decider: type[IDecider[Any, Any, Any]],
    event_types: Sequence[EventTypeSpec] = (),
    projections: Sequence[type[IProjection]] = (),
    snapshot: SnapshotOptions | None = None,
) -> Self:
    self._decider_bindings.append(
        DeciderBinding(
            repository=repository,
            decider=decider,
            event_types=event_types,
            projections=projections,
            snapshot=snapshot,
        )
    )
    self._registry.projection_types.extend(projections)
    self._registry.event_type_bindings.extend(event_types)
    return self
bind_catch_up_projection
bind_catch_up_projection(
    projection,
    *,
    error_policy=STOP,
    max_retry_attempts=0,
    base_retry_delay_seconds=10.0,
    max_retry_delay_seconds=300.0,
    batch_size=100,
    gap_detection_enabled=False,
    gap_timeout_seconds=10.0,
)
Source code in src/waku/eventsourcing/modules.py
def bind_catch_up_projection(  # noqa: PLR0913
    self,
    projection: type[ICatchUpProjection],
    *,
    error_policy: ErrorPolicy = ErrorPolicy.STOP,
    max_retry_attempts: int = 0,
    base_retry_delay_seconds: float = 10.0,
    max_retry_delay_seconds: float = 300.0,
    batch_size: int = 100,
    gap_detection_enabled: bool = False,
    gap_timeout_seconds: float = 10.0,
) -> Self:
    self._registry.catch_up_projection_types.append(projection)
    self._catch_up_bindings.append(
        CatchUpProjectionBinding(
            projection=projection,
            error_policy=error_policy,
            max_retry_attempts=max_retry_attempts,
            base_retry_delay_seconds=base_retry_delay_seconds,
            max_retry_delay_seconds=max_retry_delay_seconds,
            batch_size=batch_size,
            gap_detection_enabled=gap_detection_enabled,
            gap_timeout_seconds=gap_timeout_seconds,
        )
    )
    return self
aggregate_names
aggregate_names()
Source code in src/waku/eventsourcing/modules.py
def aggregate_names(self) -> Iterator[tuple[str, type]]:
    for binding in self._bindings:
        yield binding.repository.aggregate_name, binding.repository
    for binding in self._decider_bindings:
        yield binding.repository.aggregate_name, binding.repository
snapshot_bindings
snapshot_bindings()
Source code in src/waku/eventsourcing/modules.py
def snapshot_bindings(self) -> Iterator[tuple[str, SnapshotOptions]]:
    for binding in self._bindings:
        if binding.snapshot is not None:
            yield binding.repository.aggregate_name, binding.snapshot
    for binding in self._decider_bindings:
        if binding.snapshot is not None:
            yield binding.repository.aggregate_name, binding.snapshot
on_module_configure
on_module_configure(metadata)
Source code in src/waku/eventsourcing/modules.py
def on_module_configure(self, metadata: ModuleMetadata) -> None:
    for binding in self._bindings:
        repo_type = binding.repository
        metadata.providers.append(scoped(WithParents[repo_type], repo_type))  # type: ignore[misc,valid-type]

    for binding in self._decider_bindings:
        repo_type = binding.repository
        metadata.providers.append(scoped(WithParents[repo_type], repo_type))  # type: ignore[misc,valid-type]
        decider_iface = self._resolve_decider_interface(repo_type)
        metadata.providers.append(scoped(decider_iface, binding.decider))

EventSourcingRegistryAggregator

EventSourcingRegistryAggregator(*, has_serializer=False)

Bases: OnModuleRegistration

Source code in src/waku/eventsourcing/modules.py
def __init__(self, *, has_serializer: bool = False) -> None:
    self._has_serializer = has_serializer
on_module_registration
on_module_registration(registry, owning_module, context)
Source code in src/waku/eventsourcing/modules.py
@override
def on_module_registration(
    self,
    registry: ModuleMetadataRegistry,
    owning_module: ModuleType,
    context: Mapping[Any, Any] | None,
) -> None:
    aggregated = EventSourcingRegistry()
    all_aggregate_names: defaultdict[str, list[str]] = defaultdict(list)
    all_catch_up_bindings: list[CatchUpProjectionBinding] = []

    for module_type, ext in registry.find_extensions(EventSourcingExtension):
        aggregated.merge(ext.registry)
        all_catch_up_bindings.extend(ext.catch_up_bindings)
        for provider in ext.registry.handler_providers():
            registry.add_provider(module_type, provider)
        for name, repo_type in ext.aggregate_names():
            all_aggregate_names[name].append(repo_type.__qualname__)

    for name, repo_names in all_aggregate_names.items():
        if len(repo_names) > 1:
            raise DuplicateAggregateNameError(name, repo_names)

    for provider in aggregated.collector_providers():
        registry.add_provider(owning_module, provider)

    event_type_registry, upcaster_chain = self._build_type_registry(aggregated)
    registry.add_provider(owning_module, object_(event_type_registry))
    registry.add_provider(owning_module, object_(upcaster_chain))

    resolved_bindings = self._resolve_catch_up_bindings(all_catch_up_bindings, event_type_registry)
    registry.add_provider(
        owning_module,
        object_(CatchUpProjectionRegistry(resolved_bindings)),
    )

    aggregated.freeze()
    registry.add_provider(owning_module, object_(aggregated))

    snapshot_config_registry = self._build_snapshot_config_registry(
        registry.find_extensions(EventSourcingExtension),
    )
    registry.add_provider(owning_module, object_(snapshot_config_registry))

    if self._has_serializer and len(event_type_registry) == 0:
        msg = (
            'A serializer is configured but no event types were registered via '
            'bind_aggregate(event_types=[...]). Deserialization will fail at runtime.'
        )
        raise EventSourcingConfigError(msg)

projection

CatchUpProjectionBinding dataclass

CatchUpProjectionBinding(
    *,
    projection,
    error_policy=STOP,
    max_retry_attempts=0,
    base_retry_delay_seconds=10.0,
    max_retry_delay_seconds=300.0,
    batch_size=100,
    event_type_names=None,
    gap_detection_enabled=False,
    gap_timeout_seconds=10.0,
)
projection instance-attribute
projection
error_policy class-attribute instance-attribute
error_policy = STOP
max_retry_attempts class-attribute instance-attribute
max_retry_attempts = 0
base_retry_delay_seconds class-attribute instance-attribute
base_retry_delay_seconds = 10.0
max_retry_delay_seconds class-attribute instance-attribute
max_retry_delay_seconds = 300.0
batch_size class-attribute instance-attribute
batch_size = 100
event_type_names class-attribute instance-attribute
event_type_names = None
gap_detection_enabled class-attribute instance-attribute
gap_detection_enabled = False
gap_timeout_seconds class-attribute instance-attribute
gap_timeout_seconds = 10.0

Checkpoint dataclass

Checkpoint(*, projection_name, position, updated_at)
projection_name instance-attribute
projection_name
position instance-attribute
position
updated_at instance-attribute
updated_at

LeaseConfig dataclass

LeaseConfig(
    *, ttl_seconds=30.0, renew_interval_factor=1 / 3
)
ttl_seconds class-attribute instance-attribute
ttl_seconds = 30.0
renew_interval_factor class-attribute instance-attribute
renew_interval_factor = 1 / 3
renew_interval_seconds property
renew_interval_seconds

PollingConfig dataclass

PollingConfig(
    *,
    poll_interval_min_seconds=0.5,
    poll_interval_max_seconds=5.0,
    poll_interval_step_seconds=1.0,
    poll_interval_jitter_factor=0.1,
)
poll_interval_min_seconds class-attribute instance-attribute
poll_interval_min_seconds = 0.5
poll_interval_max_seconds class-attribute instance-attribute
poll_interval_max_seconds = 5.0
poll_interval_step_seconds class-attribute instance-attribute
poll_interval_step_seconds = 1.0
poll_interval_jitter_factor class-attribute instance-attribute
poll_interval_jitter_factor = 0.1

InMemoryCheckpointStore

InMemoryCheckpointStore()

Bases: ICheckpointStore

Source code in src/waku/eventsourcing/projection/in_memory.py
def __init__(self) -> None:
    self._checkpoints: dict[str, Checkpoint] = {}
load async
load(projection_name)
Source code in src/waku/eventsourcing/projection/in_memory.py
async def load(self, projection_name: str, /) -> Checkpoint | None:
    return self._checkpoints.get(projection_name)
save async
save(checkpoint)
Source code in src/waku/eventsourcing/projection/in_memory.py
async def save(self, checkpoint: Checkpoint, /) -> None:
    self._checkpoints[checkpoint.projection_name] = checkpoint

ErrorPolicy

Bases: StrEnum

SKIP class-attribute instance-attribute
SKIP = auto()
STOP class-attribute instance-attribute
STOP = auto()

ICatchUpProjection

Bases: IProjection, ABC

Projection that processes events asynchronously via polling.

At-least-once delivery: the checkpoint is saved only after project() has processed a batch, so a crash before the checkpoint save causes those events to be re-delivered on restart; project() must therefore be idempotent.

Set event_types to filter which event types this projection receives. When None (default), all events are delivered.

projection_name class-attribute
projection_name
event_types class-attribute
event_types = None
project abstractmethod async
project(events)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def project(self, events: Sequence[StoredEvent], /) -> None: ...
on_skip async
on_skip(events, error)
Source code in src/waku/eventsourcing/projection/interfaces.py
async def on_skip(self, events: Sequence[StoredEvent], error: Exception) -> None:
    pass
teardown async
teardown()
Source code in src/waku/eventsourcing/projection/interfaces.py
async def teardown(self) -> None:
    pass

ICheckpointStore

Bases: ABC

load abstractmethod async
load(projection_name)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def load(self, projection_name: str, /) -> Checkpoint | None: ...
save abstractmethod async
save(checkpoint)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def save(self, checkpoint: Checkpoint, /) -> None: ...

IProjection

Bases: ABC

projection_name class-attribute
projection_name
project abstractmethod async
project(events)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def project(self, events: Sequence[StoredEvent], /) -> None: ...

CatchUpProjectionRegistry

CatchUpProjectionRegistry(bindings)
Source code in src/waku/eventsourcing/projection/registry.py
def __init__(self, bindings: tuple[CatchUpProjectionBinding, ...]) -> None:
    self._bindings = bindings
    by_name: dict[str, CatchUpProjectionBinding] = {}
    for b in self._bindings:
        name = b.projection.projection_name
        if name in by_name:
            msg = f'Duplicate projection name {name!r}'
            raise ValueError(msg)
        by_name[name] = b
    self._by_name = by_name
get
get(projection_name)
Source code in src/waku/eventsourcing/projection/registry.py
def get(self, projection_name: str) -> CatchUpProjectionBinding:
    try:
        return self._by_name[projection_name]
    except KeyError:
        msg = f'Projection {projection_name!r} not found'
        raise ValueError(msg) from None

CatchUpProjectionRunner

CatchUpProjectionRunner(
    container, lock, bindings, polling=_DEFAULT_POLLING
)
Source code in src/waku/eventsourcing/projection/runner.py
def __init__(
    self,
    container: AsyncContainer,
    lock: IProjectionLock,
    bindings: Sequence[CatchUpProjectionBinding],
    polling: PollingConfig = _DEFAULT_POLLING,
) -> None:
    self._container = container
    self._lock = lock
    self._bindings = tuple(bindings)
    self._polling = polling
    self._shutdown_event = anyio.Event()
create async classmethod
create(
    container,
    lock,
    projections=None,
    polling=_DEFAULT_POLLING,
)
Source code in src/waku/eventsourcing/projection/runner.py
@classmethod
async def create(
    cls,
    container: AsyncContainer,
    lock: IProjectionLock,
    projections: Sequence[type[ICatchUpProjection]] | None = None,
    polling: PollingConfig = _DEFAULT_POLLING,
) -> CatchUpProjectionRunner:
    async with container() as scope:
        projection_registry = await scope.get(CatchUpProjectionRegistry)
    if projections is not None:
        projection_set = set(projections)
        bindings = [b for b in projection_registry if b.projection in projection_set]
    else:
        bindings = list(projection_registry)
    return cls(
        container=container,
        lock=lock,
        bindings=bindings,
        polling=polling,
    )
run async
run()
Source code in src/waku/eventsourcing/projection/runner.py
async def run(self) -> None:
    if not self._bindings:
        logger.warning('No catch-up projections registered, exiting')
        return

    async with anyio.create_task_group() as tg:
        tg.start_soon(self._signal_listener, tg.cancel_scope)
        tg.start_soon(self._run_all_projections, tg.cancel_scope)
rebuild async
rebuild(projection_name)
Source code in src/waku/eventsourcing/projection/runner.py
async def rebuild(self, projection_name: str) -> None:
    binding = self._find_binding(projection_name)

    async with self._lock.acquire(projection_name) as acquired:
        if not acquired:
            msg = f'Projection {projection_name!r} is locked by another instance'
            raise RuntimeError(msg)

        async with self._container() as scope:
            projection = await scope.get(binding.projection)
            await projection.teardown()

        processor = ProjectionProcessor(binding)

        async with self._container() as scope:
            checkpoint_store = await scope.get(ICheckpointStore)
            await processor.reset_checkpoint(checkpoint_store)

        while True:
            async with self._container() as scope:
                projection = await scope.get(binding.projection)
                reader = await scope.get(IEventReader)
                checkpoint_store = await scope.get(ICheckpointStore)
                processed = await processor.run_once(projection, reader, checkpoint_store)

            if processed == 0:
                break
request_shutdown
request_shutdown()
Source code in src/waku/eventsourcing/projection/runner.py
def request_shutdown(self) -> None:
    self._shutdown_event.set()

adaptive_interval

AdaptiveInterval
AdaptiveInterval(
    min_seconds,
    max_seconds,
    step_seconds,
    jitter_factor=0.1,
)

Adaptive polling interval: fast when busy, slow when idle. The interval resets to the minimum whenever work is done and steps toward the maximum while idle.

Source code in src/waku/eventsourcing/projection/adaptive_interval.py
def __init__(
    self,
    min_seconds: float,
    max_seconds: float,
    step_seconds: float,
    jitter_factor: float = 0.1,
) -> None:
    self._min = min_seconds
    self._max = max_seconds
    self._step = step_seconds
    self._jitter_factor = jitter_factor
    self._current = min_seconds
current property
current
current_with_jitter
current_with_jitter()
Source code in src/waku/eventsourcing/projection/adaptive_interval.py
def current_with_jitter(self) -> float:
    return self._current * random.uniform(1 - self._jitter_factor, 1 + self._jitter_factor)  # noqa: S311
on_work_done
on_work_done()
Source code in src/waku/eventsourcing/projection/adaptive_interval.py
def on_work_done(self) -> None:
    self._current = self._min
on_idle
on_idle()
Source code in src/waku/eventsourcing/projection/adaptive_interval.py
def on_idle(self) -> None:
    self._current = min(self._current + self._step, self._max)
calculate_backoff_with_jitter
calculate_backoff_with_jitter(
    attempt, base_delay_seconds, max_delay_seconds
)

Full jitter: random(0, min(base * 2^attempt, max_delay)).

Source code in src/waku/eventsourcing/projection/adaptive_interval.py
def calculate_backoff_with_jitter(
    attempt: int,
    base_delay_seconds: float,
    max_delay_seconds: float,
) -> float:
    """Full jitter: random(0, min(base * 2^attempt, max_delay))."""
    max_delay = min(base_delay_seconds * (2**attempt), max_delay_seconds)
    return random.uniform(0, max_delay)  # noqa: S311

binding

CatchUpProjectionBinding dataclass
CatchUpProjectionBinding(
    *,
    projection,
    error_policy=STOP,
    max_retry_attempts=0,
    base_retry_delay_seconds=10.0,
    max_retry_delay_seconds=300.0,
    batch_size=100,
    event_type_names=None,
    gap_detection_enabled=False,
    gap_timeout_seconds=10.0,
)
projection instance-attribute
projection
error_policy class-attribute instance-attribute
error_policy = STOP
max_retry_attempts class-attribute instance-attribute
max_retry_attempts = 0
base_retry_delay_seconds class-attribute instance-attribute
base_retry_delay_seconds = 10.0
max_retry_delay_seconds class-attribute instance-attribute
max_retry_delay_seconds = 300.0
batch_size class-attribute instance-attribute
batch_size = 100
event_type_names class-attribute instance-attribute
event_type_names = None
gap_detection_enabled class-attribute instance-attribute
gap_detection_enabled = False
gap_timeout_seconds class-attribute instance-attribute
gap_timeout_seconds = 10.0

checkpoint

Checkpoint dataclass
Checkpoint(*, projection_name, position, updated_at)
projection_name instance-attribute
projection_name
position instance-attribute
position
updated_at instance-attribute
updated_at

config

PollingConfig dataclass
PollingConfig(
    *,
    poll_interval_min_seconds=0.5,
    poll_interval_max_seconds=5.0,
    poll_interval_step_seconds=1.0,
    poll_interval_jitter_factor=0.1,
)
poll_interval_min_seconds class-attribute instance-attribute
poll_interval_min_seconds = 0.5
poll_interval_max_seconds class-attribute instance-attribute
poll_interval_max_seconds = 5.0
poll_interval_step_seconds class-attribute instance-attribute
poll_interval_step_seconds = 1.0
poll_interval_jitter_factor class-attribute instance-attribute
poll_interval_jitter_factor = 0.1
LeaseConfig dataclass
LeaseConfig(
    *, ttl_seconds=30.0, renew_interval_factor=1 / 3
)
ttl_seconds class-attribute instance-attribute
ttl_seconds = 30.0
renew_interval_factor class-attribute instance-attribute
renew_interval_factor = 1 / 3
renew_interval_seconds property
renew_interval_seconds

gap_detection

GapTracker
GapTracker(gap_timeout_seconds, clock=monotonic)
Source code in src/waku/eventsourcing/projection/gap_detection.py
def __init__(
    self,
    gap_timeout_seconds: float,
    clock: Callable[[], float] = time.monotonic,
) -> None:
    self._gap_timeout_seconds = gap_timeout_seconds
    self._clock = clock
    self._known_gaps: dict[int, float] = {}
known_gaps property
known_gaps
safe_position
safe_position(checkpoint, committed_positions)

Return the highest position the checkpoint can safely advance to.

PARAMETER DESCRIPTION
checkpoint

Current checkpoint position.

TYPE: int

committed_positions

Committed global positions in ascending order.

TYPE: Sequence[int]

Source code in src/waku/eventsourcing/projection/gap_detection.py
def safe_position(self, checkpoint: int, committed_positions: Sequence[int]) -> int:
    """Return the highest position the checkpoint can safely advance to.

    Args:
        checkpoint: Current checkpoint position.
        committed_positions: Committed global positions in ascending order.
    """
    if not committed_positions:
        return checkpoint

    max_position = committed_positions[-1]
    position_set = set(committed_positions)
    now = self._clock()
    safe = checkpoint

    pos = checkpoint + 1
    while pos <= max_position:
        if pos in position_set:
            safe = pos
            self._known_gaps.pop(pos, None)
        elif pos in self._known_gaps:
            if now - self._known_gaps[pos] >= self._gap_timeout_seconds:
                safe = pos
                del self._known_gaps[pos]
            else:
                break
        else:
            self._known_gaps[pos] = now
            break
        pos += 1

    if pos > max_position:
        return safe

    # Track remaining gaps so they start their timeout clocks
    for remaining_pos in range(pos + 1, max_position + 1):
        if remaining_pos not in position_set and remaining_pos not in self._known_gaps:
            self._known_gaps[remaining_pos] = now

    return safe

in_memory

InMemoryCheckpointStore
InMemoryCheckpointStore()

Bases: ICheckpointStore

Source code in src/waku/eventsourcing/projection/in_memory.py
def __init__(self) -> None:
    self._checkpoints: dict[str, Checkpoint] = {}
load async
load(projection_name)
Source code in src/waku/eventsourcing/projection/in_memory.py
async def load(self, projection_name: str, /) -> Checkpoint | None:
    return self._checkpoints.get(projection_name)
save async
save(checkpoint)
Source code in src/waku/eventsourcing/projection/in_memory.py
async def save(self, checkpoint: Checkpoint, /) -> None:
    self._checkpoints[checkpoint.projection_name] = checkpoint

interfaces

IProjection

Bases: ABC

projection_name class-attribute
projection_name
project abstractmethod async
project(events)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def project(self, events: Sequence[StoredEvent], /) -> None: ...
ErrorPolicy

Bases: StrEnum

SKIP class-attribute instance-attribute
SKIP = auto()
STOP class-attribute instance-attribute
STOP = auto()
ICatchUpProjection

Bases: IProjection, ABC

Projection that processes events asynchronously via polling.

At-least-once delivery: the checkpoint is saved after project() processes a batch, so a crash before checkpoint save causes re-delivery on restart. project() must be idempotent.

Set event_types to filter which event types this projection receives. When None (default), all events are delivered.

event_types class-attribute
event_types = None
projection_name class-attribute
projection_name
on_skip async
on_skip(events, error)
Source code in src/waku/eventsourcing/projection/interfaces.py
async def on_skip(self, events: Sequence[StoredEvent], error: Exception) -> None:
    pass
teardown async
teardown()
Source code in src/waku/eventsourcing/projection/interfaces.py
async def teardown(self) -> None:
    pass
project abstractmethod async
project(events)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def project(self, events: Sequence[StoredEvent], /) -> None: ...
ICheckpointStore

Bases: ABC

load abstractmethod async
load(projection_name)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def load(self, projection_name: str, /) -> Checkpoint | None: ...
save abstractmethod async
save(checkpoint)
Source code in src/waku/eventsourcing/projection/interfaces.py
@abc.abstractmethod
async def save(self, checkpoint: Checkpoint, /) -> None: ...

lock

InMemoryProjectionLock
InMemoryProjectionLock()

Bases: IProjectionLock

Always acquires the lock when running in a single process. Tracks held locks for testing.

Source code in src/waku/eventsourcing/projection/lock/in_memory.py
def __init__(self) -> None:
    self._held: set[str] = set()
acquire async
acquire(projection_name)
Source code in src/waku/eventsourcing/projection/lock/in_memory.py
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    if projection_name in self._held:
        yield False
        return

    self._held.add(projection_name)
    try:
        yield True
    finally:
        self._held.discard(projection_name)
IProjectionLock

Bases: ABC

Lock abstraction ensuring only one catch-up projection instance runs at a time.

acquire abstractmethod async
acquire(projection_name)

Yields True if the lock was acquired, False if held by another instance.

Source code in src/waku/eventsourcing/projection/lock/interfaces.py
@abc.abstractmethod
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    """Yields True if the lock was acquired, False if held by another instance."""
    yield False  # pragma: no cover
in_memory
InMemoryProjectionLock
InMemoryProjectionLock()

Bases: IProjectionLock

Always acquires the lock when running in a single process. Tracks held locks for testing.

Source code in src/waku/eventsourcing/projection/lock/in_memory.py
def __init__(self) -> None:
    self._held: set[str] = set()
acquire async
acquire(projection_name)
Source code in src/waku/eventsourcing/projection/lock/in_memory.py
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    if projection_name in self._held:
        yield False
        return

    self._held.add(projection_name)
    try:
        yield True
    finally:
        self._held.discard(projection_name)
interfaces
IProjectionLock

Bases: ABC

Lock abstraction ensuring only one catch-up projection instance runs at a time.

acquire abstractmethod async
acquire(projection_name)

Yields True if the lock was acquired, False if held by another instance.

Source code in src/waku/eventsourcing/projection/lock/interfaces.py
@abc.abstractmethod
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    """Yields True if the lock was acquired, False if held by another instance."""
    yield False  # pragma: no cover
sqlalchemy
PostgresAdvisoryProjectionLock
PostgresAdvisoryProjectionLock(engine)

Bases: IProjectionLock

Session-level PostgreSQL advisory lock.

Holds a database connection for the entire duration of the lock because pg_advisory_lock is bound to the session — releasing the connection releases the lock. For long-running projections consider PostgresLeaseProjectionLock, which only connects during heartbeats.

Not compatible with PgBouncer in transaction-pooling mode.

Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/advisory.py
def __init__(self, engine: AsyncEngine) -> None:
    self._engine = engine
acquire async
acquire(projection_name)
Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/advisory.py
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    async with self._engine.connect() as conn:
        await conn.execution_options(isolation_level='AUTOCOMMIT')
        result = await conn.execute(_LOCK_SQL, {'name': projection_name})
        acquired = bool(result.scalar_one())

        if not acquired:
            yield False
            return

        logger.debug('Advisory lock acquired for %s', projection_name)
        try:
            yield True
        finally:
            try:
                await conn.execute(_UNLOCK_SQL, {'name': projection_name})
                logger.debug('Advisory lock released for %s', projection_name)
            except Exception:
                logger.warning('Failed to release advisory lock for %s', projection_name, exc_info=True)
PostgresLeaseProjectionLock
PostgresLeaseProjectionLock(engine, config)

Bases: IProjectionLock

Production lease-based projection lock backed by PostgreSQL.

Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/lock.py
def __init__(self, engine: AsyncEngine, config: LeaseConfig) -> None:
    self._engine = engine
    self._config = config
    self._holder_id = str(uuid.uuid4())
acquire async
acquire(projection_name)
Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/lock.py
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    async with self._engine.connect() as conn:
        await conn.execution_options(isolation_level='AUTOCOMMIT')
        result = await conn.execute(
            _UPSERT_SQL,
            {'name': projection_name, 'holder': self._holder_id, 'ttl': self._config.ttl_seconds},
        )
        row = result.fetchone()

    if row is None:
        yield False
        return

    logger.debug('Lease acquired for %s by %s', projection_name, self._holder_id)

    try:
        async with anyio.create_task_group() as tg:
            tg.start_soon(self._heartbeat, projection_name, tg.cancel_scope)
            try:
                yield True
            finally:
                tg.cancel_scope.cancel()
    finally:
        await self._release(projection_name)
bind_lease_tables
bind_lease_tables(metadata)
Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/tables.py
def bind_lease_tables(metadata: MetaData) -> Table:
    if es_projection_leases_table.name in metadata.tables:
        return metadata.tables[es_projection_leases_table.name]
    return es_projection_leases_table.to_metadata(metadata)
advisory
logger module-attribute
logger = getLogger(__name__)
PostgresAdvisoryProjectionLock
PostgresAdvisoryProjectionLock(engine)

Bases: IProjectionLock

Session-level PostgreSQL advisory lock.

Holds a database connection for the entire duration of the lock because pg_advisory_lock is bound to the session — releasing the connection releases the lock. For long-running projections consider PostgresLeaseProjectionLock, which only connects during heartbeats.

Not compatible with PgBouncer in transaction-pooling mode.

Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/advisory.py
def __init__(self, engine: AsyncEngine) -> None:
    self._engine = engine
acquire async
acquire(projection_name)
Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/advisory.py
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    async with self._engine.connect() as conn:
        await conn.execution_options(isolation_level='AUTOCOMMIT')
        result = await conn.execute(_LOCK_SQL, {'name': projection_name})
        acquired = bool(result.scalar_one())

        if not acquired:
            yield False
            return

        logger.debug('Advisory lock acquired for %s', projection_name)
        try:
            yield True
        finally:
            try:
                await conn.execute(_UNLOCK_SQL, {'name': projection_name})
                logger.debug('Advisory lock released for %s', projection_name)
            except Exception:
                logger.warning('Failed to release advisory lock for %s', projection_name, exc_info=True)
lock
logger module-attribute
logger = getLogger(__name__)
PostgresLeaseProjectionLock
PostgresLeaseProjectionLock(engine, config)

Bases: IProjectionLock

Production lease-based projection lock backed by PostgreSQL.

Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/lock.py
def __init__(self, engine: AsyncEngine, config: LeaseConfig) -> None:
    self._engine = engine
    self._config = config
    self._holder_id = str(uuid.uuid4())
acquire async
acquire(projection_name)
Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/lock.py
@contextlib.asynccontextmanager
async def acquire(self, projection_name: str) -> AsyncGenerator[bool]:
    async with self._engine.connect() as conn:
        await conn.execution_options(isolation_level='AUTOCOMMIT')
        result = await conn.execute(
            _UPSERT_SQL,
            {'name': projection_name, 'holder': self._holder_id, 'ttl': self._config.ttl_seconds},
        )
        row = result.fetchone()

    if row is None:
        yield False
        return

    logger.debug('Lease acquired for %s by %s', projection_name, self._holder_id)

    try:
        async with anyio.create_task_group() as tg:
            tg.start_soon(self._heartbeat, projection_name, tg.cancel_scope)
            try:
                yield True
            finally:
                tg.cancel_scope.cancel()
    finally:
        await self._release(projection_name)
tables
es_projection_leases_table module-attribute
es_projection_leases_table = Table(
    'es_projection_leases',
    _internal_metadata,
    Column('projection_name', Text, primary_key=True),
    Column('holder_id', Text, nullable=False),
    Column(
        'acquired_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
    ),
    Column(
        'renewed_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
    ),
    Column(
        'expires_at',
        TIMESTAMP(timezone=True),
        nullable=False,
    ),
)
bind_lease_tables
bind_lease_tables(metadata)
Source code in src/waku/eventsourcing/projection/lock/sqlalchemy/tables.py
def bind_lease_tables(metadata: MetaData) -> Table:
    if es_projection_leases_table.name in metadata.tables:
        return metadata.tables[es_projection_leases_table.name]
    return es_projection_leases_table.to_metadata(metadata)

processor

logger module-attribute
logger = getLogger(__name__)
ProjectionProcessor
ProjectionProcessor(binding)
Source code in src/waku/eventsourcing/projection/processor.py
def __init__(self, binding: CatchUpProjectionBinding) -> None:
    self._binding = binding
    self._attempts: int = 0
    self._gap_tracker: GapTracker | None = (
        GapTracker(binding.gap_timeout_seconds) if binding.gap_detection_enabled else None
    )
projection_name property
projection_name
run_once async
run_once(projection, event_reader, checkpoint_store)
Source code in src/waku/eventsourcing/projection/processor.py
async def run_once(
    self,
    projection: ICatchUpProjection,
    event_reader: IEventReader,
    checkpoint_store: ICheckpointStore,
) -> int:
    checkpoint = await checkpoint_store.load(self.projection_name)
    position = checkpoint.position if checkpoint is not None else -1

    events = await event_reader.read_all(
        after_position=position,
        count=self._binding.batch_size,
        event_types=self._binding.event_type_names,
    )
    if not events:
        return 0

    if self._gap_tracker is not None:
        events = await self._apply_gap_detection(events, event_reader, position)
        if not events:
            return 0

    try:
        await projection.project(events)
    except Exception as exc:  # noqa: BLE001
        return await self._handle_error(exc, events, projection, checkpoint_store)

    await checkpoint_store.save(
        Checkpoint(
            projection_name=self.projection_name,
            position=events[-1].global_position,
            updated_at=datetime.now(UTC),
        ),
    )
    self._attempts = 0
    return len(events)
reset_checkpoint async
reset_checkpoint(checkpoint_store)
Source code in src/waku/eventsourcing/projection/processor.py
async def reset_checkpoint(self, checkpoint_store: ICheckpointStore) -> None:
    await checkpoint_store.save(
        Checkpoint(
            projection_name=self.projection_name,
            position=-1,
            updated_at=datetime.now(UTC),
        ),
    )

registry

CatchUpProjectionRegistry
CatchUpProjectionRegistry(bindings)
Source code in src/waku/eventsourcing/projection/registry.py
def __init__(self, bindings: tuple[CatchUpProjectionBinding, ...]) -> None:
    self._bindings = bindings
    by_name: dict[str, CatchUpProjectionBinding] = {}
    for b in self._bindings:
        name = b.projection.projection_name
        if name in by_name:
            msg = f'Duplicate projection name {name!r}'
            raise ValueError(msg)
        by_name[name] = b
    self._by_name = by_name
get
get(projection_name)
Source code in src/waku/eventsourcing/projection/registry.py
def get(self, projection_name: str) -> CatchUpProjectionBinding:
    try:
        return self._by_name[projection_name]
    except KeyError:
        msg = f'Projection {projection_name!r} not found'
        raise ValueError(msg) from None

runner

logger module-attribute
logger = getLogger(__name__)
CatchUpProjectionRunner
CatchUpProjectionRunner(
    container, lock, bindings, polling=_DEFAULT_POLLING
)
Source code in src/waku/eventsourcing/projection/runner.py
def __init__(
    self,
    container: AsyncContainer,
    lock: IProjectionLock,
    bindings: Sequence[CatchUpProjectionBinding],
    polling: PollingConfig = _DEFAULT_POLLING,
) -> None:
    self._container = container
    self._lock = lock
    self._bindings = tuple(bindings)
    self._polling = polling
    self._shutdown_event = anyio.Event()
create async classmethod
create(
    container,
    lock,
    projections=None,
    polling=_DEFAULT_POLLING,
)
Source code in src/waku/eventsourcing/projection/runner.py
@classmethod
async def create(
    cls,
    container: AsyncContainer,
    lock: IProjectionLock,
    projections: Sequence[type[ICatchUpProjection]] | None = None,
    polling: PollingConfig = _DEFAULT_POLLING,
) -> CatchUpProjectionRunner:
    async with container() as scope:
        projection_registry = await scope.get(CatchUpProjectionRegistry)
    if projections is not None:
        projection_set = set(projections)
        bindings = [b for b in projection_registry if b.projection in projection_set]
    else:
        bindings = list(projection_registry)
    return cls(
        container=container,
        lock=lock,
        bindings=bindings,
        polling=polling,
    )
run async
run()
Source code in src/waku/eventsourcing/projection/runner.py
async def run(self) -> None:
    if not self._bindings:
        logger.warning('No catch-up projections registered, exiting')
        return

    async with anyio.create_task_group() as tg:
        tg.start_soon(self._signal_listener, tg.cancel_scope)
        tg.start_soon(self._run_all_projections, tg.cancel_scope)
rebuild async
rebuild(projection_name)
Source code in src/waku/eventsourcing/projection/runner.py
async def rebuild(self, projection_name: str) -> None:
    binding = self._find_binding(projection_name)

    async with self._lock.acquire(projection_name) as acquired:
        if not acquired:
            msg = f'Projection {projection_name!r} is locked by another instance'
            raise RuntimeError(msg)

        async with self._container() as scope:
            projection = await scope.get(binding.projection)
            await projection.teardown()

        processor = ProjectionProcessor(binding)

        async with self._container() as scope:
            checkpoint_store = await scope.get(ICheckpointStore)
            await processor.reset_checkpoint(checkpoint_store)

        while True:
            async with self._container() as scope:
                projection = await scope.get(binding.projection)
                reader = await scope.get(IEventReader)
                checkpoint_store = await scope.get(ICheckpointStore)
                processed = await processor.run_once(projection, reader, checkpoint_store)

            if processed == 0:
                break
request_shutdown
request_shutdown()
Source code in src/waku/eventsourcing/projection/runner.py
def request_shutdown(self) -> None:
    self._shutdown_event.set()

sqlalchemy

SqlAlchemyCheckpointStore
SqlAlchemyCheckpointStore(session, checkpoints_table)

Bases: ICheckpointStore

Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
def __init__(self, session: AsyncSession, checkpoints_table: Table) -> None:
    self._session = session
    self._checkpoints = checkpoints_table
load async
load(projection_name)
Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
async def load(self, projection_name: str, /) -> Checkpoint | None:
    query = select(self._checkpoints).where(self._checkpoints.c.projection_name == projection_name)
    result = await self._session.execute(query)
    row: Any = result.one_or_none()
    if row is None:
        return None
    return Checkpoint(
        projection_name=row.projection_name,
        position=row.position,
        updated_at=row.updated_at,
    )
save async
save(checkpoint)
Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
async def save(self, checkpoint: Checkpoint, /) -> None:
    stmt = pg_insert(self._checkpoints).values(
        projection_name=checkpoint.projection_name,
        position=checkpoint.position,
        updated_at=checkpoint.updated_at,
    )
    stmt = stmt.on_conflict_do_update(
        index_elements=['projection_name'],
        set_={
            'position': stmt.excluded.position,
            'updated_at': stmt.excluded.updated_at,
        },
    )
    await self._session.execute(stmt)
    await self._session.flush()
make_sqlalchemy_checkpoint_store
make_sqlalchemy_checkpoint_store(checkpoints_table)
Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
def make_sqlalchemy_checkpoint_store(
    checkpoints_table: Table,
) -> Callable[..., SqlAlchemyCheckpointStore]:
    def factory(session: AsyncSession) -> SqlAlchemyCheckpointStore:
        return SqlAlchemyCheckpointStore(session, checkpoints_table)

    return factory
bind_checkpoint_tables
bind_checkpoint_tables(metadata)
Source code in src/waku/eventsourcing/projection/sqlalchemy/tables.py
def bind_checkpoint_tables(metadata: MetaData) -> Table:
    if es_checkpoints_table.name in metadata.tables:
        return metadata.tables[es_checkpoints_table.name]
    return es_checkpoints_table.to_metadata(metadata)
store
SqlAlchemyCheckpointStore
SqlAlchemyCheckpointStore(session, checkpoints_table)

Bases: ICheckpointStore

Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
def __init__(self, session: AsyncSession, checkpoints_table: Table) -> None:
    self._session = session
    self._checkpoints = checkpoints_table
load async
load(projection_name)
Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
async def load(self, projection_name: str, /) -> Checkpoint | None:
    query = select(self._checkpoints).where(self._checkpoints.c.projection_name == projection_name)
    result = await self._session.execute(query)
    row: Any = result.one_or_none()
    if row is None:
        return None
    return Checkpoint(
        projection_name=row.projection_name,
        position=row.position,
        updated_at=row.updated_at,
    )
save async
save(checkpoint)
Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
async def save(self, checkpoint: Checkpoint, /) -> None:
    stmt = pg_insert(self._checkpoints).values(
        projection_name=checkpoint.projection_name,
        position=checkpoint.position,
        updated_at=checkpoint.updated_at,
    )
    stmt = stmt.on_conflict_do_update(
        index_elements=['projection_name'],
        set_={
            'position': stmt.excluded.position,
            'updated_at': stmt.excluded.updated_at,
        },
    )
    await self._session.execute(stmt)
    await self._session.flush()
make_sqlalchemy_checkpoint_store
make_sqlalchemy_checkpoint_store(checkpoints_table)
Source code in src/waku/eventsourcing/projection/sqlalchemy/store.py
def make_sqlalchemy_checkpoint_store(
    checkpoints_table: Table,
) -> Callable[..., SqlAlchemyCheckpointStore]:
    def factory(session: AsyncSession) -> SqlAlchemyCheckpointStore:
        return SqlAlchemyCheckpointStore(session, checkpoints_table)

    return factory
tables
es_checkpoints_table module-attribute
es_checkpoints_table = Table(
    'es_checkpoints',
    _internal_metadata,
    Column('projection_name', Text, primary_key=True),
    Column('position', BigInteger, nullable=False),
    Column(
        'updated_at',
        TIMESTAMP(timezone=True),
        nullable=False,
    ),
    Column(
        'created_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
    ),
)
bind_checkpoint_tables
bind_checkpoint_tables(metadata)
Source code in src/waku/eventsourcing/projection/sqlalchemy/tables.py
def bind_checkpoint_tables(metadata: MetaData) -> Table:
    if es_checkpoints_table.name in metadata.tables:
        return metadata.tables[es_checkpoints_table.name]
    return es_checkpoints_table.to_metadata(metadata)

repository

logger module-attribute

logger = getLogger(__name__)

EventSourcedRepository

EventSourcedRepository(event_store)

Bases: ABC, Generic[AggregateT]

Source code in src/waku/eventsourcing/repository.py
def __init__(self, event_store: IEventStore) -> None:
    self._event_store = event_store
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/repository.py
async def load(self, aggregate_id: str) -> AggregateT:
    stream_id = self._stream_id(aggregate_id)
    stored_events = await read_aggregate_stream(
        self._event_store,
        stream_id,
        max_stream_length=self.max_stream_length,
    )
    if not stored_events:
        raise AggregateNotFoundError(
            aggregate_type=self.aggregate_name,
            aggregate_id=aggregate_id,
        )
    aggregate = self.create_aggregate()
    domain_events = [e.data for e in stored_events]
    version = stored_events[-1].position
    logger.debug('Loaded %d events for %s/%s', len(stored_events), self.aggregate_name, aggregate_id)
    aggregate.load_from_history(domain_events, version)
    return aggregate
save async
save(aggregate_id, aggregate, *, idempotency_key=None)
Source code in src/waku/eventsourcing/repository.py
async def save(
    self,
    aggregate_id: str,
    aggregate: AggregateT,
    *,
    idempotency_key: str | None = None,
) -> tuple[int, list[IEvent]]:
    stream_id = self._stream_id(aggregate_id)
    events = aggregate.collect_events()
    if not events:
        return aggregate.version, []

    envelopes = [
        EventEnvelope(
            domain_event=event,
            idempotency_key=f'{idempotency_key}:{i}' if idempotency_key else str(uuid.uuid4()),
        )
        for i, event in enumerate(events)
    ]
    expected = Exact(version=aggregate.version) if aggregate.version >= 0 else NoStream()
    new_version = await self._event_store.append_to_stream(stream_id, envelopes, expected_version=expected)
    aggregate.mark_persisted(new_version)
    logger.debug(
        'Saved %d events to %s/%s, version %d',
        len(events),
        self.aggregate_name,
        aggregate_id,
        new_version,
    )
    return new_version, events
create_aggregate
create_aggregate()
Source code in src/waku/eventsourcing/repository.py
def create_aggregate(self) -> AggregateT:
    aggregate_cls = self._resolve_aggregate_type()
    if aggregate_cls is None:
        msg = f'{type(self).__name__}: cannot auto-create aggregate, override create_aggregate()'
        raise TypeError(msg)
    return aggregate_cls()

serialization

default_retort module-attribute

default_retort = Retort(
    recipe=[
        loader(UUID, UUID),
        dumper(UUID, str),
        loader(StreamId, from_value),
        dumper(StreamId, str),
    ]
)

IEventSerializer

Bases: ABC

serialize abstractmethod
serialize(event)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def serialize(self, event: IEvent, /) -> dict[str, Any]: ...
deserialize abstractmethod
deserialize(data, event_type)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def deserialize(self, data: dict[str, Any], event_type: str, /) -> IEvent: ...

ISnapshotStateSerializer

Bases: ABC

serialize abstractmethod
serialize(state)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def serialize(self, state: object, /) -> dict[str, Any]: ...
deserialize abstractmethod
deserialize(data, state_type)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def deserialize(self, data: dict[str, Any], state_type: type[StateT], /) -> StateT: ...

JsonEventSerializer

JsonEventSerializer(registry)

Bases: IEventSerializer

Source code in src/waku/eventsourcing/serialization/json.py
def __init__(self, registry: EventTypeRegistry) -> None:
    self._registry = registry
serialize
serialize(event)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def serialize(self, event: IEvent, /) -> dict[str, Any]:
    validate_dataclass_instance(event)
    return cast('dict[str, Any]', default_retort.dump(event, type(event)))
deserialize
deserialize(data, event_type)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def deserialize(self, data: dict[str, Any], event_type: str, /) -> IEvent:
    cls = self._registry.resolve(event_type)
    return default_retort.load(data, cls)

JsonSnapshotStateSerializer

Bases: ISnapshotStateSerializer

serialize
serialize(state)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def serialize(self, state: object, /) -> dict[str, Any]:
    validate_dataclass_instance(state)
    return cast('dict[str, Any]', default_retort.dump(state, type(state)))
deserialize
deserialize(data, state_type)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def deserialize(self, data: dict[str, Any], state_type: type[StateT], /) -> StateT:
    return default_retort.load(data, state_type)

EventTypeRegistry

EventTypeRegistry()
Source code in src/waku/eventsourcing/serialization/registry.py
def __init__(self) -> None:
    self._name_to_type: dict[str, type[IEvent]] = {}
    self._type_to_name: dict[type[IEvent], str] = {}
    self._type_to_version: dict[type[IEvent], int] = {}
    self._frozen = False
is_frozen property
is_frozen
register
register(event_type, /, *, name=None, version=1)
Source code in src/waku/eventsourcing/serialization/registry.py
def register(self, event_type: type[IEvent], /, *, name: str | None = None, version: int = 1) -> None:
    if self._frozen:
        raise RegistryFrozenError
    type_name = name or event_type.__name__

    if event_type in self._type_to_name:
        existing_name = self._type_to_name[event_type]
        existing_version = self._type_to_version[event_type]
        if existing_name == type_name and existing_version == version:
            return
        raise ConflictingEventTypeError(event_type.__name__, existing_name, existing_version, type_name, version)

    if type_name in self._name_to_type:
        raise DuplicateEventTypeError(type_name)

    self._name_to_type[type_name] = event_type
    self._type_to_name[event_type] = type_name
    self._type_to_version[event_type] = version
add_alias
add_alias(event_type, alias)
Source code in src/waku/eventsourcing/serialization/registry.py
def add_alias(self, event_type: type[IEvent], alias: str, /) -> None:
    if self._frozen:
        raise RegistryFrozenError
    if event_type not in self._type_to_name:
        raise UnknownEventTypeError(event_type.__name__)
    if alias in self._name_to_type:
        if self._name_to_type[alias] is event_type:
            return
        raise DuplicateEventTypeError(alias)
    self._name_to_type[alias] = event_type
resolve
resolve(event_type_name)
Source code in src/waku/eventsourcing/serialization/registry.py
def resolve(self, event_type_name: str, /) -> type[IEvent]:
    try:
        return self._name_to_type[event_type_name]
    except KeyError:
        raise UnknownEventTypeError(event_type_name) from None
get_name
get_name(event_type)
Source code in src/waku/eventsourcing/serialization/registry.py
def get_name(self, event_type: type[IEvent], /) -> str:
    try:
        return self._type_to_name[event_type]
    except KeyError:
        raise UnknownEventTypeError(event_type.__name__) from None
get_version
get_version(event_type)
Source code in src/waku/eventsourcing/serialization/registry.py
def get_version(self, event_type: type[IEvent], /) -> int:
    try:
        return self._type_to_version[event_type]
    except KeyError:
        raise UnknownEventTypeError(event_type.__name__) from None
freeze
freeze()
Source code in src/waku/eventsourcing/serialization/registry.py
def freeze(self) -> None:
    self._frozen = True

interfaces

IEventSerializer

Bases: ABC

serialize abstractmethod
serialize(event)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def serialize(self, event: IEvent, /) -> dict[str, Any]: ...
deserialize abstractmethod
deserialize(data, event_type)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def deserialize(self, data: dict[str, Any], event_type: str, /) -> IEvent: ...
ISnapshotStateSerializer

Bases: ABC

serialize abstractmethod
serialize(state)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def serialize(self, state: object, /) -> dict[str, Any]: ...
deserialize abstractmethod
deserialize(data, state_type)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def deserialize(self, data: dict[str, Any], state_type: type[StateT], /) -> StateT: ...

json

JsonEventSerializer
JsonEventSerializer(registry)

Bases: IEventSerializer

Source code in src/waku/eventsourcing/serialization/json.py
def __init__(self, registry: EventTypeRegistry) -> None:
    self._registry = registry
serialize
serialize(event)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def serialize(self, event: IEvent, /) -> dict[str, Any]:
    validate_dataclass_instance(event)
    return cast('dict[str, Any]', default_retort.dump(event, type(event)))
deserialize
deserialize(data, event_type)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def deserialize(self, data: dict[str, Any], event_type: str, /) -> IEvent:
    cls = self._registry.resolve(event_type)
    return default_retort.load(data, cls)
JsonSnapshotStateSerializer

Bases: ISnapshotStateSerializer

serialize
serialize(state)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def serialize(self, state: object, /) -> dict[str, Any]:
    validate_dataclass_instance(state)
    return cast('dict[str, Any]', default_retort.dump(state, type(state)))
deserialize
deserialize(data, state_type)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def deserialize(self, data: dict[str, Any], state_type: type[StateT], /) -> StateT:
    return default_retort.load(data, state_type)

registry

EventTypeRegistry
EventTypeRegistry()
Source code in src/waku/eventsourcing/serialization/registry.py
def __init__(self) -> None:
    self._name_to_type: dict[str, type[IEvent]] = {}
    self._type_to_name: dict[type[IEvent], str] = {}
    self._type_to_version: dict[type[IEvent], int] = {}
    self._frozen = False
is_frozen property
is_frozen
register
register(event_type, /, *, name=None, version=1)
Source code in src/waku/eventsourcing/serialization/registry.py
def register(self, event_type: type[IEvent], /, *, name: str | None = None, version: int = 1) -> None:
    if self._frozen:
        raise RegistryFrozenError
    type_name = name or event_type.__name__

    if event_type in self._type_to_name:
        existing_name = self._type_to_name[event_type]
        existing_version = self._type_to_version[event_type]
        if existing_name == type_name and existing_version == version:
            return
        raise ConflictingEventTypeError(event_type.__name__, existing_name, existing_version, type_name, version)

    if type_name in self._name_to_type:
        raise DuplicateEventTypeError(type_name)

    self._name_to_type[type_name] = event_type
    self._type_to_name[event_type] = type_name
    self._type_to_version[event_type] = version
add_alias
add_alias(event_type, alias)
Source code in src/waku/eventsourcing/serialization/registry.py
def add_alias(self, event_type: type[IEvent], alias: str, /) -> None:
    if self._frozen:
        raise RegistryFrozenError
    if event_type not in self._type_to_name:
        raise UnknownEventTypeError(event_type.__name__)
    if alias in self._name_to_type:
        if self._name_to_type[alias] is event_type:
            return
        raise DuplicateEventTypeError(alias)
    self._name_to_type[alias] = event_type
resolve
resolve(event_type_name)
Source code in src/waku/eventsourcing/serialization/registry.py
def resolve(self, event_type_name: str, /) -> type[IEvent]:
    try:
        return self._name_to_type[event_type_name]
    except KeyError:
        raise UnknownEventTypeError(event_type_name) from None
get_name
get_name(event_type)
Source code in src/waku/eventsourcing/serialization/registry.py
def get_name(self, event_type: type[IEvent], /) -> str:
    try:
        return self._type_to_name[event_type]
    except KeyError:
        raise UnknownEventTypeError(event_type.__name__) from None
get_version
get_version(event_type)
Source code in src/waku/eventsourcing/serialization/registry.py
def get_version(self, event_type: type[IEvent], /) -> int:
    try:
        return self._type_to_version[event_type]
    except KeyError:
        raise UnknownEventTypeError(event_type.__name__) from None
freeze
freeze()
Source code in src/waku/eventsourcing/serialization/registry.py
def freeze(self) -> None:
    self._frozen = True

snapshot

ISnapshotStateSerializer

Bases: ABC

serialize abstractmethod
serialize(state)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def serialize(self, state: object, /) -> dict[str, Any]: ...
deserialize abstractmethod
deserialize(data, state_type)
Source code in src/waku/eventsourcing/serialization/interfaces.py
@abc.abstractmethod
def deserialize(self, data: dict[str, Any], state_type: type[StateT], /) -> StateT: ...

JsonSnapshotStateSerializer

Bases: ISnapshotStateSerializer

serialize
serialize(state)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def serialize(self, state: object, /) -> dict[str, Any]:
    validate_dataclass_instance(state)
    return cast('dict[str, Any]', default_retort.dump(state, type(state)))
deserialize
deserialize(data, state_type)
Source code in src/waku/eventsourcing/serialization/json.py
@override
def deserialize(self, data: dict[str, Any], state_type: type[StateT], /) -> StateT:
    return default_retort.load(data, state_type)

InMemorySnapshotStore

InMemorySnapshotStore()

Bases: ISnapshotStore

Source code in src/waku/eventsourcing/snapshot/in_memory.py
def __init__(self) -> None:
    self._snapshots: dict[StreamId, Snapshot] = {}
load async
load(stream_id)
Source code in src/waku/eventsourcing/snapshot/in_memory.py
async def load(self, stream_id: StreamId, /) -> Snapshot | None:
    return self._snapshots.get(stream_id)
save async
save(snapshot)
Source code in src/waku/eventsourcing/snapshot/in_memory.py
async def save(self, snapshot: Snapshot, /) -> None:
    self._snapshots[snapshot.stream_id] = snapshot

ISnapshotStore

Bases: ABC

load abstractmethod async
load(stream_id)
Source code in src/waku/eventsourcing/snapshot/interfaces.py
@abc.abstractmethod
async def load(self, stream_id: StreamId, /) -> Snapshot | None: ...
save abstractmethod async
save(snapshot)
Source code in src/waku/eventsourcing/snapshot/interfaces.py
@abc.abstractmethod
async def save(self, snapshot: Snapshot, /) -> None: ...

ISnapshotStrategy

Bases: ABC

should_snapshot abstractmethod
should_snapshot(version, events_since_snapshot)
Source code in src/waku/eventsourcing/snapshot/interfaces.py
@abc.abstractmethod
def should_snapshot(self, version: int, events_since_snapshot: int) -> bool: ...

Snapshot dataclass

Snapshot(
    *,
    stream_id,
    state,
    version,
    state_type,
    schema_version=1,
)
stream_id instance-attribute
stream_id
state instance-attribute
state
version instance-attribute
version
state_type instance-attribute
state_type
schema_version class-attribute instance-attribute
schema_version = 1

ISnapshotMigration

Bases: ABC

from_version instance-attribute
from_version
to_version instance-attribute
to_version
migrate abstractmethod
migrate(state)
Source code in src/waku/eventsourcing/snapshot/migration.py
@abc.abstractmethod
def migrate(self, state: dict[str, Any], /) -> dict[str, Any]: ...

SnapshotMigrationChain

SnapshotMigrationChain(migrations)
Source code in src/waku/eventsourcing/snapshot/migration.py
def __init__(self, migrations: Sequence[ISnapshotMigration]) -> None:
    sorted_migrations = sorted(migrations, key=lambda m: m.from_version)
    seen: set[int] = set()
    prev_to: int | None = None
    for m in sorted_migrations:
        if m.from_version < 1:
            msg = f'Invalid from_version {m.from_version}: must be >= 1'
            raise SnapshotMigrationChainError(msg)
        if m.to_version <= m.from_version:
            msg = f'Invalid migration: to_version {m.to_version} must be > from_version {m.from_version}'
            raise SnapshotMigrationChainError(msg)
        if m.from_version in seen:
            msg = f'Duplicate snapshot migration at from_version {m.from_version}'
            raise SnapshotMigrationChainError(msg)
        if prev_to is not None and m.from_version != prev_to:
            msg = (
                f'Gap in snapshot migration chain: '
                f'migration to version {prev_to} is not followed by migration from version {prev_to} '
                f'(found from_version {m.from_version})'
            )
            raise SnapshotMigrationChainError(msg)
        seen.add(m.from_version)
        prev_to = m.to_version
    self._migrations = tuple(sorted_migrations)
migrations property
migrations
migrate
migrate(state, from_version)
Source code in src/waku/eventsourcing/snapshot/migration.py
def migrate(self, state: dict[str, Any], from_version: int) -> tuple[dict[str, Any], int]:
    current = from_version
    for m in self._migrations:
        if m.from_version == current:
            state = m.migrate(state)
            current = m.to_version
    return state, current

SnapshotConfig dataclass

SnapshotConfig(
    *,
    strategy,
    schema_version=1,
    migration_chain=_EMPTY_CHAIN,
)
strategy instance-attribute
strategy
schema_version class-attribute instance-attribute
schema_version = 1
migration_chain class-attribute instance-attribute
migration_chain = field(default=_EMPTY_CHAIN)

SnapshotConfigRegistry

SnapshotConfigRegistry(configs)
Source code in src/waku/eventsourcing/snapshot/registry.py
def __init__(self, configs: Mapping[str, SnapshotConfig]) -> None:
    self._configs = dict(configs)
get
get(aggregate_name)
Source code in src/waku/eventsourcing/snapshot/registry.py
def get(self, aggregate_name: str) -> SnapshotConfig:
    config = self._configs.get(aggregate_name)
    if config is None:
        raise SnapshotConfigNotFoundError(aggregate_name)
    return config

SnapshotEventSourcedRepository

SnapshotEventSourcedRepository(
    event_store,
    snapshot_store,
    snapshot_config_registry,
    state_serializer,
)

Bases: EventSourcedRepository[AggregateT], ABC, Generic[AggregateT]

Source code in src/waku/eventsourcing/snapshot/repository.py
def __init__(
    self,
    event_store: IEventStore,
    snapshot_store: ISnapshotStore,
    snapshot_config_registry: SnapshotConfigRegistry,
    state_serializer: ISnapshotStateSerializer,
) -> None:
    super().__init__(event_store)
    self._state_serializer = state_serializer
    config = snapshot_config_registry.get(self.aggregate_name)
    self._snapshot_manager = SnapshotManager(
        store=snapshot_store,
        config=config,
        state_type_name=self.snapshot_state_type or self.aggregate_name,
    )
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
snapshot_state_type class-attribute
snapshot_state_type = None
create_aggregate
create_aggregate()
Source code in src/waku/eventsourcing/repository.py
def create_aggregate(self) -> AggregateT:
    aggregate_cls = self._resolve_aggregate_type()
    if aggregate_cls is None:
        msg = f'{type(self).__name__}: cannot auto-create aggregate, override create_aggregate()'
        raise TypeError(msg)
    return aggregate_cls()
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/snapshot/repository.py
async def load(self, aggregate_id: str) -> AggregateT:
    stream_id = self._stream_id(aggregate_id)
    snapshot = await self._snapshot_manager.load_snapshot(stream_id, aggregate_id)

    if snapshot is not None:
        logger.debug('Loaded snapshot for %s/%s at version %d', self.aggregate_name, aggregate_id, snapshot.version)
        aggregate = self._restore_from_snapshot(snapshot)
        stored_events = await read_aggregate_stream(
            self._event_store,
            stream_id,
            start=snapshot.version + 1,
            max_stream_length=self.max_stream_length,
        )
        domain_events = [e.data for e in stored_events]
        version = stored_events[-1].position if stored_events else snapshot.version
        if domain_events:
            aggregate.load_from_history(domain_events, version)
        else:
            aggregate.mark_persisted(version)
        return aggregate

    logger.debug('No snapshot for %s/%s, loading from events', self.aggregate_name, aggregate_id)
    return await super().load(aggregate_id)
save async
save(aggregate_id, aggregate, *, idempotency_key=None)
Source code in src/waku/eventsourcing/snapshot/repository.py
async def save(
    self,
    aggregate_id: str,
    aggregate: AggregateT,
    *,
    idempotency_key: str | None = None,
) -> tuple[int, list[IEvent]]:
    new_version, events = await super().save(aggregate_id, aggregate, idempotency_key=idempotency_key)

    if events and self._snapshot_manager.should_save(aggregate_id, new_version):
        stream_id = self._stream_id(aggregate_id)
        state_obj = self._snapshot_state(aggregate)
        state_data = self._state_serializer.serialize(state_obj)
        await self._snapshot_manager.save_snapshot(stream_id, aggregate_id, state_data, new_version)

    return new_version, events

EventCountStrategy

EventCountStrategy(threshold=100)

Bases: ISnapshotStrategy

Source code in src/waku/eventsourcing/snapshot/strategy.py
def __init__(self, threshold: int = 100) -> None:
    if threshold < 1:
        msg = f'Threshold must be at least 1, got {threshold}'
        raise ValueError(msg)
    self._threshold = threshold
should_snapshot
should_snapshot(version, events_since_snapshot)
Source code in src/waku/eventsourcing/snapshot/strategy.py
def should_snapshot(self, version: int, events_since_snapshot: int) -> bool:  # noqa: ARG002
    return events_since_snapshot >= self._threshold

in_memory

InMemorySnapshotStore
InMemorySnapshotStore()

Bases: ISnapshotStore

Source code in src/waku/eventsourcing/snapshot/in_memory.py
def __init__(self) -> None:
    self._snapshots: dict[StreamId, Snapshot] = {}
load async
load(stream_id)
Source code in src/waku/eventsourcing/snapshot/in_memory.py
async def load(self, stream_id: StreamId, /) -> Snapshot | None:
    return self._snapshots.get(stream_id)
save async
save(snapshot)
Source code in src/waku/eventsourcing/snapshot/in_memory.py
async def save(self, snapshot: Snapshot, /) -> None:
    self._snapshots[snapshot.stream_id] = snapshot

interfaces

Snapshot dataclass
Snapshot(
    *,
    stream_id,
    state,
    version,
    state_type,
    schema_version=1,
)
stream_id instance-attribute
stream_id
state instance-attribute
state
version instance-attribute
version
state_type instance-attribute
state_type
schema_version class-attribute instance-attribute
schema_version = 1
ISnapshotStore

Bases: ABC

load abstractmethod async
load(stream_id)
Source code in src/waku/eventsourcing/snapshot/interfaces.py
@abc.abstractmethod
async def load(self, stream_id: StreamId, /) -> Snapshot | None: ...
save abstractmethod async
save(snapshot)
Source code in src/waku/eventsourcing/snapshot/interfaces.py
@abc.abstractmethod
async def save(self, snapshot: Snapshot, /) -> None: ...
ISnapshotStrategy

Bases: ABC

should_snapshot abstractmethod
should_snapshot(version, events_since_snapshot)
Source code in src/waku/eventsourcing/snapshot/interfaces.py
@abc.abstractmethod
def should_snapshot(self, version: int, events_since_snapshot: int) -> bool: ...

manager

logger module-attribute
logger = getLogger(__name__)
SnapshotManager
SnapshotManager(store, config, state_type_name)
Source code in src/waku/eventsourcing/snapshot/manager.py
def __init__(
    self,
    store: ISnapshotStore,
    config: SnapshotConfig,
    state_type_name: str,
) -> None:
    self._store = store
    self._config = config
    self._state_type_name = state_type_name
    self._last_snapshot_versions: dict[str, int] = {}
load_snapshot async
load_snapshot(stream_id, aggregate_id)
Source code in src/waku/eventsourcing/snapshot/manager.py
async def load_snapshot(self, stream_id: StreamId, aggregate_id: str) -> Snapshot | None:
    snapshot = await self._store.load(stream_id)

    if snapshot is None:
        self._last_snapshot_versions[aggregate_id] = -1
        return None

    if snapshot.state_type != self._state_type_name:
        raise SnapshotTypeMismatchError(stream_id, self._state_type_name, snapshot.state_type)

    if snapshot.schema_version != self._config.schema_version:
        snapshot = migrate_snapshot_or_discard(
            self._config.migration_chain,
            snapshot,
            self._config.schema_version,
            stream_id,
        )
        if snapshot is None:
            self._last_snapshot_versions[aggregate_id] = -1
            return None

    self._last_snapshot_versions[aggregate_id] = snapshot.version
    return snapshot
should_save
should_save(aggregate_id, new_version)
Source code in src/waku/eventsourcing/snapshot/manager.py
def should_save(self, aggregate_id: str, new_version: int) -> bool:
    last_snapshot_version = self._last_snapshot_versions.get(aggregate_id, -1)
    events_since_snapshot = new_version - last_snapshot_version
    return self._config.strategy.should_snapshot(new_version, events_since_snapshot)
save_snapshot async
save_snapshot(stream_id, aggregate_id, state_data, version)
Source code in src/waku/eventsourcing/snapshot/manager.py
async def save_snapshot(
    self,
    stream_id: StreamId,
    aggregate_id: str,
    state_data: dict[str, Any],
    version: int,
) -> None:
    snapshot = Snapshot(
        stream_id=stream_id,
        state=state_data,
        version=version,
        state_type=self._state_type_name,
        schema_version=self._config.schema_version,
    )
    try:
        await self._store.save(snapshot)
    except Exception:
        logger.warning(
            'Failed to save snapshot for stream %s at version %d, continuing without snapshot',
            stream_id,
            version,
            exc_info=True,
        )
        return
    self._last_snapshot_versions[aggregate_id] = version

migration

logger module-attribute
logger = getLogger(__name__)
ISnapshotMigration

Bases: ABC

from_version instance-attribute
from_version
to_version instance-attribute
to_version
migrate abstractmethod
migrate(state)
Source code in src/waku/eventsourcing/snapshot/migration.py
@abc.abstractmethod
def migrate(self, state: dict[str, Any], /) -> dict[str, Any]: ...
SnapshotMigrationChain
SnapshotMigrationChain(migrations)
Source code in src/waku/eventsourcing/snapshot/migration.py
def __init__(self, migrations: Sequence[ISnapshotMigration]) -> None:
    sorted_migrations = sorted(migrations, key=lambda m: m.from_version)
    seen: set[int] = set()
    prev_to: int | None = None
    for m in sorted_migrations:
        if m.from_version < 1:
            msg = f'Invalid from_version {m.from_version}: must be >= 1'
            raise SnapshotMigrationChainError(msg)
        if m.to_version <= m.from_version:
            msg = f'Invalid migration: to_version {m.to_version} must be > from_version {m.from_version}'
            raise SnapshotMigrationChainError(msg)
        if m.from_version in seen:
            msg = f'Duplicate snapshot migration at from_version {m.from_version}'
            raise SnapshotMigrationChainError(msg)
        if prev_to is not None and m.from_version != prev_to:
            msg = (
                f'Gap in snapshot migration chain: '
                f'migration to version {prev_to} is not followed by migration from version {prev_to} '
                f'(found from_version {m.from_version})'
            )
            raise SnapshotMigrationChainError(msg)
        seen.add(m.from_version)
        prev_to = m.to_version
    self._migrations = tuple(sorted_migrations)
migrations property
migrations
migrate
migrate(state, from_version)
Source code in src/waku/eventsourcing/snapshot/migration.py
def migrate(self, state: dict[str, Any], from_version: int) -> tuple[dict[str, Any], int]:
    current = from_version
    for m in self._migrations:
        if m.from_version == current:
            state = m.migrate(state)
            current = m.to_version
    return state, current
migrate_snapshot_or_discard
migrate_snapshot_or_discard(
    chain, snapshot, target_version, stream_id
)
Source code in src/waku/eventsourcing/snapshot/migration.py
def migrate_snapshot_or_discard(
    chain: SnapshotMigrationChain,
    snapshot: Snapshot,
    target_version: int,
    stream_id: StreamId,
) -> Snapshot | None:
    migrated_state, reached = chain.migrate(snapshot.state, snapshot.schema_version)
    if reached != target_version:
        logger.warning(
            'Snapshot schema version %d does not match expected %d for stream %s. '
            'No complete migration path. Discarding snapshot and replaying from events.',
            snapshot.schema_version,
            target_version,
            stream_id,
        )
        return None
    return Snapshot(
        stream_id=snapshot.stream_id,
        state=migrated_state,
        version=snapshot.version,
        state_type=snapshot.state_type,
        schema_version=reached,
    )

registry

SnapshotConfig dataclass
SnapshotConfig(
    *,
    strategy,
    schema_version=1,
    migration_chain=_EMPTY_CHAIN,
)
strategy instance-attribute
strategy
schema_version class-attribute instance-attribute
schema_version = 1
migration_chain class-attribute instance-attribute
migration_chain = field(default=_EMPTY_CHAIN)
SnapshotConfigRegistry
SnapshotConfigRegistry(configs)
Source code in src/waku/eventsourcing/snapshot/registry.py
def __init__(self, configs: Mapping[str, SnapshotConfig]) -> None:
    self._configs = dict(configs)
get
get(aggregate_name)
Source code in src/waku/eventsourcing/snapshot/registry.py
def get(self, aggregate_name: str) -> SnapshotConfig:
    config = self._configs.get(aggregate_name)
    if config is None:
        raise SnapshotConfigNotFoundError(aggregate_name)
    return config

repository

logger module-attribute
logger = getLogger(__name__)
SnapshotEventSourcedRepository
SnapshotEventSourcedRepository(
    event_store,
    snapshot_store,
    snapshot_config_registry,
    state_serializer,
)

Bases: EventSourcedRepository[AggregateT], ABC, Generic[AggregateT]

Source code in src/waku/eventsourcing/snapshot/repository.py
def __init__(
    self,
    event_store: IEventStore,
    snapshot_store: ISnapshotStore,
    snapshot_config_registry: SnapshotConfigRegistry,
    state_serializer: ISnapshotStateSerializer,
) -> None:
    super().__init__(event_store)
    self._state_serializer = state_serializer
    config = snapshot_config_registry.get(self.aggregate_name)
    self._snapshot_manager = SnapshotManager(
        store=snapshot_store,
        config=config,
        state_type_name=self.snapshot_state_type or self.aggregate_name,
    )
snapshot_state_type class-attribute
snapshot_state_type = None
aggregate_name class-attribute
aggregate_name
max_stream_length class-attribute
max_stream_length = None
load async
load(aggregate_id)
Source code in src/waku/eventsourcing/snapshot/repository.py
async def load(self, aggregate_id: str) -> AggregateT:
    stream_id = self._stream_id(aggregate_id)
    snapshot = await self._snapshot_manager.load_snapshot(stream_id, aggregate_id)

    if snapshot is not None:
        logger.debug('Loaded snapshot for %s/%s at version %d', self.aggregate_name, aggregate_id, snapshot.version)
        aggregate = self._restore_from_snapshot(snapshot)
        stored_events = await read_aggregate_stream(
            self._event_store,
            stream_id,
            start=snapshot.version + 1,
            max_stream_length=self.max_stream_length,
        )
        domain_events = [e.data for e in stored_events]
        version = stored_events[-1].position if stored_events else snapshot.version
        if domain_events:
            aggregate.load_from_history(domain_events, version)
        else:
            aggregate.mark_persisted(version)
        return aggregate

    logger.debug('No snapshot for %s/%s, loading from events', self.aggregate_name, aggregate_id)
    return await super().load(aggregate_id)
save async
save(aggregate_id, aggregate, *, idempotency_key=None)
Source code in src/waku/eventsourcing/snapshot/repository.py
async def save(
    self,
    aggregate_id: str,
    aggregate: AggregateT,
    *,
    idempotency_key: str | None = None,
) -> tuple[int, list[IEvent]]:
    new_version, events = await super().save(aggregate_id, aggregate, idempotency_key=idempotency_key)

    if events and self._snapshot_manager.should_save(aggregate_id, new_version):
        stream_id = self._stream_id(aggregate_id)
        state_obj = self._snapshot_state(aggregate)
        state_data = self._state_serializer.serialize(state_obj)
        await self._snapshot_manager.save_snapshot(stream_id, aggregate_id, state_data, new_version)

    return new_version, events
create_aggregate
create_aggregate()
Source code in src/waku/eventsourcing/repository.py
def create_aggregate(self) -> AggregateT:
    aggregate_cls = self._resolve_aggregate_type()
    if aggregate_cls is None:
        msg = f'{type(self).__name__}: cannot auto-create aggregate, override create_aggregate()'
        raise TypeError(msg)
    return aggregate_cls()

sqlalchemy

SqlAlchemySnapshotStore
SqlAlchemySnapshotStore(session, snapshots_table)

Bases: ISnapshotStore

Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
def __init__(self, session: AsyncSession, snapshots_table: Table) -> None:
    self._session = session
    self._snapshots = snapshots_table
load async
load(stream_id)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
async def load(self, stream_id: StreamId, /) -> Snapshot | None:
    key = str(stream_id)
    query = select(self._snapshots).where(self._snapshots.c.stream_id == key)
    result = await self._session.execute(query)
    row: Any = result.one_or_none()
    if row is None:
        return None
    return Snapshot(
        stream_id=StreamId.from_value(row.stream_id),
        state=row.state,
        version=row.version,
        state_type=row.state_type,
        schema_version=row.schema_version,
    )
save async
save(snapshot)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
async def save(self, snapshot: Snapshot, /) -> None:
    stmt = pg_insert(self._snapshots).values(
        stream_id=str(snapshot.stream_id),
        state=snapshot.state,
        version=snapshot.version,
        state_type=snapshot.state_type,
        schema_version=snapshot.schema_version,
    )
    stmt = stmt.on_conflict_do_update(
        index_elements=['stream_id'],
        set_={
            'state': stmt.excluded.state,
            'version': stmt.excluded.version,
            'state_type': stmt.excluded.state_type,
            'schema_version': stmt.excluded.schema_version,
            'updated_at': sa_func.now(),
        },
    )
    await self._session.execute(stmt)
    await self._session.flush()
make_sqlalchemy_snapshot_store
make_sqlalchemy_snapshot_store(snapshots_table)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
def make_sqlalchemy_snapshot_store(
    snapshots_table: Table,
) -> Callable[..., SqlAlchemySnapshotStore]:
    def factory(session: AsyncSession) -> SqlAlchemySnapshotStore:
        return SqlAlchemySnapshotStore(session, snapshots_table)

    return factory
bind_snapshot_tables
bind_snapshot_tables(metadata)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/tables.py
def bind_snapshot_tables(metadata: MetaData) -> Table:
    if es_snapshots_table.name in metadata.tables:
        return metadata.tables[es_snapshots_table.name]
    return es_snapshots_table.to_metadata(metadata)
store
SqlAlchemySnapshotStore
SqlAlchemySnapshotStore(session, snapshots_table)

Bases: ISnapshotStore

Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
def __init__(self, session: AsyncSession, snapshots_table: Table) -> None:
    self._session = session
    self._snapshots = snapshots_table
load async
load(stream_id)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
async def load(self, stream_id: StreamId, /) -> Snapshot | None:
    key = str(stream_id)
    query = select(self._snapshots).where(self._snapshots.c.stream_id == key)
    result = await self._session.execute(query)
    row: Any = result.one_or_none()
    if row is None:
        return None
    return Snapshot(
        stream_id=StreamId.from_value(row.stream_id),
        state=row.state,
        version=row.version,
        state_type=row.state_type,
        schema_version=row.schema_version,
    )
save async
save(snapshot)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
async def save(self, snapshot: Snapshot, /) -> None:
    stmt = pg_insert(self._snapshots).values(
        stream_id=str(snapshot.stream_id),
        state=snapshot.state,
        version=snapshot.version,
        state_type=snapshot.state_type,
        schema_version=snapshot.schema_version,
    )
    stmt = stmt.on_conflict_do_update(
        index_elements=['stream_id'],
        set_={
            'state': stmt.excluded.state,
            'version': stmt.excluded.version,
            'state_type': stmt.excluded.state_type,
            'schema_version': stmt.excluded.schema_version,
            'updated_at': sa_func.now(),
        },
    )
    await self._session.execute(stmt)
    await self._session.flush()
make_sqlalchemy_snapshot_store
make_sqlalchemy_snapshot_store(snapshots_table)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/store.py
def make_sqlalchemy_snapshot_store(
    snapshots_table: Table,
) -> Callable[..., SqlAlchemySnapshotStore]:
    """Return a session-taking factory bound to *snapshots_table*."""

    def _factory(session: AsyncSession) -> SqlAlchemySnapshotStore:
        # Close over the table so the caller only has to supply a session.
        return SqlAlchemySnapshotStore(session, snapshots_table)

    return _factory
tables
es_snapshots_table module-attribute
# Snapshot storage table (PostgreSQL types): one row per stream, JSONB state.
es_snapshots_table = Table(
    'es_snapshots',
    _internal_metadata,
    # The stream id doubles as the primary key: at most one snapshot per stream.
    Column('stream_id', Text, primary_key=True),
    Column('state', JSONB, nullable=False),
    # Stream version the snapshot was taken at.
    Column('version', Integer, nullable=False),
    Column('state_type', Text, nullable=False),
    Column(
        'schema_version',
        Integer,
        nullable=False,
        server_default='1',
    ),
    Column(
        'created_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
    ),
    Column(
        'updated_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
        # NOTE(review): onupdate applies to Core/ORM updates; the snapshot
        # store's upsert also sets updated_at explicitly.
        onupdate=now(),
    ),
)
bind_snapshot_tables
bind_snapshot_tables(metadata)
Source code in src/waku/eventsourcing/snapshot/sqlalchemy/tables.py
def bind_snapshot_tables(metadata: MetaData) -> Table:
    """Attach the snapshot table to *metadata*, reusing it when already bound."""
    existing = metadata.tables.get(es_snapshots_table.name)
    if existing is not None:
        return existing
    return es_snapshots_table.to_metadata(metadata)

strategy

EventCountStrategy
EventCountStrategy(threshold=100)

Bases: ISnapshotStrategy

Source code in src/waku/eventsourcing/snapshot/strategy.py
def __init__(self, threshold: int = 100) -> None:
    """Snapshot every *threshold* events; values below 1 are rejected."""
    if threshold < 1:
        raise ValueError(f'Threshold must be at least 1, got {threshold}')
    self._threshold = threshold
should_snapshot
should_snapshot(version, events_since_snapshot)
Source code in src/waku/eventsourcing/snapshot/strategy.py
def should_snapshot(self, version: int, events_since_snapshot: int) -> bool:  # noqa: ARG002
    """Return True once enough events accumulated since the last snapshot."""
    return self._threshold <= events_since_snapshot

store

InMemoryEventStore

InMemoryEventStore(registry, projections=(), enrichers=())

Bases: IEventStore

Source code in src/waku/eventsourcing/store/in_memory.py
def __init__(
    self,
    registry: EventTypeRegistry,
    projections: Sequence[IProjection] = (),
    enrichers: Sequence[IMetadataEnricher] = (),
) -> None:
    """Set up empty in-memory state guarded by a single async lock."""
    self._registry = registry
    # Events per stream key (str(stream_id)); this dict IS the store.
    self._streams: dict[str, list[StoredEvent]] = {}
    # Idempotency keys seen per stream, used for append deduplication.
    self._idempotency_keys: dict[str, set[str]] = {}
    # Tombstoned stream keys; events stay in _streams for auditing.
    self._deleted_streams: set[str] = set()
    # Next global position to assign (head is this value - 1).
    self._global_position: int = 0
    # One lock serializes all store operations.
    self._lock = anyio.Lock()
    self._projections = projections
    self._enrichers = enrichers
read_stream async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/in_memory.py
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]:
    """Return up to *count* events of *stream_id* beginning at *start*."""
    async with self._lock:
        stream_key = str(stream_id)
        if stream_key not in self._streams:
            raise StreamNotFoundError(stream_id)
        stored = self._streams[stream_key]
        # Resolve symbolic start positions to a concrete list offset.
        if start == StreamPosition.START:
            first = 0
        elif start == StreamPosition.END:
            # END means "the last event"; clamp to 0 for empty streams.
            first = max(len(stored) - 1, 0)
        elif isinstance(start, int):
            first = start
        else:  # pragma: no cover
            assert_never(start)
        window = stored[first:]
        if count is not None:
            window = window[:count]
        return list(window)
delete_stream async
delete_stream(stream_id)
Source code in src/waku/eventsourcing/store/in_memory.py
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Tombstone *stream_id*; unknown streams raise StreamNotFoundError."""
    async with self._lock:
        stream_key = str(stream_id)
        known = stream_key in self._streams
        if not known:
            raise StreamNotFoundError(stream_id)
        # Events stay in _streams; the tombstone hides them from global reads.
        self._deleted_streams.add(stream_key)
read_all async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/in_memory.py
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]:
    async with self._lock:
        all_events: list[StoredEvent] = []
        for key, stream_events in self._streams.items():
            if key not in self._deleted_streams:
                all_events.extend(stream_events)
        all_events.sort(key=lambda e: e.global_position)

        type_set = frozenset(event_types) if event_types else None
        filtered = [
            e
            for e in all_events
            if e.global_position > after_position and (type_set is None or e.event_type in type_set)
        ]
        if count is not None:
            filtered = filtered[:count]
        return filtered
stream_exists async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/in_memory.py
async def stream_exists(self, stream_id: StreamId, /) -> bool:
    async with self._lock:
        key = str(stream_id)
        return key in self._streams and key not in self._deleted_streams
global_head_position async
global_head_position()
Source code in src/waku/eventsourcing/store/in_memory.py
async def global_head_position(self) -> int:
    async with self._lock:
        return self._global_position - 1
read_positions async
read_positions(*, after_position, up_to_position)
Source code in src/waku/eventsourcing/store/in_memory.py
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    async with self._lock:
        positions: list[int] = []
        for key, stream_events in self._streams.items():
            if key in self._deleted_streams:
                continue
            positions.extend(
                event.global_position
                for event in stream_events
                if after_position < event.global_position <= up_to_position
            )
        positions.sort()
        return positions
append_to_stream async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/in_memory.py
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int:
    """Append *events* to *stream_id* and return the new stream version.

    Enforces optimistic concurrency via *expected_version*, deduplicates on
    idempotency keys, and rolls the append back if any projection fails.
    """
    async with self._lock:
        key = str(stream_id)
        if key in self._deleted_streams:
            raise StreamDeletedError(stream_id)
        stream = self._streams.get(key)
        # Version is the index of the last event; -1 for a missing stream.
        current_version = len(stream) - 1 if stream is not None else -1

        if not events:
            # Nothing to write, but the caller's expectation is still validated.
            check_expected_version(stream_id, expected_version, current_version, exists=stream is not None)
            return current_version

        # Presumably returns the version to report when these envelopes were
        # already appended (idempotent retry) — helper not visible here.
        dedup_version = self._check_idempotency(stream_id, events, current_version)
        if dedup_version is not None:
            return dedup_version

        check_expected_version(stream_id, expected_version, current_version, exists=stream is not None)

        if stream is None:
            stream = []
            self._streams[key] = stream
            is_new_stream = True
        else:
            is_new_stream = False

        stored_events: list[StoredEvent] = []
        # Remember the head so a failed projection can restore it.
        base_global_position = self._global_position
        for envelope in events:
            position = len(stream)
            stored = StoredEvent(
                event_id=uuid.uuid4(),
                stream_id=stream_id,
                event_type=self._registry.get_name(
                    type(envelope.domain_event)  # pyrefly: ignore[bad-argument-type]
                ),
                position=position,
                global_position=self._global_position,
                timestamp=datetime.now(UTC),
                data=envelope.domain_event,
                metadata=enrich_metadata(envelope.metadata, self._enrichers),
                idempotency_key=envelope.idempotency_key,
                schema_version=self._registry.get_version(
                    type(envelope.domain_event)  # pyrefly: ignore[bad-argument-type]
                ),
            )
            stream.append(stored)
            stored_events.append(stored)
            self._global_position += 1

        # Record idempotency keys only after the events are in the stream.
        stream_keys = self._idempotency_keys.setdefault(key, set())
        for envelope in events:
            stream_keys.add(envelope.idempotency_key)

        try:
            for projection in self._projections:
                await projection.project(stored_events)
        except Exception:
            # Undo the append so store state and projections stay consistent.
            self._rollback_append(key, stream, events, base_global_position, is_new_stream=is_new_stream)
            raise

        return len(stream) - 1

IEventReader

Bases: ABC

read_stream abstractmethod async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return a slice of one stream, from *start*, at most *count* events.
@abc.abstractmethod
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]: ...
read_all abstractmethod async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return the global event feed after *after_position*, optionally
# filtered by event type name and limited to *count* events.
@abc.abstractmethod
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]: ...
stream_exists abstractmethod async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: report whether the given stream id is present in the store.
@abc.abstractmethod
async def stream_exists(self, stream_id: StreamId, /) -> bool: ...
global_head_position abstractmethod async
global_head_position()

Return the highest global position in the store, or -1 if empty.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: single int describing the store's write head; -1 means empty.
@abc.abstractmethod
async def global_head_position(self) -> int:
    """Return the highest global position in the store, or ``-1`` if empty."""
    ...
read_positions abstractmethod async
read_positions(*, after_position, up_to_position)

Return committed global positions in the range (after_position, up_to_position].

Positions are returned in ascending order.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: range query over committed positions, exclusive below / inclusive above.
@abc.abstractmethod
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Return committed global positions in the range ``(after_position, up_to_position]``.

    Positions are returned in ascending order.
    """
    ...

IEventStore

Bases: IEventReader, IEventWriter, ABC

append_to_stream abstractmethod async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: append envelopes to the stream, enforcing *expected_version*
# (optimistic concurrency); returns the resulting stream version.
@abc.abstractmethod
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int: ...
delete_stream abstractmethod async
delete_stream(stream_id)

Mark a stream as permanently deleted.

Deleted streams are excluded from read_all, read_positions, and stream_exists. Appending to a deleted stream raises StreamDeletedError. Events remain accessible via read_stream for audit purposes.

Raises StreamNotFoundError if the stream does not exist. No-op if already deleted.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: soft delete — full semantics are spelled out in the docstring.
@abc.abstractmethod
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Mark a stream as permanently deleted.

    Deleted streams are excluded from ``read_all``, ``read_positions``,
    and ``stream_exists``. Appending to a deleted stream raises
    ``StreamDeletedError``. Events remain accessible via ``read_stream``
    for audit purposes.

    Raises ``StreamNotFoundError`` if the stream does not exist.
    No-op if already deleted.
    """
    ...
read_stream abstractmethod async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return a slice of one stream, from *start*, at most *count* events.
@abc.abstractmethod
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]: ...
read_all abstractmethod async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return the global event feed after *after_position*, optionally
# filtered by event type name and limited to *count* events.
@abc.abstractmethod
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]: ...
stream_exists abstractmethod async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: report whether the given stream id is present in the store.
@abc.abstractmethod
async def stream_exists(self, stream_id: StreamId, /) -> bool: ...
global_head_position abstractmethod async
global_head_position()

Return the highest global position in the store, or -1 if empty.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: single int describing the store's write head; -1 means empty.
@abc.abstractmethod
async def global_head_position(self) -> int:
    """Return the highest global position in the store, or ``-1`` if empty."""
    ...
read_positions abstractmethod async
read_positions(*, after_position, up_to_position)

Return committed global positions in the range (after_position, up_to_position].

Positions are returned in ascending order.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: range query over committed positions, exclusive below / inclusive above.
@abc.abstractmethod
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Return committed global positions in the range ``(after_position, up_to_position]``.

    Positions are returned in ascending order.
    """
    ...

IEventWriter

Bases: ABC

append_to_stream abstractmethod async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: append envelopes to the stream, enforcing *expected_version*
# (optimistic concurrency); returns the resulting stream version.
@abc.abstractmethod
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int: ...
delete_stream abstractmethod async
delete_stream(stream_id)

Mark a stream as permanently deleted.

Deleted streams are excluded from read_all, read_positions, and stream_exists. Appending to a deleted stream raises StreamDeletedError. Events remain accessible via read_stream for audit purposes.

Raises StreamNotFoundError if the stream does not exist. No-op if already deleted.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: soft delete — full semantics are spelled out in the docstring.
@abc.abstractmethod
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Mark a stream as permanently deleted.

    Deleted streams are excluded from ``read_all``, ``read_positions``,
    and ``stream_exists``. Appending to a deleted stream raises
    ``StreamDeletedError``. Events remain accessible via ``read_stream``
    for audit purposes.

    Raises ``StreamNotFoundError`` if the stream does not exist.
    No-op if already deleted.
    """
    ...

in_memory

logger module-attribute
# Module-level logger, named after the defining module.
logger = getLogger(__name__)
InMemoryEventStore
InMemoryEventStore(registry, projections=(), enrichers=())

Bases: IEventStore

Source code in src/waku/eventsourcing/store/in_memory.py
def __init__(
    self,
    registry: EventTypeRegistry,
    projections: Sequence[IProjection] = (),
    enrichers: Sequence[IMetadataEnricher] = (),
) -> None:
    """Initialize empty in-memory state; one async lock serializes all access."""
    self._registry = registry
    # str(stream_id) -> list of stored events; this dict IS the store.
    self._streams: dict[str, list[StoredEvent]] = {}
    self._idempotency_keys: dict[str, set[str]] = {}
    # Tombstoned stream keys; their events stay in _streams for auditing.
    self._deleted_streams: set[str] = set()
    # Next global position to assign.
    self._global_position: int = 0
    self._lock = anyio.Lock()
    self._projections = projections
    self._enrichers = enrichers
read_stream async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/in_memory.py
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]:
    """Return up to *count* events of *stream_id* beginning at *start*."""
    async with self._lock:
        key = str(stream_id)
        if key not in self._streams:
            raise StreamNotFoundError(stream_id)
        events = self._streams[key]
        match start:
            case StreamPosition.START:
                offset = 0
            case StreamPosition.END:
                # END means "the last event"; clamp to 0 for empty streams.
                offset = max(len(events) - 1, 0)
            case int() as offset:
                pass
            case _:  # pragma: no cover
                assert_never(start)
        subset = events[offset:]
        if count is not None:
            subset = subset[:count]
        return list(subset)
delete_stream async
delete_stream(stream_id)
Source code in src/waku/eventsourcing/store/in_memory.py
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Tombstone the stream; raise StreamNotFoundError when unknown."""
    async with self._lock:
        key = str(stream_id)
        if key not in self._streams:
            raise StreamNotFoundError(stream_id)
        # Events stay in _streams; the tombstone hides them from global reads.
        self._deleted_streams.add(key)
read_all async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/in_memory.py
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]:
    """Live-stream events after *after_position*, sorted by global position."""
    async with self._lock:
        all_events: list[StoredEvent] = []
        for key, stream_events in self._streams.items():
            if key not in self._deleted_streams:
                all_events.extend(stream_events)
        all_events.sort(key=lambda e: e.global_position)

        # Filter by position and (optionally) type, then apply the limit.
        type_set = frozenset(event_types) if event_types else None
        filtered = [
            e
            for e in all_events
            if e.global_position > after_position and (type_set is None or e.event_type in type_set)
        ]
        if count is not None:
            filtered = filtered[:count]
        return filtered
stream_exists async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/in_memory.py
async def stream_exists(self, stream_id: StreamId, /) -> bool:
    """True when the stream is present and not tombstoned."""
    async with self._lock:
        key = str(stream_id)
        return key in self._streams and key not in self._deleted_streams
global_head_position async
global_head_position()
Source code in src/waku/eventsourcing/store/in_memory.py
async def global_head_position(self) -> int:
    """Highest assigned global position, or -1 when no events exist."""
    async with self._lock:
        # _global_position holds the *next* position to assign.
        return self._global_position - 1
read_positions async
read_positions(*, after_position, up_to_position)
Source code in src/waku/eventsourcing/store/in_memory.py
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Ascending global positions in (after_position, up_to_position], live streams only."""
    async with self._lock:
        positions: list[int] = []
        for key, stream_events in self._streams.items():
            if key in self._deleted_streams:
                continue
            positions.extend(
                event.global_position
                for event in stream_events
                if after_position < event.global_position <= up_to_position
            )
        positions.sort()
        return positions
append_to_stream async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/in_memory.py
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int:
    """Append *events* to *stream_id* and return the new stream version.

    Enforces optimistic concurrency via *expected_version*, deduplicates on
    idempotency keys, and rolls the append back if any projection fails.
    """
    async with self._lock:
        key = str(stream_id)
        if key in self._deleted_streams:
            raise StreamDeletedError(stream_id)
        stream = self._streams.get(key)
        # Version is the index of the last event; -1 for a missing stream.
        current_version = len(stream) - 1 if stream is not None else -1

        if not events:
            # Nothing to write, but the caller's expectation is still validated.
            check_expected_version(stream_id, expected_version, current_version, exists=stream is not None)
            return current_version

        # Presumably reports the version for an idempotent retry — helper not
        # visible here; confirm against _check_idempotency.
        dedup_version = self._check_idempotency(stream_id, events, current_version)
        if dedup_version is not None:
            return dedup_version

        check_expected_version(stream_id, expected_version, current_version, exists=stream is not None)

        if stream is None:
            stream = []
            self._streams[key] = stream
            is_new_stream = True
        else:
            is_new_stream = False

        stored_events: list[StoredEvent] = []
        # Remember the head so a failed projection can restore it.
        base_global_position = self._global_position
        for envelope in events:
            position = len(stream)
            stored = StoredEvent(
                event_id=uuid.uuid4(),
                stream_id=stream_id,
                event_type=self._registry.get_name(
                    type(envelope.domain_event)  # pyrefly: ignore[bad-argument-type]
                ),
                position=position,
                global_position=self._global_position,
                timestamp=datetime.now(UTC),
                data=envelope.domain_event,
                metadata=enrich_metadata(envelope.metadata, self._enrichers),
                idempotency_key=envelope.idempotency_key,
                schema_version=self._registry.get_version(
                    type(envelope.domain_event)  # pyrefly: ignore[bad-argument-type]
                ),
            )
            stream.append(stored)
            stored_events.append(stored)
            self._global_position += 1

        # Record idempotency keys only after the events are in the stream.
        stream_keys = self._idempotency_keys.setdefault(key, set())
        for envelope in events:
            stream_keys.add(envelope.idempotency_key)

        try:
            for projection in self._projections:
                await projection.project(stored_events)
        except Exception:
            # Undo the append so store state and projections stay consistent.
            self._rollback_append(key, stream, events, base_global_position, is_new_stream=is_new_stream)
            raise

        return len(stream) - 1

interfaces

IEventReader

Bases: ABC

read_stream abstractmethod async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return a slice of one stream, from *start*, at most *count* events.
@abc.abstractmethod
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]: ...
read_all abstractmethod async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return the global event feed after *after_position*, optionally
# filtered by event type name and limited to *count* events.
@abc.abstractmethod
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]: ...
stream_exists abstractmethod async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: report whether the given stream id is present in the store.
@abc.abstractmethod
async def stream_exists(self, stream_id: StreamId, /) -> bool: ...
global_head_position abstractmethod async
global_head_position()

Return the highest global position in the store, or -1 if empty.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: single int describing the store's write head; -1 means empty.
@abc.abstractmethod
async def global_head_position(self) -> int:
    """Return the highest global position in the store, or ``-1`` if empty."""
    ...
read_positions abstractmethod async
read_positions(*, after_position, up_to_position)

Return committed global positions in the range (after_position, up_to_position].

Positions are returned in ascending order.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: range query over committed positions, exclusive below / inclusive above.
@abc.abstractmethod
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Return committed global positions in the range ``(after_position, up_to_position]``.

    Positions are returned in ascending order.
    """
    ...
IEventWriter

Bases: ABC

append_to_stream abstractmethod async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: append envelopes to the stream, enforcing *expected_version*
# (optimistic concurrency); returns the resulting stream version.
@abc.abstractmethod
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int: ...
delete_stream abstractmethod async
delete_stream(stream_id)

Mark a stream as permanently deleted.

Deleted streams are excluded from read_all, read_positions, and stream_exists. Appending to a deleted stream raises StreamDeletedError. Events remain accessible via read_stream for audit purposes.

Raises StreamNotFoundError if the stream does not exist. No-op if already deleted.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: soft delete — full semantics are spelled out in the docstring.
@abc.abstractmethod
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Mark a stream as permanently deleted.

    Deleted streams are excluded from ``read_all``, ``read_positions``,
    and ``stream_exists``. Appending to a deleted stream raises
    ``StreamDeletedError``. Events remain accessible via ``read_stream``
    for audit purposes.

    Raises ``StreamNotFoundError`` if the stream does not exist.
    No-op if already deleted.
    """
    ...
IEventStore

Bases: IEventReader, IEventWriter, ABC

append_to_stream abstractmethod async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: append envelopes to the stream, enforcing *expected_version*
# (optimistic concurrency); returns the resulting stream version.
@abc.abstractmethod
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int: ...
delete_stream abstractmethod async
delete_stream(stream_id)

Mark a stream as permanently deleted.

Deleted streams are excluded from read_all, read_positions, and stream_exists. Appending to a deleted stream raises StreamDeletedError. Events remain accessible via read_stream for audit purposes.

Raises StreamNotFoundError if the stream does not exist. No-op if already deleted.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: soft delete — full semantics are spelled out in the docstring.
@abc.abstractmethod
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Mark a stream as permanently deleted.

    Deleted streams are excluded from ``read_all``, ``read_positions``,
    and ``stream_exists``. Appending to a deleted stream raises
    ``StreamDeletedError``. Events remain accessible via ``read_stream``
    for audit purposes.

    Raises ``StreamNotFoundError`` if the stream does not exist.
    No-op if already deleted.
    """
    ...
read_stream abstractmethod async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return a slice of one stream, from *start*, at most *count* events.
@abc.abstractmethod
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]: ...
read_all abstractmethod async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: return the global event feed after *after_position*, optionally
# filtered by event type name and limited to *count* events.
@abc.abstractmethod
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]: ...
stream_exists abstractmethod async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: report whether the given stream id is present in the store.
@abc.abstractmethod
async def stream_exists(self, stream_id: StreamId, /) -> bool: ...
global_head_position abstractmethod async
global_head_position()

Return the highest global position in the store, or -1 if empty.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: single int describing the store's write head; -1 means empty.
@abc.abstractmethod
async def global_head_position(self) -> int:
    """Return the highest global position in the store, or ``-1`` if empty."""
    ...
read_positions abstractmethod async
read_positions(*, after_position, up_to_position)

Return committed global positions in the range (after_position, up_to_position].

Positions are returned in ascending order.

Source code in src/waku/eventsourcing/store/interfaces.py
# Contract: range query over committed positions, exclusive below / inclusive above.
@abc.abstractmethod
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Return committed global positions in the range ``(after_position, up_to_position]``.

    Positions are returned in ascending order.
    """
    ...

sqlalchemy

SqlAlchemyEventStore
SqlAlchemyEventStore(
    session,
    serializer,
    registry,
    tables,
    upcaster_chain,
    projections=(),
    enrichers=(),
)

Bases: IEventStore

Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
def __init__(
    self,
    session: AsyncSession,
    serializer: IEventSerializer,
    registry: EventTypeRegistry,
    tables: EventStoreTables,
    upcaster_chain: UpcasterChain,
    projections: Sequence[IProjection] = (),
    enrichers: Sequence[IMetadataEnricher] = (),
) -> None:
    """Bind the store to its session, (de)serialization helpers, and tables."""
    self._session = session
    self._serializer = serializer
    self._registry = registry
    # Streams table carries per-stream metadata (incl. the soft-delete
    # timestamp); the events table holds the append-only event rows.
    self._streams = tables.streams
    self._events = tables.events
    self._upcaster_chain = upcaster_chain
    self._projections = projections
    self._enrichers = enrichers
read_stream async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]:
    """Return up to *count* events of *stream_id* starting at *start*."""
    key = str(stream_id)

    if count == 0:
        # Nothing to fetch, but an unknown stream must still raise.
        await self._ensure_stream_exists(stream_id)
        return []

    if start is StreamPosition.END:
        # Dedicated tail path — presumably fetches just the last event;
        # helper not visible here.
        return await self._read_stream_end(stream_id, key)

    match start:
        case StreamPosition.START:
            offset = 0
        case int() as offset:
            pass
        case _:  # pragma: no cover
            assert_never(start)

    query = (
        select(self._events)
        .where(self._events.c.stream_id == key)
        .where(self._events.c.position >= offset)
        .order_by(self._events.c.position)
    )
    if count is not None:
        query = query.limit(count)

    result = await self._session.execute(query)
    rows = result.fetchall()

    if not rows:
        # Distinguish "empty slice of a real stream" from "no such stream".
        await self._ensure_stream_exists(stream_id)

    return [
        row_to_stored_event(
            row, registry=self._registry, upcaster_chain=self._upcaster_chain, serializer=self._serializer
        )
        for row in rows
    ]
read_all async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]:
    """Global feed after *after_position* from non-deleted streams, in order."""
    stmt = (
        select(self._events)
        .join(self._streams, self._events.c.stream_id == self._streams.c.stream_id)
        .where(self._events.c.global_position > after_position)
        .where(self._not_deleted)
        .order_by(self._events.c.global_position)
    )
    if event_types:
        stmt = stmt.where(self._events.c.event_type.in_(event_types))
    if count is not None:
        stmt = stmt.limit(count)

    rows = (await self._session.execute(stmt)).fetchall()
    # Deserialize and upcast each raw row into a StoredEvent.
    return [
        row_to_stored_event(
            row, registry=self._registry, upcaster_chain=self._upcaster_chain, serializer=self._serializer
        )
        for row in rows
    ]
stream_exists async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def stream_exists(self, stream_id: StreamId, /) -> bool:
    """True when the stream row exists and is not soft-deleted."""
    stmt = select(self._streams.c.stream_id).where(
        self._streams.c.stream_id == str(stream_id),
        self._not_deleted,
    )
    found = (await self._session.execute(stmt)).scalar_one_or_none()
    return found is not None
global_head_position async
global_head_position()
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def global_head_position(self) -> int:
    """Largest stored global position, or -1 for an empty store."""
    head_query = select(sa_func.coalesce(sa_func.max(self._events.c.global_position), -1))
    head = (await self._session.execute(head_query)).scalar_one()
    return int(head)
read_positions async
read_positions(*, after_position, up_to_position)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Return global positions in ``(after_position, up_to_position]``, ascending.

    Events belonging to soft-deleted streams are excluded via the join.
    """
    position_col = self._events.c.global_position
    stmt = (
        select(position_col)
        .join(self._streams, self._events.c.stream_id == self._streams.c.stream_id)
        .where(position_col > after_position, position_col <= up_to_position)
        .where(self._not_deleted)
        .order_by(position_col)
    )
    result = await self._session.execute(stmt)
    return [record[0] for record in result.fetchall()]
delete_stream async
delete_stream(stream_id)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Soft-delete a stream by stamping ``deleted_at``; no-op if already deleted.

    Raises:
        StreamNotFoundError: If no stream row exists for *stream_id*.
    """
    existing = await self._get_stream(stream_id)
    if existing is None:
        raise StreamNotFoundError(stream_id)
    if existing.deleted_at is not None:
        # Already soft-deleted: deletion is idempotent.
        return
    stmt = (
        self._streams.update()
        .where(self._streams.c.stream_id == str(stream_id))
        .values(deleted_at=sa_func.now())
    )
    await self._session.execute(stmt)
append_to_stream async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int:
    """Append *events* to *stream_id* and return the resulting stream version.

    Enforces optimistic concurrency via *expected_version* and per-event
    idempotency keys (backed by the ``IDEMPOTENCY_KEY_CONSTRAINT`` unique
    constraint). After a successful insert, newly stored events are fed to
    the projections registered on this store.
    """
    if not events:
        # Nothing to write; still resolve (and thereby validate) the version.
        return await self._resolve_current_version(stream_id, expected_version)

    # Fast path: if these events were already appended under the same
    # idempotency keys, return the version from that earlier append.
    dedup_version = await self._check_idempotency(stream_id, events)
    if dedup_version is not None:
        return dedup_version

    current_version = await self._resolve_current_version(stream_id, expected_version)
    new_version = current_version + len(events)

    try:
        # Savepoint: a constraint violation rolls back only this append.
        async with self._session.begin_nested():
            await self._ensure_stream_row(stream_id)
            await self._update_stream_version(stream_id, current_version, new_version)
            stored_events = await self._insert_events(stream_id, events, start_position=current_version + 1)
    except IntegrityError as exc:
        if IDEMPOTENCY_KEY_CONSTRAINT in str(exc):
            # A concurrent writer inserted the same idempotency keys between
            # the check above and our insert; re-check and return its version.
            logger.warning(
                'Idempotency race condition on stream %s: duplicate key caught by DB constraint',
                stream_id,
            )
            dedup_version = await self._check_idempotency(stream_id, events)
            if dedup_version is not None:
                return dedup_version
            # Keys conflicted at the DB level yet the re-check found no stored
            # match (e.g. same key, different events): surface as a hard error.
            logger.exception(  # pragma: no cover
                'Idempotency re-check returned no match after IntegrityError on stream %s — '
                'this should not happen under normal conditions',
                stream_id,
            )
            raise DuplicateIdempotencyKeyError(
                stream_id,
                reason='conflict with existing keys',
            ) from exc  # pragma: no cover
        raise  # pragma: no cover

    # Feed the newly stored events to inline projections.
    for projection in self._projections:
        await projection.project(stored_events)

    return new_version
EventStoreTables dataclass
EventStoreTables(streams, events)
streams instance-attribute
streams
events instance-attribute
events
make_sqlalchemy_event_store
make_sqlalchemy_event_store(tables)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
def make_sqlalchemy_event_store(tables: EventStoreTables) -> SqlAlchemyEventStoreFactory:
    """Build an event-store factory bound to a fixed set of tables.

    The returned callable matches ``SqlAlchemyEventStoreFactory`` and closes
    over *tables*, so callers supply only per-instance collaborators.
    """
    def factory(
        session: AsyncSession,
        serializer: IEventSerializer,
        registry: EventTypeRegistry,
        upcaster_chain: UpcasterChain,
        projections: Sequence[IProjection] = (),
        enrichers: Sequence[IMetadataEnricher] = (),
    ) -> SqlAlchemyEventStore:
        return SqlAlchemyEventStore(session, serializer, registry, tables, upcaster_chain, projections, enrichers)

    return factory
bind_event_store_tables
bind_event_store_tables(metadata)
Source code in src/waku/eventsourcing/store/sqlalchemy/tables.py
def bind_event_store_tables(metadata: MetaData) -> EventStoreTables:
    """Attach the event-store tables to *metadata*, reusing any already bound."""

    def _attach(table):
        # Reuse the table if a previous call already copied it into this
        # metadata; otherwise copy the canonical definition over.
        if table.name in metadata.tables:
            return metadata.tables[table.name]
        return table.to_metadata(metadata)

    return EventStoreTables(
        streams=_attach(es_streams_table),
        events=_attach(es_events_table),
    )
store
logger module-attribute
logger = getLogger(__name__)
SqlAlchemyEventStoreFactory

Bases: Protocol

SqlAlchemyEventStore
SqlAlchemyEventStore(
    session,
    serializer,
    registry,
    tables,
    upcaster_chain,
    projections=(),
    enrichers=(),
)

Bases: IEventStore

Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
def __init__(
    self,
    session: AsyncSession,
    serializer: IEventSerializer,
    registry: EventTypeRegistry,
    tables: EventStoreTables,
    upcaster_chain: UpcasterChain,
    projections: Sequence[IProjection] = (),
    enrichers: Sequence[IMetadataEnricher] = (),
) -> None:
    """Wire the store to its session, (de)serialization helpers and tables."""
    self._session = session
    self._serializer = serializer
    self._registry = registry
    # Unpack the table pair once so queries reference them directly.
    self._streams = tables.streams
    self._events = tables.events
    self._upcaster_chain = upcaster_chain
    self._projections = projections
    self._enrichers = enrichers
read_stream async
read_stream(stream_id, /, *, start=START, count=None)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def read_stream(
    self,
    stream_id: StreamId,
    /,
    *,
    start: int | StreamPosition = StreamPosition.START,
    count: int | None = None,
) -> list[StoredEvent]:
    """Read events of a stream from position *start*, up to *count* items.

    *start* is either an absolute position or a ``StreamPosition`` marker;
    ``StreamPosition.END`` is delegated to ``_read_stream_end``.
    """
    key = str(stream_id)

    if count == 0:
        # Nothing requested: only verify the stream is readable.
        await self._ensure_stream_exists(stream_id)
        return []

    if start is StreamPosition.END:
        return await self._read_stream_end(stream_id, key)

    # Normalize `start` into an integer offset.
    match start:
        case StreamPosition.START:
            offset = 0
        case int() as offset:
            pass
        case _:  # pragma: no cover
            assert_never(start)

    query = (
        select(self._events)
        .where(self._events.c.stream_id == key)
        .where(self._events.c.position >= offset)
        .order_by(self._events.c.position)
    )
    if count is not None:
        query = query.limit(count)

    result = await self._session.execute(query)
    rows = result.fetchall()

    if not rows:
        # An empty result may mean a missing stream — let the check decide.
        await self._ensure_stream_exists(stream_id)

    return [
        row_to_stored_event(
            row, registry=self._registry, upcaster_chain=self._upcaster_chain, serializer=self._serializer
        )
        for row in rows
    ]
read_all async
read_all(
    *, after_position=-1, count=None, event_types=None
)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def read_all(
    self,
    *,
    after_position: int = -1,
    count: int | None = None,
    event_types: Sequence[str] | None = None,
) -> list[StoredEvent]:
    """Read events across all non-deleted streams in global-position order."""
    query = (
        select(self._events)
        .join(self._streams, self._events.c.stream_id == self._streams.c.stream_id)
        .where(self._events.c.global_position > after_position)
        .where(self._not_deleted)
        .order_by(self._events.c.global_position)
    )
    if event_types:
        query = query.where(self._events.c.event_type.in_(event_types))
    if count is not None:
        query = query.limit(count)

    result = await self._session.execute(query)
    rows = result.fetchall()
    return [
        row_to_stored_event(
            row, registry=self._registry, upcaster_chain=self._upcaster_chain, serializer=self._serializer
        )
        for row in rows
    ]
stream_exists async
stream_exists(stream_id)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def stream_exists(self, stream_id: StreamId, /) -> bool:
    """Return True when the stream exists and is not soft-deleted."""
    key = str(stream_id)
    query = select(self._streams.c.stream_id).where(
        self._streams.c.stream_id == key,
        self._not_deleted,
    )
    result = await self._session.execute(query)
    return result.scalar_one_or_none() is not None
global_head_position async
global_head_position()
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def global_head_position(self) -> int:
    """Return the highest global position, or -1 for an empty store."""
    query = select(sa_func.coalesce(sa_func.max(self._events.c.global_position), -1))
    result = await self._session.execute(query)
    return int(result.scalar_one())
read_positions async
read_positions(*, after_position, up_to_position)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def read_positions(
    self,
    *,
    after_position: int,
    up_to_position: int,
) -> list[int]:
    """Return global positions in (after_position, up_to_position], ascending."""
    query = (
        select(self._events.c.global_position)
        .join(self._streams, self._events.c.stream_id == self._streams.c.stream_id)
        .where(self._events.c.global_position > after_position)
        .where(self._events.c.global_position <= up_to_position)
        .where(self._not_deleted)
        .order_by(self._events.c.global_position)
    )
    result = await self._session.execute(query)
    return [row[0] for row in result.fetchall()]
delete_stream async
delete_stream(stream_id)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def delete_stream(self, stream_id: StreamId, /) -> None:
    """Soft-delete a stream (set deleted_at); idempotent once deleted.

    Raises:
        StreamNotFoundError: If no stream row exists for *stream_id*.
    """
    stream_row = await self._get_stream(stream_id)
    if stream_row is None:
        raise StreamNotFoundError(stream_id)
    if stream_row.deleted_at is not None:
        return
    await self._session.execute(
        self._streams.update().where(self._streams.c.stream_id == str(stream_id)).values(deleted_at=sa_func.now())
    )
append_to_stream async
append_to_stream(stream_id, /, events, *, expected_version)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
async def append_to_stream(
    self,
    stream_id: StreamId,
    /,
    events: Sequence[EventEnvelope],
    *,
    expected_version: ExpectedVersion,
) -> int:
    """Append *events* with optimistic concurrency and idempotency handling.

    Returns the stream version after the append, or the previously recorded
    version when the events were already stored under the same idempotency
    keys.
    """
    if not events:
        return await self._resolve_current_version(stream_id, expected_version)

    # Fast path: already-appended events are detected by idempotency key.
    dedup_version = await self._check_idempotency(stream_id, events)
    if dedup_version is not None:
        return dedup_version

    current_version = await self._resolve_current_version(stream_id, expected_version)
    new_version = current_version + len(events)

    try:
        # Savepoint so a constraint violation rolls back only this append.
        async with self._session.begin_nested():
            await self._ensure_stream_row(stream_id)
            await self._update_stream_version(stream_id, current_version, new_version)
            stored_events = await self._insert_events(stream_id, events, start_position=current_version + 1)
    except IntegrityError as exc:
        if IDEMPOTENCY_KEY_CONSTRAINT in str(exc):
            # Concurrent writer won the race; re-check and reuse its version.
            logger.warning(
                'Idempotency race condition on stream %s: duplicate key caught by DB constraint',
                stream_id,
            )
            dedup_version = await self._check_idempotency(stream_id, events)
            if dedup_version is not None:
                return dedup_version
            logger.exception(  # pragma: no cover
                'Idempotency re-check returned no match after IntegrityError on stream %s — '
                'this should not happen under normal conditions',
                stream_id,
            )
            raise DuplicateIdempotencyKeyError(
                stream_id,
                reason='conflict with existing keys',
            ) from exc  # pragma: no cover
        raise  # pragma: no cover

    # Feed the newly stored events to inline projections.
    for projection in self._projections:
        await projection.project(stored_events)

    return new_version
make_sqlalchemy_event_store
make_sqlalchemy_event_store(tables)
Source code in src/waku/eventsourcing/store/sqlalchemy/store.py
def make_sqlalchemy_event_store(tables: EventStoreTables) -> SqlAlchemyEventStoreFactory:
    """Build an event-store factory closed over a fixed set of tables."""
    def factory(
        session: AsyncSession,
        serializer: IEventSerializer,
        registry: EventTypeRegistry,
        upcaster_chain: UpcasterChain,
        projections: Sequence[IProjection] = (),
        enrichers: Sequence[IMetadataEnricher] = (),
    ) -> SqlAlchemyEventStore:
        return SqlAlchemyEventStore(session, serializer, registry, tables, upcaster_chain, projections, enrichers)

    return factory
tables
IDEMPOTENCY_KEY_CONSTRAINT module-attribute
IDEMPOTENCY_KEY_CONSTRAINT = 'uq_es_events_idempotency_key'
es_streams_table module-attribute
# One row per event stream; soft deletion is modelled via `deleted_at`.
es_streams_table = Table(
    'es_streams',
    _internal_metadata,
    # Stringified StreamId.
    Column('stream_id', Text, primary_key=True),
    Column('stream_type', Text, nullable=False),
    # Current stream version used for optimistic concurrency checks.
    Column(
        'version',
        Integer,
        nullable=False,
        server_default='0',
    ),
    Column(
        'created_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
    ),
    Column(
        'updated_at',
        TIMESTAMP(timezone=True),
        server_default=now(),
        onupdate=now(),
    ),
    # Set by delete_stream(); non-NULL marks the stream soft-deleted.
    Column(
        'deleted_at',
        TIMESTAMP(timezone=True),
        nullable=True,
    ),
)
es_events_table module-attribute
# One row per stored event; append-only.
es_events_table = Table(
    'es_events',
    _internal_metadata,
    Column(
        'event_id', UUID(as_uuid=True), primary_key=True
    ),
    Column('stream_id', Text, nullable=False),
    Column('event_type', Text, nullable=False),
    # Per-stream position, unique within a stream (see constraint below).
    Column('position', Integer, nullable=False),
    # Store-wide monotonic position, DB-generated (identity starting at 0).
    Column(
        'global_position',
        BigInteger,
        Identity(
            always=True, start=0, minvalue=0, cycle=False
        ),
        nullable=False,
    ),
    Column('data', JSONB, nullable=False),
    Column('metadata', JSONB, nullable=False),
    Column(
        'timestamp',
        TIMESTAMP(timezone=True),
        nullable=False,
    ),
    # Schema version consumed by the upcaster chain on read.
    Column(
        'schema_version',
        Integer,
        nullable=False,
        server_default='1',
    ),
    Column('idempotency_key', Text, nullable=False),
    UniqueConstraint(
        'stream_id',
        'position',
        name='uq_es_events_stream_id_position',
    ),
    # Backs the idempotency race detection in append_to_stream.
    UniqueConstraint(
        'stream_id',
        'idempotency_key',
        name=IDEMPOTENCY_KEY_CONSTRAINT,
    ),
    Index(
        'ix_es_events_global_position', 'global_position'
    ),
    Index('ix_es_events_event_type', 'event_type'),
)
EventStoreTables dataclass
EventStoreTables(streams, events)
streams instance-attribute
streams
events instance-attribute
events
bind_event_store_tables
bind_event_store_tables(metadata)
Source code in src/waku/eventsourcing/store/sqlalchemy/tables.py
def bind_event_store_tables(metadata: MetaData) -> EventStoreTables:
    """Attach (or reuse, if already attached) the event-store tables on *metadata*."""
    streams = (
        metadata.tables[es_streams_table.name]
        if es_streams_table.name in metadata.tables
        else es_streams_table.to_metadata(metadata)
    )
    events = (
        metadata.tables[es_events_table.name]
        if es_events_table.name in metadata.tables
        else es_events_table.to_metadata(metadata)
    )
    return EventStoreTables(streams=streams, events=events)

testing

DeciderSpec

DeciderSpec(decider)

Bases: Generic[StateT, CommandT, EventT]

Given/When/Then DSL for testing IDecider implementations.

Example::

DeciderSpec.for_(decider).given([event]).when(command).then([expected])
Source code in src/waku/eventsourcing/testing.py
def __init__(self, decider: IDecider[StateT, CommandT, EventT]) -> None:
    """Wrap *decider*; the given-events history starts empty."""
    self._decider = decider
    self._events: list[EventT] = []  # history installed by given()
for_ classmethod
for_(decider)
Source code in src/waku/eventsourcing/testing.py
@classmethod
def for_(cls, decider: IDecider[StateT, CommandT, EventT]) -> DeciderSpec[StateT, CommandT, EventT]:
    """Fluent entry point: create a spec for *decider*."""
    return cls(decider)
given
given(events)
Source code in src/waku/eventsourcing/testing.py
def given(self, events: Sequence[EventT]) -> DeciderSpec[StateT, CommandT, EventT]:
    """Set the history of past events; returns self for chaining."""
    self._events = list(events)
    return self
when
when(command)
Source code in src/waku/eventsourcing/testing.py
def when(self, command: CommandT) -> _DeciderWhenResult[StateT, CommandT, EventT]:
    """Fold the given events into a state, then stage *command* for then()."""
    state = self._decider.initial_state()
    for event in self._events:
        state = self._decider.evolve(state, event)
    return _DeciderWhenResult(self._decider, state, command)
then_state
then_state(predicate)
Source code in src/waku/eventsourcing/testing.py
def then_state(self, predicate: Callable[[StateT], bool]) -> None:
    """Assert *predicate* holds on the state rebuilt from the given events."""
    state = self._decider.initial_state()
    for event in self._events:
        state = self._decider.evolve(state, event)
    assert predicate(state), f'State predicate failed for state: {state}'  # noqa: S101

AggregateSpec

AggregateSpec(aggregate_type)

Bases: Generic[AggregateT]

Given/When/Then DSL for testing EventSourcedAggregate implementations.

Example::

AggregateSpec.for_(OrderAggregate).given([OrderCreated(...)]).when(lambda agg: agg.cancel()).then([
    OrderCancelled(...)
])
Source code in src/waku/eventsourcing/testing.py
def __init__(self, aggregate_type: type[AggregateT]) -> None:
    """Remember the aggregate type; the given-events history starts empty."""
    self._aggregate_type = aggregate_type
    self._events: list[IEvent] = []  # history installed by given()
for_ classmethod
for_(aggregate_type)
Source code in src/waku/eventsourcing/testing.py
@classmethod
def for_(cls, aggregate_type: type[AggregateT]) -> AggregateSpec[AggregateT]:
    """Fluent entry point: create a spec for *aggregate_type*."""
    return cls(aggregate_type)
given
given(events)
Source code in src/waku/eventsourcing/testing.py
def given(self, events: Sequence[IEvent]) -> AggregateSpec[AggregateT]:
    """Set the history of past events; returns self for chaining."""
    self._events = list(events)
    return self
when
when(action)
Source code in src/waku/eventsourcing/testing.py
def when(self, action: Callable[[AggregateT], None]) -> _AggregateWhenResult[AggregateT]:
    """Hydrate the aggregate from the given events and stage *action* for then()."""
    aggregate = self._hydrate()
    return _AggregateWhenResult(aggregate, action)
then_state
then_state(predicate)
Source code in src/waku/eventsourcing/testing.py
def then_state(self, predicate: Callable[[AggregateT], Any]) -> None:
    """Assert *predicate* is truthy for the aggregate hydrated from the given events."""
    aggregate = self._hydrate()
    assert predicate(aggregate), f'State predicate failed for aggregate: {aggregate}'  # noqa: S101

wait_for_projection async

wait_for_projection(
    checkpoint_store,
    event_reader,
    projection_name,
    *,
    deadline=5.0,
    poll_interval=0.1,
)

Poll until a projection's checkpoint reaches the global head position.

Returns immediately when the event store is empty (head position == -1).

PARAMETER DESCRIPTION
checkpoint_store

Store to read projection checkpoints from.

TYPE: ICheckpointStore

event_reader

Event reader to determine the global head position.

TYPE: IEventReader

projection_name

Name of the projection to wait for.

TYPE: str

deadline

Maximum seconds to wait before raising TimeoutError.

TYPE: float DEFAULT: 5.0

poll_interval

Seconds between checkpoint polls.

TYPE: float DEFAULT: 0.1

RAISES DESCRIPTION
TimeoutError

If the projection does not catch up within deadline seconds.

Source code in src/waku/eventsourcing/testing.py
async def wait_for_projection(
    checkpoint_store: ICheckpointStore,
    event_reader: IEventReader,
    projection_name: str,
    *,
    deadline: float = 5.0,
    poll_interval: float = 0.1,
) -> None:
    """Poll until a projection's checkpoint reaches the global head position.

    Returns immediately when the event store is empty (head position == -1).

    Args:
        checkpoint_store: Store to read projection checkpoints from.
        event_reader: Event reader to determine the global head position.
        projection_name: Name of the projection to wait for.
        deadline: Maximum seconds to wait before raising ``TimeoutError``.
        poll_interval: Seconds between checkpoint polls.

    Raises:
        TimeoutError: If the projection does not catch up within *deadline* seconds.
    """
    target = await event_reader.global_head_position()
    if target == -1:
        # Empty store: nothing for the projection to catch up to.
        return

    def _caught_up(checkpoint) -> bool:
        return checkpoint is not None and checkpoint.position >= target

    try:
        with anyio.fail_after(deadline):
            while not _caught_up(await checkpoint_store.load(projection_name)):
                await anyio.sleep(poll_interval)
    except TimeoutError:
        msg = f'Projection {projection_name!r} did not catch up within {deadline}s'
        raise TimeoutError(msg) from None

wait_for_all_projections async

wait_for_all_projections(
    checkpoint_store,
    event_reader,
    projection_registry,
    *,
    deadline=10.0,
    poll_interval=0.1,
)

Poll until every registered catch-up projection has caught up.

Delegates to :func:wait_for_projection for each binding in the registry.

PARAMETER DESCRIPTION
checkpoint_store

Store to read projection checkpoints from.

TYPE: ICheckpointStore

event_reader

Event reader to determine the global head position.

TYPE: IEventReader

projection_registry

Registry of catch-up projection bindings.

TYPE: CatchUpProjectionRegistry

deadline

Maximum seconds to wait per projection.

TYPE: float DEFAULT: 10.0

poll_interval

Seconds between checkpoint polls.

TYPE: float DEFAULT: 0.1

RAISES DESCRIPTION
TimeoutError

If any projection does not catch up within deadline seconds.

Source code in src/waku/eventsourcing/testing.py
async def wait_for_all_projections(
    checkpoint_store: ICheckpointStore,
    event_reader: IEventReader,
    projection_registry: CatchUpProjectionRegistry,
    *,
    deadline: float = 10.0,
    poll_interval: float = 0.1,
) -> None:
    """Poll until every registered catch-up projection has caught up.

    Delegates to :func:`wait_for_projection` for each binding in the registry.

    Args:
        checkpoint_store: Store to read projection checkpoints from.
        event_reader: Event reader to determine the global head position.
        projection_registry: Registry of catch-up projection bindings.
        deadline: Maximum seconds to wait *per projection*.
        poll_interval: Seconds between checkpoint polls.

    Raises:
        TimeoutError: If any projection does not catch up within *deadline* seconds.
    """
    # Sequential waits: each projection gets the full per-projection deadline.
    for entry in projection_registry:
        name = entry.projection.projection_name
        await wait_for_projection(
            checkpoint_store=checkpoint_store,
            event_reader=event_reader,
            projection_name=name,
            deadline=deadline,
            poll_interval=poll_interval,
        )

upcasting

UpcasterChain

UpcasterChain(upcasters_by_type)
Source code in src/waku/eventsourcing/upcasting/chain.py
def __init__(self, upcasters_by_type: Mapping[str, Sequence[IEventUpcaster]]) -> None:
    """Validate and index upcasters per event type, sorted by from_version.

    Raises:
        UpcasterChainError: If any from_version is < 1 or appears twice
            for the same event type.
    """
    chains: dict[str, tuple[IEventUpcaster, ...]] = {}
    for event_type, upcasters in upcasters_by_type.items():
        # Apply order is ascending from_version.
        sorted_upcasters = sorted(upcasters, key=lambda u: u.from_version)
        seen: set[int] = set()
        for u in sorted_upcasters:
            if u.from_version < 1:
                msg = f'Invalid from_version {u.from_version} for event type {event_type!r}: must be >= 1'
                raise UpcasterChainError(msg)
            if u.from_version in seen:
                msg = f'Duplicate upcaster for event type {event_type!r} at from_version {u.from_version}'
                raise UpcasterChainError(msg)
            seen.add(u.from_version)
        chains[event_type] = tuple(sorted_upcasters)
    self._chains = chains
upcast
upcast(event_type, data, schema_version)
Source code in src/waku/eventsourcing/upcasting/chain.py
def upcast(self, event_type: str, data: dict[str, Any], schema_version: int) -> dict[str, Any]:
    """Run *data* through every upcaster registered at or above *schema_version*."""
    chain = self._chains.get(event_type)
    if not chain:
        # No upcasters registered for this event type.
        return data
    if schema_version > chain[-1].from_version:
        # Payload is already newer than every registered upcaster.
        return data
    upgraded = data
    for step in chain:
        if step.from_version >= schema_version:
            upgraded = step.upcast(upgraded)
    return upgraded

FnUpcaster

FnUpcaster(from_version, fn)

Bases: IEventUpcaster

Source code in src/waku/eventsourcing/upcasting/fn.py
def __init__(self, from_version: int, fn: Callable[[dict[str, Any]], dict[str, Any]]) -> None:
    """Adapt plain callable *fn* into an upcaster applied at *from_version*."""
    self.from_version = from_version
    self._fn = fn
from_version instance-attribute
from_version = from_version
upcast
upcast(data)
Source code in src/waku/eventsourcing/upcasting/fn.py
def upcast(self, data: dict[str, Any], /) -> dict[str, Any]:
    """Delegate to the wrapped callable."""
    return self._fn(data)

IEventUpcaster

Bases: ABC

from_version instance-attribute
from_version
upcast abstractmethod
upcast(data)
Source code in src/waku/eventsourcing/upcasting/interfaces.py
# Transform one event payload from this upcaster's from_version to the next.
@abc.abstractmethod
def upcast(self, data: dict[str, Any], /) -> dict[str, Any]: ...

add_field

add_field(from_version, *, field, default)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def add_field(from_version: int, *, field: str, default: Any) -> IEventUpcaster:
    """Upcaster that adds *field* with a copy of *default* when absent."""

    def _with_field(payload: dict[str, Any]) -> dict[str, Any]:
        updated = {**payload}
        if field not in updated:
            # Copy so a mutable default is never shared across events.
            updated[field] = copy.copy(default)
        return updated

    return FnUpcaster(from_version, fn=_with_field)

noop

noop(from_version)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def noop(from_version: int) -> IEventUpcaster:
    """Upcaster that copies the payload unchanged (pure version bump)."""
    return FnUpcaster(from_version, fn=dict)

remove_field

remove_field(from_version, *, field)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def remove_field(from_version: int, *, field: str) -> IEventUpcaster:
    """Upcaster that drops *field* from the event payload."""

    def _drop(payload: dict[str, Any]) -> dict[str, Any]:
        return {key: value for key, value in payload.items() if key != field}

    return FnUpcaster(from_version, fn=_drop)

rename_field

rename_field(from_version, *, old, new)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def rename_field(from_version: int, *, old: str, new: str) -> IEventUpcaster:
    """Upcaster that renames key *old* to *new* (no-op if *old* is absent)."""

    def _move(payload: dict[str, Any]) -> dict[str, Any]:
        renamed = {key: value for key, value in payload.items() if key != old}
        if old in payload:
            renamed[new] = payload[old]
        return renamed

    return FnUpcaster(from_version, fn=_move)

upcast

upcast(from_version, fn)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def upcast(from_version: int, fn: Callable[[dict[str, Any]], dict[str, Any]]) -> IEventUpcaster:
    """Wrap *fn* as an upcaster applied at *from_version*."""
    return FnUpcaster(from_version, fn=fn)

chain

UpcasterChain
UpcasterChain(upcasters_by_type)
Source code in src/waku/eventsourcing/upcasting/chain.py
def __init__(self, upcasters_by_type: Mapping[str, Sequence[IEventUpcaster]]) -> None:
    """Validate and index upcasters per event type, sorted by from_version.

    Raises:
        UpcasterChainError: On from_version < 1 or a duplicate per type.
    """
    chains: dict[str, tuple[IEventUpcaster, ...]] = {}
    for event_type, upcasters in upcasters_by_type.items():
        sorted_upcasters = sorted(upcasters, key=lambda u: u.from_version)
        seen: set[int] = set()
        for u in sorted_upcasters:
            if u.from_version < 1:
                msg = f'Invalid from_version {u.from_version} for event type {event_type!r}: must be >= 1'
                raise UpcasterChainError(msg)
            if u.from_version in seen:
                msg = f'Duplicate upcaster for event type {event_type!r} at from_version {u.from_version}'
                raise UpcasterChainError(msg)
            seen.add(u.from_version)
        chains[event_type] = tuple(sorted_upcasters)
    self._chains = chains
upcast
upcast(event_type, data, schema_version)
Source code in src/waku/eventsourcing/upcasting/chain.py
def upcast(self, event_type: str, data: dict[str, Any], schema_version: int) -> dict[str, Any]:
    """Run *data* through every upcaster registered at or above *schema_version*."""
    upcasters = self._chains.get(event_type)
    if not upcasters:
        return data
    if schema_version > upcasters[-1].from_version:
        # Payload is already newer than every registered upcaster.
        return data
    for u in upcasters:
        if u.from_version >= schema_version:
            data = u.upcast(data)
    return data

fn

FnUpcaster
FnUpcaster(from_version, fn)

Bases: IEventUpcaster

Source code in src/waku/eventsourcing/upcasting/fn.py
def __init__(self, from_version: int, fn: Callable[[dict[str, Any]], dict[str, Any]]) -> None:
    """Adapt plain callable *fn* into an upcaster applied at *from_version*."""
    self.from_version = from_version
    self._fn = fn
from_version instance-attribute
from_version = from_version
upcast
upcast(data)
Source code in src/waku/eventsourcing/upcasting/fn.py
def upcast(self, data: dict[str, Any], /) -> dict[str, Any]:
    """Delegate to the wrapped callable."""
    return self._fn(data)

helpers

noop
noop(from_version)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def noop(from_version: int) -> IEventUpcaster:
    """Upcaster that copies the payload unchanged (pure version bump)."""
    return FnUpcaster(from_version, fn=dict)
rename_field
rename_field(from_version, *, old, new)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def rename_field(from_version: int, *, old: str, new: str) -> IEventUpcaster:
    """Upcaster that renames key *old* to *new* (no-op if *old* is absent)."""
    def _rename(data: dict[str, Any]) -> dict[str, Any]:
        result = {k: v for k, v in data.items() if k != old}
        if old in data:
            result[new] = data[old]
        return result

    return FnUpcaster(from_version, fn=_rename)
add_field
add_field(from_version, *, field, default)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def add_field(from_version: int, *, field: str, default: Any) -> IEventUpcaster:
    """Upcaster that adds *field* with a copy of *default* when absent."""
    def _add(data: dict[str, Any]) -> dict[str, Any]:
        result = dict(data)
        if field not in result:
            # Copy so a mutable default is never shared across events.
            result[field] = copy.copy(default)
        return result

    return FnUpcaster(from_version, fn=_add)
remove_field
remove_field(from_version, *, field)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def remove_field(from_version: int, *, field: str) -> IEventUpcaster:
    """Upcaster that drops *field* from the event payload."""
    return FnUpcaster(from_version, fn=lambda data: {k: v for k, v in data.items() if k != field})
upcast
upcast(from_version, fn)
Source code in src/waku/eventsourcing/upcasting/helpers.py
def upcast(from_version: int, fn: Callable[[dict[str, Any]], dict[str, Any]]) -> IEventUpcaster:
    """Wrap *fn* as an upcaster applied at *from_version*."""
    return FnUpcaster(from_version, fn=fn)

interfaces

IEventUpcaster

Bases: ABC

from_version instance-attribute
from_version
upcast abstractmethod
upcast(data)
Source code in src/waku/eventsourcing/upcasting/interfaces.py
# Transform one event payload from this upcaster's from_version to the next.
@abc.abstractmethod
def upcast(self, data: dict[str, Any], /) -> dict[str, Any]: ...

exceptions

WakuError

Bases: Exception

extensions

ApplicationExtension module-attribute

ModuleExtension module-attribute

DEFAULT_EXTENSIONS module-attribute

# Extensions enabled by default for every application: dependency-access
# validation in strict mode.
DEFAULT_EXTENSIONS = (
    ValidationExtension(
        [DependenciesAccessibleRule()], strict=True
    ),
)

AfterApplicationInit

Bases: Protocol

Extension for application post-initialization actions.

after_app_init async

after_app_init(app)
Source code in src/waku/extensions/protocols.py
async def after_app_init(self, app: WakuApplication) -> None:
    """Hook called after the application has finished initialization."""
    ...

OnApplicationInit

Bases: Protocol

Extension for application pre-initialization actions.

on_app_init async

on_app_init(app)
Source code in src/waku/extensions/protocols.py
async def on_app_init(self, app: WakuApplication) -> None:
    """Hook called before the application is initialized."""
    ...

OnApplicationShutdown

Bases: Protocol

Extension for application shutdown actions.

on_app_shutdown async

on_app_shutdown(app)
Source code in src/waku/extensions/protocols.py
async def on_app_shutdown(self, app: WakuApplication) -> None:
    """Hook called during application shutdown."""
    ...

OnModuleConfigure

Bases: Protocol

Extension for module configuration.

on_module_configure

on_module_configure(metadata)

Perform actions before module metadata transformed to module.

Source code in src/waku/extensions/protocols.py
def on_module_configure(self, metadata: ModuleMetadata) -> None:
    """Perform actions before module metadata transformed to module.

    Runs synchronously; may mutate *metadata* in place.
    """
    ...

OnModuleDestroy

Bases: Protocol

Extension for module destroying.

on_module_destroy async

on_module_destroy(module)
Source code in src/waku/extensions/protocols.py
async def on_module_destroy(self, module: Module) -> None:
    """Hook called when *module* is being destroyed."""
    ...

OnModuleInit

Bases: Protocol

Extension for module initialization.

on_module_init async

on_module_init(module)
Source code in src/waku/extensions/protocols.py
async def on_module_init(self, module: Module) -> None: ...

OnModuleRegistration

Bases: Protocol

Extension for contributing providers to module metadata during registration.

This hook runs after all module metadata is collected but before Module objects are created. Use this for cross-module aggregation that produces providers which should belong to the owning module.

Can be declared at both application level (passed to WakuFactory) and module level (in module's extensions list).

Execution order
  1. Application-level extensions (assigned to root module)
  2. Module-level extensions (in topological order)
Key differences from OnModuleConfigure
  • Runs after ALL modules are collected (cross-module visibility)
  • Receives registry with access to all modules' metadata
  • Can add providers to owning module

on_module_registration

on_module_registration(registry, owning_module, context)

Contribute providers to module metadata before Module objects are created.

PARAMETER DESCRIPTION
registry

Registry of all collected module metadata. Use find_extensions() to discover extensions across modules, add_provider() to contribute.

TYPE: ModuleMetadataRegistry

owning_module

The module type that owns this extension. Providers added via registry.add_provider() should target this module.

TYPE: ModuleType

context

Application context passed to WakuFactory (read-only).

TYPE: Mapping[Any, Any] | None

Source code in src/waku/extensions/protocols.py
def on_module_registration(
    self,
    registry: ModuleMetadataRegistry,
    owning_module: ModuleType,
    context: Mapping[Any, Any] | None,
) -> None:
    """Contribute providers to module metadata before Module objects are created.

    Args:
        registry: Registry of all collected module metadata. Use find_extensions()
                  to discover extensions across modules, add_provider() to contribute.
        owning_module: The module type that owns this extension. Providers
                      added via registry.add_provider() should target this module.
        context: Application context passed to WakuFactory (read-only).
    """
    ...

ExtensionRegistry

ExtensionRegistry()

Registry for extensions.

This registry maintains references to all extensions in the application, allowing for centralized management and discovery.

Source code in src/waku/extensions/registry.py
def __init__(self) -> None:
    self._app_extensions: dict[type[ApplicationExtension], list[ApplicationExtension]] = defaultdict(list)
    self._module_extensions: dict[ModuleType, list[ModuleExtension]] = defaultdict(list)

register_application_extension

register_application_extension(extension)

Register an application extension, indexing it under each extension protocol base class it implements.

Source code in src/waku/extensions/registry.py
def register_application_extension(self, extension: ApplicationExtension) -> Self:
    """Register an application extension with optional priority and tags."""
    ext_type = type(extension)
    extension_bases = [
        base
        for base in inspect.getmro(ext_type)
        if (isinstance(base, ApplicationExtension) and base != ext_type)  # type: ignore[unreachable]
    ]
    for base in extension_bases:
        self._app_extensions[cast('type[ApplicationExtension]', base)].append(extension)
    return self

register_module_extension

register_module_extension(module_type, extension)
Source code in src/waku/extensions/registry.py
def register_module_extension(self, module_type: ModuleType, extension: ModuleExtension) -> Self:
    self._module_extensions[module_type].append(extension)
    return self

get_application_extensions

get_application_extensions(extension_type)
Source code in src/waku/extensions/registry.py
def get_application_extensions(self, extension_type: type[_AppExtT]) -> list[_AppExtT]:
    return cast('list[_AppExtT]', self._app_extensions.get(cast('type[ApplicationExtension]', extension_type), []))

get_module_extensions

get_module_extensions(module_type, extension_type)
Source code in src/waku/extensions/registry.py
def get_module_extensions(self, module_type: ModuleType, extension_type: type[_ModExtT]) -> list[_ModExtT]:
    extensions = cast('list[_ModExtT]', self._module_extensions.get(module_type, []))
    return [ext for ext in extensions if isinstance(ext, extension_type)]

protocols

Extension protocols for application and module lifecycle hooks.

ApplicationExtension module-attribute

ModuleExtension module-attribute

OnApplicationInit

Bases: Protocol

Extension for application pre-initialization actions.

on_app_init async
on_app_init(app)
Source code in src/waku/extensions/protocols.py
async def on_app_init(self, app: WakuApplication) -> None: ...

AfterApplicationInit

Bases: Protocol

Extension for application post-initialization actions.

after_app_init async
after_app_init(app)
Source code in src/waku/extensions/protocols.py
async def after_app_init(self, app: WakuApplication) -> None: ...

OnApplicationShutdown

Bases: Protocol

Extension for application shutdown actions.

on_app_shutdown async
on_app_shutdown(app)
Source code in src/waku/extensions/protocols.py
async def on_app_shutdown(self, app: WakuApplication) -> None: ...

OnModuleRegistration

Bases: Protocol

Extension for contributing providers to module metadata during registration.

This hook runs after all module metadata is collected but before Module objects are created. Use this for cross-module aggregation that produces providers which should belong to the owning module.

Can be declared at both application level (passed to WakuFactory) and module level (in module's extensions list).

Execution order
  1. Application-level extensions (assigned to root module)
  2. Module-level extensions (in topological order)
Key differences from OnModuleConfigure
  • Runs after ALL modules are collected (cross-module visibility)
  • Receives registry with access to all modules' metadata
  • Can add providers to owning module
on_module_registration
on_module_registration(registry, owning_module, context)

Contribute providers to module metadata before Module objects are created.

PARAMETER DESCRIPTION
registry

Registry of all collected module metadata. Use find_extensions() to discover extensions across modules, add_provider() to contribute.

TYPE: ModuleMetadataRegistry

owning_module

The module type that owns this extension. Providers added via registry.add_provider() should target this module.

TYPE: ModuleType

context

Application context passed to WakuFactory (read-only).

TYPE: Mapping[Any, Any] | None

Source code in src/waku/extensions/protocols.py
def on_module_registration(
    self,
    registry: ModuleMetadataRegistry,
    owning_module: ModuleType,
    context: Mapping[Any, Any] | None,
) -> None:
    """Contribute providers to module metadata before Module objects are created.

    Args:
        registry: Registry of all collected module metadata. Use find_extensions()
                  to discover extensions across modules, add_provider() to contribute.
        owning_module: The module type that owns this extension. Providers
                      added via registry.add_provider() should target this module.
        context: Application context passed to WakuFactory (read-only).
    """
    ...

OnModuleConfigure

Bases: Protocol

Extension for module configuration.

on_module_configure
on_module_configure(metadata)

Perform actions before module metadata is transformed into a module.

Source code in src/waku/extensions/protocols.py
def on_module_configure(self, metadata: ModuleMetadata) -> None:
    """Perform actions before module metadata transformed to module."""
    ...

OnModuleInit

Bases: Protocol

Extension for module initialization.

on_module_init async
on_module_init(module)
Source code in src/waku/extensions/protocols.py
async def on_module_init(self, module: Module) -> None: ...

OnModuleDestroy

Bases: Protocol

Extension for module destruction.

on_module_destroy async
on_module_destroy(module)
Source code in src/waku/extensions/protocols.py
async def on_module_destroy(self, module: Module) -> None: ...

registry

Extension registry for centralized management of extensions.

ExtensionRegistry

ExtensionRegistry()

Registry for extensions.

This registry maintains references to all extensions in the application, allowing for centralized management and discovery.

Source code in src/waku/extensions/registry.py
def __init__(self) -> None:
    self._app_extensions: dict[type[ApplicationExtension], list[ApplicationExtension]] = defaultdict(list)
    self._module_extensions: dict[ModuleType, list[ModuleExtension]] = defaultdict(list)
register_application_extension
register_application_extension(extension)

Register an application extension, indexing it under each extension protocol base class it implements.

Source code in src/waku/extensions/registry.py
def register_application_extension(self, extension: ApplicationExtension) -> Self:
    """Register an application extension with optional priority and tags."""
    ext_type = type(extension)
    extension_bases = [
        base
        for base in inspect.getmro(ext_type)
        if (isinstance(base, ApplicationExtension) and base != ext_type)  # type: ignore[unreachable]
    ]
    for base in extension_bases:
        self._app_extensions[cast('type[ApplicationExtension]', base)].append(extension)
    return self
register_module_extension
register_module_extension(module_type, extension)
Source code in src/waku/extensions/registry.py
def register_module_extension(self, module_type: ModuleType, extension: ModuleExtension) -> Self:
    self._module_extensions[module_type].append(extension)
    return self
get_application_extensions
get_application_extensions(extension_type)
Source code in src/waku/extensions/registry.py
def get_application_extensions(self, extension_type: type[_AppExtT]) -> list[_AppExtT]:
    return cast('list[_AppExtT]', self._app_extensions.get(cast('type[ApplicationExtension]', extension_type), []))
get_module_extensions
get_module_extensions(module_type, extension_type)
Source code in src/waku/extensions/registry.py
def get_module_extensions(self, module_type: ModuleType, extension_type: type[_ModExtT]) -> list[_ModExtT]:
    extensions = cast('list[_ModExtT]', self._module_extensions.get(module_type, []))
    return [ext for ext in extensions if isinstance(ext, extension_type)]

factory

ContainerConfig dataclass

ContainerConfig(
    *,
    lock_factory=Lock,
    start_scope=None,
    skip_validation=False,
)

lock_factory class-attribute instance-attribute

lock_factory = Lock

start_scope class-attribute instance-attribute

start_scope = None

skip_validation class-attribute instance-attribute

skip_validation = False

WakuFactory

WakuFactory(
    root_module_type,
    /,
    context=None,
    lifespan=(),
    extensions=DEFAULT_EXTENSIONS,
    container_config=None,
)
Source code in src/waku/factory.py
def __init__(
    self,
    root_module_type: ModuleType,
    /,
    context: dict[Any, Any] | None = None,
    lifespan: Sequence[LifespanFunc] = (),
    extensions: Sequence[ApplicationExtension] = DEFAULT_EXTENSIONS,
    container_config: ContainerConfig | None = None,
) -> None:
    self._root_module_type = root_module_type

    self._context = context
    self._lifespan = lifespan
    self._extensions = extensions
    self._container_config = container_config or ContainerConfig()

create

create()
Source code in src/waku/factory.py
def create(self) -> WakuApplication:
    registry = ModuleRegistryBuilder(
        self._root_module_type,
        context=self._context,
        app_extensions=self._extensions,
    ).build()

    container = self._build_container(registry.providers)
    return WakuApplication(
        container=container,
        registry=registry,
        lifespan=self._lifespan,
        extension_registry=self._build_extension_registry(registry.modules),
    )

lifespan

LifespanFunc module-attribute

LifespanFunc = (
    Callable[
        ['WakuApplication'],
        AbstractAsyncContextManager[None],
    ]
    | AbstractAsyncContextManager[None]
)

LifespanWrapper

LifespanWrapper(lifespan_func)
Source code in src/waku/lifespan.py
def __init__(self, lifespan_func: LifespanFunc) -> None:
    self._lifespan_func = lifespan_func

lifespan async

lifespan(app)
Source code in src/waku/lifespan.py
@asynccontextmanager
async def lifespan(self, app: WakuApplication) -> AsyncGenerator[None]:
    ctx_manager = (
        self._lifespan_func
        if isinstance(self._lifespan_func, AbstractAsyncContextManager)
        else self._lifespan_func(app)
    )
    async with ctx_manager:
        yield

messaging

MessageT module-attribute

MessageT = TypeVar(
    'MessageT', bound=IMessage, contravariant=True
)

ResponseT module-attribute

ResponseT = TypeVar(
    'ResponseT', default=None, covariant=True
)

CallNext module-attribute

CallNext = Callable[[], Awaitable[ResponseT]]

RequestT module-attribute

RequestT = TypeVar(
    'RequestT', bound=IRequest[Any], contravariant=True
)

MessagingConfig dataclass

MessagingConfig(
    *, pipeline_behaviors=(), endpoints=(), routing=()
)

Configuration for the messaging extension.

ATTRIBUTE DESCRIPTION
pipeline_behaviors

A sequence of pipeline behavior configurations that will be applied to the messaging pipeline. Behaviors are executed in the order they are defined. Defaults to an empty sequence.

TYPE: Sequence[type[IPipelineBehavior[Any, Any]]]

endpoints

A sequence of endpoint entries defining available message endpoints. Defaults to an empty sequence.

TYPE: Sequence[EndpointEntry]

routing

A sequence of route descriptors mapping messages to endpoints. Defaults to an empty sequence.

TYPE: Sequence[RouteDescriptor | ModuleRouteDescriptor]

Example
config = MessagingConfig(
    pipeline_behaviors=[
        LoggingBehavior,
        ValidationBehavior,
    ]
)

pipeline_behaviors class-attribute instance-attribute

pipeline_behaviors = ()

endpoints class-attribute instance-attribute

endpoints = ()

routing class-attribute instance-attribute

routing = ()

IEvent

Bases: IMessage

Marker for event-type messages. Optional for messaging, required for event sourcing.

IMessage

IPipelineBehavior

Bases: ABC, Generic[MessageT, ResponseT]

handle abstractmethod async

handle(message, /, call_next)
Source code in src/waku/messaging/contracts/pipeline.py
@abstractmethod
async def handle(
    self,
    message: MessageT,
    /,
    call_next: CallNext[ResponseT],
) -> ResponseT: ...

IRequest

Bases: IMessage, Generic[ResponseT]

Marker interface for request-type objects (commands/queries).

Example::

@dataclass(frozen=True)
class GetUserQuery(IRequest[UserDTO]):
    user_id: str


@dataclass(frozen=True)
class CreateOrderCommand(IRequest):  # void command, returns None
    order_id: str

HandlerAlreadyRegistered

HandlerAlreadyRegistered(message_type, handler_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage], handler_type: HandlerType) -> None:
    self.message_type = message_type
    self.handler_type = handler_type

message_type instance-attribute

message_type = message_type

handler_type instance-attribute

handler_type = handler_type

HandlerNotFound

HandlerNotFound(message_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage]) -> None:
    self.message_type = message_type

message_type instance-attribute

message_type = message_type

MultipleHandlersRegistered

MultipleHandlersRegistered(message_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage]) -> None:
    self.message_type = message_type

message_type instance-attribute

message_type = message_type

NoRouteError

NoRouteError(message_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage]) -> None:
    self.message_type = message_type

message_type instance-attribute

message_type = message_type

EventHandler

Bases: MessageHandler[MessageT, None]

handle abstractmethod async

handle(message)
Source code in src/waku/messaging/handler.py
@abc.abstractmethod
async def handle(self, message: MessageT, /) -> None:
    raise NotImplementedError

MessageHandler

Bases: ABC, Generic[MessageT, ResponseT]

handle abstractmethod async

handle(message)
Source code in src/waku/messaging/handler.py
@abc.abstractmethod
async def handle(self, message: MessageT, /) -> ResponseT:
    raise NotImplementedError

RequestHandler

Bases: MessageHandler[RequestT, ResponseT]

handle abstractmethod async

handle(request)
Source code in src/waku/messaging/handler.py
@abc.abstractmethod
async def handle(self, request: RequestT, /) -> ResponseT:
    raise NotImplementedError

MessageBus

MessageBus(container, dispatcher, envelope_factory, router)

Bases: IMessageBus

Source code in src/waku/messaging/impl.py
def __init__(
    self,
    container: AsyncContainer,
    dispatcher: MessageDispatcher,
    envelope_factory: EnvelopeFactory,
    router: MessageRouter,
) -> None:
    self._container = container
    self._dispatcher = dispatcher
    self._envelope_factory = envelope_factory
    self._router = router

invoke async

invoke(request: IRequest[None]) -> None
invoke(request: IRequest[ResponseT]) -> ResponseT
invoke(request)
Source code in src/waku/messaging/impl.py
@override
async def invoke(self, request: IRequest[Any], /) -> Any:
    envelope = self._create_envelope(request)
    with message_context_scope(envelope):
        return await self._dispatcher.invoke_request(request)

send async

send(message)
Source code in src/waku/messaging/impl.py
@override
async def send(self, message: IMessage, /) -> None:
    envelope = self._create_envelope(message)
    endpoints = self._router.resolve(type(message))
    if not endpoints:
        raise NoRouteError(type(message))
    for endpoint in endpoints:
        await endpoint.dispatch(envelope, self._container)

publish async

publish(message)
Source code in src/waku/messaging/impl.py
@override
async def publish(self, message: IMessage, /) -> None:
    envelope = self._create_envelope(message)
    for endpoint in self._router.resolve(type(message)):
        await endpoint.dispatch(envelope, self._container)

IMessageBus

Bases: ISender, IPublisher, ABC

Unified bus — inject the narrowest interface needed.

publish abstractmethod async

publish(message)

Fan-out to all subscribers. Routable through endpoints/transports.

Silent no-op if no subscribers exist.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def publish(self, message: IMessage, /) -> None:
    """Fan-out to all subscribers. Routable through endpoints/transports.

    Silent no-op if no subscribers exist.
    """

invoke abstractmethod async

invoke(request: IRequest[None]) -> None
invoke(request: IRequest[ResponseT]) -> ResponseT
invoke(request)

In-process request/response. Always inline, never routed.

Requires exactly one handler registered for the request type. Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def invoke(self, request: IRequest[ResponseT], /) -> ResponseT:
    """In-process request/response. Always inline, never routed.

    Requires exactly one handler registered for the request type.
    Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.
    """

send abstractmethod async

send(message)

Fire-and-forget. Routable through endpoints/transports.

Raises NoRouteError if no route is configured for the message type.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def send(self, message: IMessage, /) -> None:
    """Fire-and-forget. Routable through endpoints/transports.

    Raises NoRouteError if no route is configured for the message type.
    """

IPublisher

Bases: ABC

Publish messages to all subscribers.

publish abstractmethod async

publish(message)

Fan-out to all subscribers. Routable through endpoints/transports.

Silent no-op if no subscribers exist.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def publish(self, message: IMessage, /) -> None:
    """Fan-out to all subscribers. Routable through endpoints/transports.

    Silent no-op if no subscribers exist.
    """

ISender

Bases: ABC

Send messages through the messaging pipeline.

invoke abstractmethod async

invoke(request: IRequest[None]) -> None
invoke(request: IRequest[ResponseT]) -> ResponseT
invoke(request)

In-process request/response. Always inline, never routed.

Requires exactly one handler registered for the request type. Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def invoke(self, request: IRequest[ResponseT], /) -> ResponseT:
    """In-process request/response. Always inline, never routed.

    Requires exactly one handler registered for the request type.
    Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.
    """

send abstractmethod async

send(message)

Fire-and-forget. Routable through endpoints/transports.

Raises NoRouteError if no route is configured for the message type.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def send(self, message: IMessage, /) -> None:
    """Fire-and-forget. Routable through endpoints/transports.

    Raises NoRouteError if no route is configured for the message type.
    """

MessagingExtension

MessagingExtension()

Bases: OnModuleConfigure

Source code in src/waku/messaging/modules.py
def __init__(self) -> None:
    self._registry = MessageRegistry()

registry property

registry

on_module_configure

on_module_configure(metadata)
Source code in src/waku/messaging/modules.py
@override
def on_module_configure(self, metadata: 'ModuleMetadata') -> None:
    pass  # No-op: implements OnModuleConfigure for discovery via find_extensions()

bind

bind(
    message_type: type[_ReqT],
    handler_type: type[RequestHandler[_ReqT, Any]],
    *,
    behaviors: Sequence[type[IPipelineBehavior[Any, Any]]]
    | None = None,
) -> Self
bind(
    message_type: type[_MsgT],
    handler_type: type[EventHandler[_MsgT]],
    *additional_handlers: type[EventHandler[_MsgT]],
    behaviors: Sequence[type[IPipelineBehavior[Any, Any]]]
    | None = None,
) -> Self
bind(
    message_type,
    handler_type,
    *additional_handlers,
    behaviors=None,
)
Source code in src/waku/messaging/modules.py
def bind(
    self,
    message_type: type[IMessage],
    handler_type: 'type[MessageHandler[Any, Any]]',
    *additional_handlers: 'type[MessageHandler[Any, Any]]',
    behaviors: Sequence[type[IPipelineBehavior[Any, Any]]] | None = None,
) -> Self:
    self._registry.handler_map.bind(message_type, handler_type)
    for additional in additional_handlers:
        self._registry.handler_map.bind(message_type, additional)
    if behaviors:
        entry: PipelineBehaviorMapEntry[Any, Any] = PipelineBehaviorMapEntry.for_message(message_type)
        self._registry.behavior_map.bind(entry, behaviors)
    return self

MessagingModule

register classmethod

register(config=None)
Source code in src/waku/messaging/modules.py
@classmethod
def register(cls, config: MessagingConfig | None = None, /) -> DynamicModule:
    config_ = config or MessagingConfig()
    return DynamicModule(
        parent_module=cls,
        providers=[
            scoped(WithParents[IMessageBus], MessageBus),  # ty:ignore[not-subscriptable]
            singleton(EnvelopeFactory),
            scoped(MessageDispatcher),
            transient(MessageContext, get_message_context),
            *cls._create_pipeline_behavior_providers(config_),
        ],
        extensions=[
            MessageRegistryAggregator(config_),
            EndpointLifecycleExtension(),
        ],
        is_global=True,
    )

external_endpoint

external_endpoint(uri)
Source code in src/waku/messaging/endpoints/base.py
def external_endpoint(uri: str) -> ExternalEntry:
    return ExternalEntry(uri=uri)

local_queue

local_queue(uri, *, stop_timeout=5.0, max_buffer_size=inf)
Source code in src/waku/messaging/endpoints/base.py
def local_queue(
    uri: str,
    *,
    stop_timeout: float = 5.0,
    max_buffer_size: float = math.inf,
) -> LocalQueueEntry:
    return LocalQueueEntry(
        uri=uri,
        stop_timeout=stop_timeout,
        max_buffer_size=max_buffer_size,
    )

route

route(message_type)
Source code in src/waku/messaging/router.py
def route(message_type: type[IMessage]) -> RouteBuilder:
    return RouteBuilder(message_type)

route_module

route_module(module_type)
Source code in src/waku/messaging/router.py
def route_module(module_type: ModuleType) -> ModuleRouteBuilder:
    return ModuleRouteBuilder(module_type)

behaviors

transactional

logger module-attribute
logger = getLogger(__name__)
TransactionalBehavior
TransactionalBehavior(uow)

Bases: IPipelineBehavior[Any, Any]

Source code in src/waku/messaging/behaviors/transactional.py
def __init__(self, uow: IUnitOfWork) -> None:
    self._uow = uow
handle async
handle(_message, /, call_next)
Source code in src/waku/messaging/behaviors/transactional.py
async def handle(self, _message: Any, /, call_next: CallNext[Any]) -> Any:
    try:
        result = await call_next()
    except Exception:
        await self._safe_rollback()
        raise
    try:
        await self._uow.commit()
    except Exception:
        await self._safe_rollback()
        raise
    return result

config

MessagingConfig dataclass

MessagingConfig(
    *, pipeline_behaviors=(), endpoints=(), routing=()
)

Configuration for the messaging extension.

ATTRIBUTE DESCRIPTION
pipeline_behaviors

A sequence of pipeline behavior configurations that will be applied to the messaging pipeline. Behaviors are executed in the order they are defined. Defaults to an empty sequence.

TYPE: Sequence[type[IPipelineBehavior[Any, Any]]]

endpoints

A sequence of endpoint entries defining available message endpoints. Defaults to an empty sequence.

TYPE: Sequence[EndpointEntry]

routing

A sequence of route descriptors mapping messages to endpoints. Defaults to an empty sequence.

TYPE: Sequence[RouteDescriptor | ModuleRouteDescriptor]

Example
config = MessagingConfig(
    pipeline_behaviors=[
        LoggingBehavior,
        ValidationBehavior,
    ]
)
pipeline_behaviors class-attribute instance-attribute
pipeline_behaviors = ()
endpoints class-attribute instance-attribute
endpoints = ()
routing class-attribute instance-attribute
routing = ()

context

MessageContext dataclass

MessageContext(
    *, correlation_id, causation_id, message_id, headers
)
correlation_id instance-attribute
correlation_id
causation_id instance-attribute
causation_id
message_id instance-attribute
message_id
headers instance-attribute
headers

get_message_context

get_message_context()
Source code in src/waku/messaging/context.py
def get_message_context() -> MessageContext:
    ctx = _message_context.get()
    if ctx is None:
        msg = 'No active message context. This function must be called within a MessageBus operation.'
        raise RuntimeError(msg)
    return ctx

try_get_message_context

try_get_message_context()
Source code in src/waku/messaging/context.py
def try_get_message_context() -> MessageContext | None:
    return _message_context.get()

set_message_context

set_message_context(ctx)
Source code in src/waku/messaging/context.py
def set_message_context(ctx: MessageContext) -> Token[MessageContext | None]:
    return _message_context.set(ctx)

reset_message_context

reset_message_context(token)
Source code in src/waku/messaging/context.py
def reset_message_context(token: Token[MessageContext | None]) -> None:
    _message_context.reset(token)

message_context_scope

message_context_scope(envelope)
Source code in src/waku/messaging/context.py
@contextlib.contextmanager
def message_context_scope(envelope: MessageEnvelope[Any]) -> Generator[None]:
    ctx = MessageContext(
        correlation_id=envelope.correlation_id,
        causation_id=envelope.causation_id,
        message_id=envelope.message_id,
        headers=envelope.headers,
    )
    token = set_message_context(ctx)
    try:
        yield
    finally:
        reset_message_context(token)

contracts

MessageT module-attribute

MessageT = TypeVar(
    'MessageT', bound=IMessage, contravariant=True
)

ResponseT module-attribute

ResponseT = TypeVar(
    'ResponseT', default=None, covariant=True
)

CallNext module-attribute

CallNext = Callable[[], Awaitable[ResponseT]]

RequestT module-attribute

RequestT = TypeVar(
    'RequestT', bound=IRequest[Any], contravariant=True
)

IEvent

Bases: IMessage

Marker for event-type messages. Optional for messaging, required for event sourcing.

IMessage

IPipelineBehavior

Bases: ABC, Generic[MessageT, ResponseT]

handle abstractmethod async
handle(message, /, call_next)
Source code in src/waku/messaging/contracts/pipeline.py
@abstractmethod
async def handle(
    self,
    message: MessageT,
    /,
    call_next: CallNext[ResponseT],
) -> ResponseT: ...

IRequest

Bases: IMessage, Generic[ResponseT]

Marker interface for request-type objects (commands/queries).

Example::

@dataclass(frozen=True)
class GetUserQuery(IRequest[UserDTO]):
    user_id: str


@dataclass(frozen=True)
class CreateOrderCommand(IRequest):  # void command, returns None
    order_id: str

envelope

T module-attribute
T = TypeVar('T')
MessageEnvelope dataclass
MessageEnvelope(
    *,
    message_id,
    correlation_id,
    causation_id,
    message_type,
    timestamp,
    payload,
    headers=dict(),
)

Bases: Generic[T]

message_id instance-attribute
message_id
correlation_id instance-attribute
correlation_id
causation_id instance-attribute
causation_id
message_type instance-attribute
message_type
timestamp instance-attribute
timestamp
payload instance-attribute
payload
headers class-attribute instance-attribute
headers = field(default_factory=dict)

event

IEvent

Bases: IMessage

Marker for event-type messages. Optional for messaging, required for event sourcing.

factory

T module-attribute
T = TypeVar('T')
EnvelopeFactory
create classmethod
create(
    message,
    *,
    message_id=None,
    correlation_id=None,
    causation_id=None,
    headers=None,
)
Source code in src/waku/messaging/contracts/factory.py
@classmethod
def create(
    cls,
    message: T,
    *,
    message_id: UUID | None = None,
    correlation_id: UUID | None = None,
    causation_id: UUID | None = None,
    headers: Mapping[str, str] | None = None,
) -> MessageEnvelope[T]:
    message_id_ = message_id or uuid4()
    return MessageEnvelope(
        message_id=message_id_,
        correlation_id=correlation_id or uuid4(),
        causation_id=causation_id or message_id_,
        message_type=f'{type(message).__module__}.{type(message).__qualname__}',
        timestamp=datetime.now(tz=UTC),
        payload=message,
        headers=headers or {},
    )

handler

HandlerType module-attribute
HandlerType = 'type[MessageHandler[Any, Any]]'

message

MessageT module-attribute
MessageT = TypeVar(
    'MessageT', bound=IMessage, contravariant=True
)
ResponseT module-attribute
ResponseT = TypeVar(
    'ResponseT', default=None, covariant=True
)
IMessage

pipeline

CallNext module-attribute
CallNext = Callable[[], Awaitable[ResponseT]]
IPipelineBehavior

Bases: ABC, Generic[MessageT, ResponseT]

handle abstractmethod async
handle(message, /, call_next)
Source code in src/waku/messaging/contracts/pipeline.py
@abstractmethod
async def handle(
    self,
    message: MessageT,
    /,
    call_next: CallNext[ResponseT],
) -> ResponseT: ...

request

RequestT module-attribute
RequestT = TypeVar(
    'RequestT', bound=IRequest[Any], contravariant=True
)
IRequest

Bases: IMessage, Generic[ResponseT]

Marker interface for request-type objects (commands/queries).

Example::

@dataclass(frozen=True)
class GetUserQuery(IRequest[UserDTO]):
    user_id: str


@dataclass(frozen=True)
class CreateOrderCommand(IRequest):  # void command, returns None
    order_id: str

dispatcher

MessageDispatcher

MessageDispatcher(container, registry)
Source code in src/waku/messaging/dispatcher.py
def __init__(
    self,
    container: AsyncContainer,
    registry: MessageRegistry,
) -> None:
    # DI container used to resolve handler/behavior instances at dispatch time.
    self._container = container
    # Aggregated handler/behavior maps consulted to find handler types.
    self._registry = registry
invoke_request async
invoke_request(request)
Source code in src/waku/messaging/dispatcher.py
async def invoke_request(self, request: IRequest[ResponseT]) -> ResponseT:
    """Resolve the single handler for *request* and run the behavior pipeline.

    Raises HandlerNotFound when no handler is bound for the request type,
    MultipleHandlersRegistered when more than one is.
    """
    request_type = type(request)
    handlers = self._registry.handler_map.get_handler_types(request_type)
    if len(handlers) == 0:
        raise HandlerNotFound(request_type)
    if len(handlers) > 1:
        raise MultipleHandlersRegistered(request_type)
    handler_type = handlers[0]
    # cast() only narrows the container's loosely-typed result for the checker.
    handler = cast('MessageHandler[IRequest[ResponseT], ResponseT]', await self._container.get(handler_type))
    behaviors = await self._resolve_behaviors(request_type)
    return await PipelineExecutor.execute(message=request, handler=handler, behaviors=behaviors)  # pyrefly: ignore[bad-return]
execute_for_handler async
execute_for_handler(message, handler_type)
Source code in src/waku/messaging/dispatcher.py
async def execute_for_handler(self, message: IMessage, handler_type: HandlerType) -> None:
    """Run *message* through the pipeline for one explicit handler type; result discarded."""
    handler = await self._container.get(handler_type)
    behaviors = await self._resolve_behaviors(type(message))
    await PipelineExecutor.execute(message=message, handler=handler, behaviors=behaviors)

endpoints

base

DEFAULT_ENDPOINT_URI module-attribute
DEFAULT_ENDPOINT_URI = '__default__'
EndpointEntry module-attribute
EndpointEntry = LocalQueueEntry | ExternalEntry
LocalQueueEntry dataclass
LocalQueueEntry(
    *, uri, stop_timeout=5.0, max_buffer_size=inf
)
uri instance-attribute
uri
stop_timeout class-attribute instance-attribute
stop_timeout = 5.0
max_buffer_size class-attribute instance-attribute
max_buffer_size = inf
ExternalEntry dataclass
ExternalEntry(*, uri)
uri instance-attribute
uri
Endpoint
Endpoint(uri)

Bases: ABC

Source code in src/waku/messaging/endpoints/base.py
def __init__(self, uri: str) -> None:
    # Stable identifier for this endpoint; exposed read-only via the `uri` property.
    self._uri = uri
uri property
uri
dispatch abstractmethod async
dispatch(envelope, scope)
Source code in src/waku/messaging/endpoints/base.py
# Contract only: deliver one envelope to this endpoint; `scope` is the
# DI container the handling side resolves dependencies from.
@abstractmethod
async def dispatch(self, envelope: MessageEnvelope[Any], scope: AsyncContainer) -> None: ...
start abstractmethod async
start()
Source code in src/waku/messaging/endpoints/base.py
# Contract only: begin consuming/accepting messages (invoked by
# EndpointLifecycleExtension.after_app_init).
@abstractmethod
async def start(self) -> None: ...
stop abstractmethod async
stop()
Source code in src/waku/messaging/endpoints/base.py
# Contract only: stop consuming and release resources (invoked by
# EndpointLifecycleExtension.on_app_shutdown, in reverse start order).
@abstractmethod
async def stop(self) -> None: ...
local_queue
local_queue(uri, *, stop_timeout=5.0, max_buffer_size=inf)
Source code in src/waku/messaging/endpoints/base.py
def local_queue(
    uri: str,
    *,
    stop_timeout: float = 5.0,
    max_buffer_size: float = math.inf,
) -> LocalQueueEntry:
    """Describe an in-process queue endpoint at *uri*.

    stop_timeout bounds the drain wait on shutdown; max_buffer_size bounds
    the internal buffer (unbounded by default).
    """
    entry = LocalQueueEntry(uri=uri, stop_timeout=stop_timeout, max_buffer_size=max_buffer_size)
    return entry
external_endpoint
external_endpoint(uri)
Source code in src/waku/messaging/endpoints/base.py
def external_endpoint(uri: str) -> ExternalEntry:
    """Describe an endpoint backed by an external transport at *uri*."""
    return ExternalEntry(uri=uri)

local_queue

logger module-attribute
logger = getLogger(__name__)
LocalQueueEndpoint
LocalQueueEndpoint(
    *,
    uri,
    handler_subscriptions,
    container,
    stop_timeout,
    max_buffer_size,
)

Bases: Endpoint

Source code in src/waku/messaging/endpoints/local_queue.py
def __init__(
    self,
    *,
    uri: str,
    handler_subscriptions: HandlerSubscriptions,
    container: AsyncContainer,
    stop_timeout: float,
    max_buffer_size: float,
) -> None:
    """In-process queue endpoint backed by an anyio memory object stream."""
    super().__init__(uri=uri)
    self._handler_subscriptions = handler_subscriptions
    self._container = container
    # Seconds stop() waits for the worker to drain before cancelling it.
    self._stop_timeout = stop_timeout
    # Producer/consumer channel; bounded unless max_buffer_size is inf.
    send, receive = create_memory_object_stream[MessageEnvelope[Any]](max_buffer_size=max_buffer_size)
    self._send_stream: MemoryObjectSendStream[MessageEnvelope[Any]] = send
    self._receive_stream: MemoryObjectReceiveStream[MessageEnvelope[Any]] = receive
    # Created by start(); runs _worker_loop() (body not shown on this page).
    self._worker_task: asyncio.Task[None] | None = None
    self._stopped = False
uri property
uri
dispatch async
dispatch(envelope, scope)
Source code in src/waku/messaging/endpoints/local_queue.py
@override
async def dispatch(self, envelope: MessageEnvelope[Any], scope: AsyncContainer) -> None:
    """Enqueue *envelope* for the worker; drops it (with a warning) once stopped."""
    # NOTE(review): `scope` is unused by this implementation.
    if self._stopped:
        logger.warning('Message dropped: endpoint %s is stopped (message_id=%s)', self._uri, envelope.message_id)
        return
    # May block for backpressure when the buffer is bounded and full.
    await self._send_stream.send(envelope)
start async
start()
Source code in src/waku/messaging/endpoints/local_queue.py
async def start(self) -> None:
    # Spawn the consumer loop; stop() later awaits (or cancels) this task.
    self._worker_task = asyncio.create_task(self._worker_loop())
stop async
stop()
Source code in src/waku/messaging/endpoints/local_queue.py
async def stop(self) -> None:
    """Close the queue and wait up to stop_timeout for the worker to finish.

    Closing the send stream signals end-of-stream to the receiving side;
    if the worker does not exit within the timeout it is cancelled and
    awaited so its cancellation is fully observed.
    """
    self._stopped = True
    self._send_stream.close()
    if self._worker_task is None:
        return
    try:
        # anyio.fail_after raises TimeoutError when the deadline expires.
        with anyio.fail_after(self._stop_timeout):
            await self._worker_task
    except TimeoutError:
        logger.warning(
            'Worker task for %s did not terminate within %.1fs, cancelling',
            self._uri,
            self._stop_timeout,
        )
        self._worker_task.cancel()
        # Swallow only the cancellation we just requested.
        with contextlib.suppress(asyncio.CancelledError):
            await self._worker_task
    finally:
        self._worker_task = None

exceptions

MessagingError

Bases: WakuError

Base exception for all messaging-related errors.

MapFrozenError

MapFrozenError()

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self) -> None:
    # Fixed message: registry maps are write-once; freeze() forbids further binds.
    super().__init__('Cannot modify map after it is frozen')

ImproperlyConfiguredError

Bases: MessagingError

Raised when messaging configuration is invalid.

HandlerAlreadyRegistered

HandlerAlreadyRegistered(message_type, handler_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage], handler_type: HandlerType) -> None:
    """Duplicate binding: *handler_type* was bound twice for *message_type*."""
    self.handler_type = handler_type
    self.message_type = message_type
message_type instance-attribute
message_type = message_type
handler_type instance-attribute
handler_type = handler_type

HandlerNotFound

HandlerNotFound(message_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage]) -> None:
    # The request/message type that has no handler bound.
    self.message_type = message_type
message_type instance-attribute
message_type = message_type

MultipleHandlersRegistered

MultipleHandlersRegistered(message_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage]) -> None:
    # The request type with more than one handler bound (invoke needs exactly one).
    self.message_type = message_type
message_type instance-attribute
message_type = message_type

NoRouteError

NoRouteError(message_type)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage]) -> None:
    # The message type for which no endpoint route is configured.
    self.message_type = message_type
message_type instance-attribute
message_type = message_type

PipelineBehaviorAlreadyRegistered

PipelineBehaviorAlreadyRegistered(
    message_type, behavior_type
)

Bases: MessagingError

Source code in src/waku/messaging/exceptions.py
def __init__(self, message_type: type[IMessage], behavior_type: type[IPipelineBehavior[Any, Any]]) -> None:
    # The (message type, behavior type) pair that was registered twice.
    self.message_type = message_type
    self.behavior_type = behavior_type
message_type instance-attribute
message_type = message_type
behavior_type instance-attribute
behavior_type = behavior_type

handler

MessageHandler

Bases: ABC, Generic[MessageT, ResponseT]

handle abstractmethod async
handle(message)
Source code in src/waku/messaging/handler.py
@abc.abstractmethod
async def handle(self, message: MessageT, /) -> ResponseT:
    """Process *message* and return the response (contract only)."""
    raise NotImplementedError

RequestHandler

Bases: MessageHandler[RequestT, ResponseT]

handle abstractmethod async
handle(request)
Source code in src/waku/messaging/handler.py
@abc.abstractmethod
async def handle(self, request: RequestT, /) -> ResponseT:
    """Handle a command/query and return its response (contract only)."""
    raise NotImplementedError

EventHandler

Bases: MessageHandler[MessageT, None]

handle abstractmethod async
handle(message)
Source code in src/waku/messaging/handler.py
@abc.abstractmethod
async def handle(self, message: MessageT, /) -> None:
    """React to an event; event handlers return nothing (contract only)."""
    raise NotImplementedError

handler_map

HandlerMap

HandlerMap()
Source code in src/waku/messaging/handler_map.py
def __init__(self) -> None:
    # message type -> ordered list of handler types bound to it.
    self._registry: dict[type[IMessage], list[HandlerType]] = {}
    # Once frozen, bind()/merge() raise MapFrozenError.
    self._frozen = False
freeze
freeze()
Source code in src/waku/messaging/handler_map.py
def freeze(self) -> None:
    # One-way switch: subsequent bind()/merge() calls raise MapFrozenError.
    self._frozen = True
bind
bind(message_type, handler_type)
Source code in src/waku/messaging/handler_map.py
def bind(self, message_type: type[IMessage], handler_type: HandlerType) -> Self:
    """Append *handler_type* for *message_type*; fluent (returns self).

    Raises MapFrozenError after freeze(), and HandlerAlreadyRegistered if
    the same handler is already bound for this message type.
    """
    if self._frozen:
        raise MapFrozenError
    if message_type not in self._registry:
        self._registry[message_type] = []
    bound = self._registry[message_type]
    if handler_type in bound:
        raise HandlerAlreadyRegistered(message_type, handler_type)
    bound.append(handler_type)
    return self
merge
merge(other)
Source code in src/waku/messaging/handler_map.py
def merge(self, other: HandlerMap) -> Self:
    """Re-bind every (message type, handler) pair from *other* into this map.

    Delegates to bind(), so freeze state and duplicate bindings are
    enforced exactly as for direct registration. Raises MapFrozenError if
    this map is frozen, HandlerAlreadyRegistered on any duplicate pair.
    """
    if self._frozen:
        raise MapFrozenError
    # Iterate the public snapshot instead of reaching into other._registry.
    for message_type, handler_types in other.items():
        for handler_type in handler_types:
            self.bind(message_type, handler_type)
    return self
get_handler_types
get_handler_types(message_type)
Source code in src/waku/messaging/handler_map.py
def get_handler_types(self, message_type: type[IMessage]) -> Sequence[HandlerType]:
    # Empty tuple (never None) when nothing is bound, so callers can iterate safely.
    return self._registry.get(message_type, ())
handler_types
handler_types()
Source code in src/waku/messaging/handler_map.py
def handler_types(self) -> Iterator[HandlerType]:
    """Iterate every bound handler type across all message types."""
    yield from (handler for bound in self._registry.values() for handler in bound)
items
items()
Source code in src/waku/messaging/handler_map.py
def items(self) -> Iterator[tuple[type[IMessage], tuple[HandlerType, ...]]]:
    """Yield (message type, handlers) pairs, snapshotting each handler list as a tuple."""
    yield from ((message_type, tuple(bound)) for message_type, bound in self._registry.items())

impl

MessageBus

MessageBus(container, dispatcher, envelope_factory, router)

Bases: IMessageBus

Source code in src/waku/messaging/impl.py
def __init__(
    self,
    container: AsyncContainer,
    dispatcher: MessageDispatcher,
    envelope_factory: EnvelopeFactory,
    router: MessageRouter,
) -> None:
    # Resolution scope handed to endpoints on dispatch.
    self._container = container
    # Executes invoke() inline against registered handlers.
    self._dispatcher = dispatcher
    # Builds MessageEnvelope wrappers for outgoing messages.
    self._envelope_factory = envelope_factory
    # Resolves message types to their configured endpoints.
    self._router = router
invoke async
invoke(request: IRequest[None]) -> None
invoke(request: IRequest[ResponseT]) -> ResponseT
invoke(request)
Source code in src/waku/messaging/impl.py
@override
async def invoke(self, request: IRequest[Any], /) -> Any:
    """Execute *request* inline via the dispatcher, inside a message context."""
    # The envelope is used only to open message_context_scope; the raw
    # request (not the envelope) is what gets dispatched.
    envelope = self._create_envelope(request)
    with message_context_scope(envelope):
        return await self._dispatcher.invoke_request(request)
send async
send(message)
Source code in src/waku/messaging/impl.py
@override
async def send(self, message: IMessage, /) -> None:
    """Route *message* to its configured endpoints; error when none exist."""
    envelope = self._create_envelope(message)
    endpoints = self._router.resolve(type(message))
    if not endpoints:
        # send() is point-to-point: an unroutable message is a configuration error.
        raise NoRouteError(type(message))
    for endpoint in endpoints:
        await endpoint.dispatch(envelope, self._container)
publish async
publish(message)
Source code in src/waku/messaging/impl.py
@override
async def publish(self, message: IMessage, /) -> None:
    """Fan *message* out to all resolved endpoints; no-op when there are none."""
    envelope = self._create_envelope(message)
    # Unlike send(), zero endpoints is fine for publish (pub/sub semantics).
    for endpoint in self._router.resolve(type(message)):
        await endpoint.dispatch(envelope, self._container)

interfaces

ISender

Bases: ABC

Send messages through the messaging pipeline.

invoke abstractmethod async
invoke(request: IRequest[None]) -> None
invoke(request: IRequest[ResponseT]) -> ResponseT
invoke(request)

In-process request/response. Always inline, never routed.

Requires exactly one handler registered for the request type. Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def invoke(self, request: IRequest[ResponseT], /) -> ResponseT:
    """In-process request/response. Always inline, never routed.

    Requires exactly one handler registered for the request type.
    Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.
    Returns the handler's response.
    """
send abstractmethod async
send(message)

Fire-and-forget. Routable through endpoints/transports.

Raises NoRouteError if no route is configured for the message type.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def send(self, message: IMessage, /) -> None:
    """Fire-and-forget, point-to-point. Routable through endpoints/transports.

    Raises NoRouteError if no route is configured for the message type.
    """

IPublisher

Bases: ABC

Publish messages to all subscribers.

publish abstractmethod async
publish(message)

Fan-out to all subscribers. Routable through endpoints/transports.

Silent no-op if no subscribers exist.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def publish(self, message: IMessage, /) -> None:
    """Fan-out to all subscribers. Routable through endpoints/transports.

    Silent no-op if no subscribers exist (unlike send(), which raises).
    """

IMessageBus

Bases: ISender, IPublisher, ABC

Unified bus — inject the narrowest interface needed.

publish abstractmethod async
publish(message)

Fan-out to all subscribers. Routable through endpoints/transports.

Silent no-op if no subscribers exist.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def publish(self, message: IMessage, /) -> None:
    """Fan-out to all subscribers. Routable through endpoints/transports.

    Silent no-op if no subscribers exist (unlike send(), which raises).
    """
invoke abstractmethod async
invoke(request: IRequest[None]) -> None
invoke(request: IRequest[ResponseT]) -> ResponseT
invoke(request)

In-process request/response. Always inline, never routed.

Requires exactly one handler registered for the request type. Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def invoke(self, request: IRequest[ResponseT], /) -> ResponseT:
    """In-process request/response. Always inline, never routed.

    Requires exactly one handler registered for the request type.
    Raises HandlerNotFound if zero, MultipleHandlersRegistered if >1.
    Returns the handler's response.
    """
send abstractmethod async
send(message)

Fire-and-forget. Routable through endpoints/transports.

Raises NoRouteError if no route is configured for the message type.

Source code in src/waku/messaging/interfaces.py
@abc.abstractmethod
async def send(self, message: IMessage, /) -> None:
    """Fire-and-forget, point-to-point. Routable through endpoints/transports.

    Raises NoRouteError if no route is configured for the message type.
    """

modules

MessagingModule

register classmethod
register(config=None)
Source code in src/waku/messaging/modules.py
@classmethod
def register(cls, config: MessagingConfig | None = None, /) -> DynamicModule:
    """Build the global messaging DynamicModule (bus, dispatcher, lifecycle hooks)."""
    config_ = config or MessagingConfig()
    return DynamicModule(
        parent_module=cls,
        providers=[
            # Bus has scoped lifetime, bound to IMessageBus and its parent interfaces.
            scoped(WithParents[IMessageBus], MessageBus),  # ty:ignore[not-subscriptable]
            singleton(EnvelopeFactory),
            scoped(MessageDispatcher),
            # Fresh MessageContext per injection, produced by get_message_context.
            transient(MessageContext, get_message_context),
            *cls._create_pipeline_behavior_providers(config_),
        ],
        extensions=[
            # Aggregates handler registrations across modules at registration time.
            MessageRegistryAggregator(config_),
            # Starts/stops endpoints with the application lifecycle.
            EndpointLifecycleExtension(),
        ],
        is_global=True,
    )

MessagingExtension

MessagingExtension()

Bases: OnModuleConfigure

Source code in src/waku/messaging/modules.py
def __init__(self) -> None:
    # Per-module registry populated via bind(); merged by MessageRegistryAggregator.
    self._registry = MessageRegistry()
registry property
registry
on_module_configure
on_module_configure(metadata)
Source code in src/waku/messaging/modules.py
@override
def on_module_configure(self, metadata: 'ModuleMetadata') -> None:
    """Intentionally empty; the hook's presence makes this extension discoverable."""
    pass  # No-op: implements OnModuleConfigure for discovery via find_extensions()
bind
bind(
    message_type: type[_ReqT],
    handler_type: type[RequestHandler[_ReqT, Any]],
    *,
    behaviors: Sequence[type[IPipelineBehavior[Any, Any]]]
    | None = None,
) -> Self
bind(
    message_type: type[_MsgT],
    handler_type: type[EventHandler[_MsgT]],
    *additional_handlers: type[EventHandler[_MsgT]],
    behaviors: Sequence[type[IPipelineBehavior[Any, Any]]]
    | None = None,
) -> Self
bind(
    message_type,
    handler_type,
    *additional_handlers,
    behaviors=None,
)
Source code in src/waku/messaging/modules.py
def bind(
    self,
    message_type: type[IMessage],
    handler_type: 'type[MessageHandler[Any, Any]]',
    *additional_handlers: 'type[MessageHandler[Any, Any]]',
    behaviors: Sequence[type[IPipelineBehavior[Any, Any]]] | None = None,
) -> Self:
    """Bind one or more handlers (and optional pipeline behaviors) to *message_type*.

    Fluent: returns self so bindings can be chained.
    """
    self._registry.handler_map.bind(message_type, handler_type)
    for additional in additional_handlers:
        self._registry.handler_map.bind(message_type, additional)
    if behaviors:
        # Behaviors are keyed by a per-message-type DI lookup entry.
        entry: PipelineBehaviorMapEntry[Any, Any] = PipelineBehaviorMapEntry.for_message(message_type)
        self._registry.behavior_map.bind(entry, behaviors)
    return self

MessageRegistryAggregator

MessageRegistryAggregator(config)

Bases: OnModuleRegistration

Source code in src/waku/messaging/modules.py
def __init__(self, config: MessagingConfig) -> None:
    # Messaging config consulted when building the routing table.
    self._config = config
on_module_registration
on_module_registration(registry, owning_module, context)
Source code in src/waku/messaging/modules.py
@override
def on_module_registration(
    self,
    registry: ModuleMetadataRegistry,
    owning_module: 'ModuleType',
    context: Mapping[Any, Any] | None,
) -> None:
    """Aggregate every module's MessagingExtension registry and wire up routing.

    Merges per-module handler/behavior maps into one registry, registers
    handler providers with their declaring modules, then builds the routing
    table and router providers on the messaging (owning) module.

    Raises ImproperlyConfiguredError when two modules bind the same
    (message type, handler type) pair.
    """
    aggregated = MessageRegistry()
    # Per-module view of handler bindings, used by the routing table builder.
    module_routing_map: dict[ModuleType, dict[type[IMessage], Sequence[HandlerType]]] = {}

    for module_type, ext in registry.find_extensions(MessagingExtension):
        try:
            aggregated.merge(ext.registry)
        except HandlerAlreadyRegistered as exc:
            # Re-raise with the offending module named for easier debugging.
            msg = f'{exc} (from module {module_type.__qualname__})'
            raise ImproperlyConfiguredError(msg) from exc
        # BUG FIX: `if ext.registry.handler_map:` was always truthy because
        # HandlerMap defines no __bool__/__len__; materialize the items and
        # test the dict so modules without bindings are actually skipped.
        module_handlers = dict(ext.registry.handler_map.items())
        if module_handlers:
            module_routing_map[module_type] = module_handlers
        # Handlers are provided by the module that declared them.
        for provider in self._handler_providers(ext.registry):
            registry.add_provider(module_type, provider)

    self._validate_request_handler_counts(aggregated)

    for provider in self._collector_providers(aggregated):
        registry.add_provider(owning_module, provider)

    # Registry is immutable from here on; expose it as a plain object provider.
    aggregated.freeze()
    registry.add_provider(owning_module, object_(aggregated))

    routing_table = RoutingTableBuilder(
        self._config,
        aggregated=aggregated,
        module_routing_map=module_routing_map,
    ).build()
    registry.add_provider(owning_module, object_(routing_table))
    registry.add_provider(owning_module, singleton(MessageRouter, _build_router))

EndpointLifecycleExtension

Bases: AfterApplicationInit, OnApplicationShutdown

after_app_init async
after_app_init(app)
Source code in src/waku/messaging/modules.py
@override
async def after_app_init(self, app: 'WakuApplication') -> None:
    """Start every routed endpoint once the application container is ready."""
    router = await app.container.get(MessageRouter)
    for endpoint in router.endpoints:
        await endpoint.start()
on_app_shutdown async
on_app_shutdown(app)
Source code in src/waku/messaging/modules.py
@override
async def on_app_shutdown(self, app: 'WakuApplication') -> None:
    """Stop endpoints in reverse start order (mirrors after_app_init)."""
    router = await app.container.get(MessageRouter)
    for endpoint in reversed(router.endpoints):
        await endpoint.stop()

pipeline

PipelineExecutor

execute async staticmethod
execute(*, message, handler, behaviors)
Source code in src/waku/messaging/pipeline/executor.py
@staticmethod
async def execute(
    *,
    message: IMessage,
    handler: _MessageHandler[_T],
    behaviors: Sequence[IPipelineBehavior[Any, _T]],
) -> _T:
    """Run *message* through *behaviors* (in order) and finally *handler*.

    Each behavior receives a zero-arg call_next continuation; awaiting it
    advances to the next behavior, or to the handler at the chain's end.
    """

    async def run_handler() -> _T:
        return await handler.handle(message)

    # Compose the chain back-to-front so behaviors[0] runs outermost.
    call_next = run_handler
    for behavior in reversed(behaviors):
        def make_link(current, nxt):
            # Factory closure pins `current`/`nxt` for this link.
            async def link() -> _T:
                return await current.handle(message, call_next=nxt)
            return link
        call_next = make_link(behavior, call_next)

    return await call_next()

executor

PipelineExecutor
execute async staticmethod
execute(*, message, handler, behaviors)
Source code in src/waku/messaging/pipeline/executor.py
@staticmethod
async def execute(
    *,
    message: IMessage,
    handler: _MessageHandler[_T],
    behaviors: Sequence[IPipelineBehavior[Any, _T]],
) -> _T:
    """Run *message* through *behaviors* (in order) and finally *handler*.

    Each behavior receives a zero-arg call_next continuation; awaiting it
    advances to the next behavior, or to the handler at the chain's end.
    """

    async def run_handler() -> _T:
        return await handler.handle(message)

    # Compose the chain back-to-front so behaviors[0] runs outermost.
    call_next = run_handler
    for behavior in reversed(behaviors):
        def make_link(current, nxt):
            # Factory closure pins `current`/`nxt` for this link.
            async def link() -> _T:
                return await current.handle(message, call_next=nxt)
            return link
        call_next = make_link(behavior, call_next)

    return await call_next()

map

PipelineBehaviorMapRegistry module-attribute
PipelineBehaviorMapEntry dataclass
PipelineBehaviorMapEntry(
    message_type, di_lookup_type, behavior_types=list()
)

Bases: Generic[MessageT, ResponseT]

message_type instance-attribute
message_type
di_lookup_type instance-attribute
di_lookup_type
behavior_types class-attribute instance-attribute
behavior_types = field(default_factory=list)
for_message classmethod
for_message(message_type)
Source code in src/waku/messaging/pipeline/map.py
@classmethod
def for_message(cls, message_type: type[IMessage]) -> Self:
    """Build an entry whose DI lookup type is IPipelineBehavior[msg, response]."""
    response_type = get_response_type(message_type)
    # Parameterized at runtime so the container can key behaviors per message type.
    di_lookup_type = IPipelineBehavior[message_type, response_type]  # type: ignore[valid-type]
    return cls(message_type=message_type, di_lookup_type=di_lookup_type)  # type: ignore[type-abstract]
add
add(behavior_type)
Source code in src/waku/messaging/pipeline/map.py
def add(self, behavior_type: type[IPipelineBehavior[Any, Any]]) -> None:
    """Append *behavior_type*, rejecting duplicates for this message type."""
    if behavior_type not in self.behavior_types:
        self.behavior_types.append(behavior_type)
        return
    raise PipelineBehaviorAlreadyRegistered(self.message_type, behavior_type)
PipelineBehaviorMap
PipelineBehaviorMap()
Source code in src/waku/messaging/pipeline/map.py
def __init__(self) -> None:
    # message type -> PipelineBehaviorMapEntry (DI lookup type + ordered behaviors).
    self._registry: PipelineBehaviorMapRegistry[Any, Any] = {}
    # Once frozen, bind()/merge() raise MapFrozenError.
    self._frozen = False
is_frozen property
is_frozen
freeze
freeze()
Source code in src/waku/messaging/pipeline/map.py
def freeze(self) -> None:
    # One-way switch; observable through the is_frozen property.
    self._frozen = True
bind
bind(entry, behavior_types)
Source code in src/waku/messaging/pipeline/map.py
def bind(
    self,
    entry: PipelineBehaviorMapEntry[Any, Any],
    behavior_types: Sequence[type[IPipelineBehavior[Any, Any]]],
) -> Self:
    """Attach *behavior_types* to the entry's message type; fluent.

    The first entry seen for a message type is kept; later binds only
    append behaviors to it. Raises MapFrozenError after freeze() and
    PipelineBehaviorAlreadyRegistered on duplicate behaviors.
    """
    if self._frozen:
        raise MapFrozenError
    target = self._registry.setdefault(entry.message_type, entry)
    for behavior_type in behavior_types:
        target.add(behavior_type)
    return self
merge
merge(other)
Source code in src/waku/messaging/pipeline/map.py
def merge(self, other: PipelineBehaviorMap) -> Self:
    """Copy every entry from *other* into this map, deduplicating per entry.

    Fresh PipelineBehaviorMapEntry objects back new message types so the
    two maps never share mutable behavior lists. Raises MapFrozenError if
    this map is frozen.
    """
    if self._frozen:
        raise MapFrozenError
    for source in other.entries():
        target = self._registry.setdefault(
            source.message_type,
            PipelineBehaviorMapEntry(
                message_type=source.message_type,
                di_lookup_type=source.di_lookup_type,
            ),
        )
        for behavior_type in source.behavior_types:
            target.add(behavior_type)
    return self
entries
entries()
Source code in src/waku/messaging/pipeline/map.py
def entries(self) -> Iterator[PipelineBehaviorMapEntry[Any, Any]]:
    """Iterate registered entries in insertion order."""
    yield from self._registry.values()
has_behaviors
has_behaviors(message_type)
Source code in src/waku/messaging/pipeline/map.py
def has_behaviors(self, message_type: type[IMessage]) -> bool:
    """True when *message_type* has at least one behavior registered."""
    entry = self._registry.get(message_type)
    return entry is not None and len(entry.behavior_types) > 0
get_behavior_types
get_behavior_types(message_type)
Source code in src/waku/messaging/pipeline/map.py
def get_behavior_types(self, message_type: type[IMessage]) -> Sequence[type[IPipelineBehavior[Any, Any]]]:
    # Raises KeyError for unknown message types; pair with has_behaviors().
    return self._registry[message_type].behavior_types
get_lookup_type
get_lookup_type(message_type)
Source code in src/waku/messaging/pipeline/map.py
def get_lookup_type(self, message_type: type[IMessage]) -> type[IPipelineBehavior[Any, Any]]:
    # Raises KeyError for unknown message types.
    return self._registry[message_type].di_lookup_type

registry

MessageRegistry dataclass

MessageRegistry(
    *,
    handler_map=HandlerMap(),
    behavior_map=PipelineBehaviorMap(),
)
handler_map class-attribute instance-attribute
handler_map = field(default_factory=HandlerMap)
behavior_map class-attribute instance-attribute
behavior_map = field(default_factory=PipelineBehaviorMap)
merge
merge(other)
Source code in src/waku/messaging/registry.py
def merge(self, other: MessageRegistry) -> None:
    # Delegates to the component maps; either may raise (frozen/duplicates).
    self.handler_map.merge(other.handler_map)
    self.behavior_map.merge(other.behavior_map)
freeze
freeze()
Source code in src/waku/messaging/registry.py
def freeze(self) -> None:
    # Locks both maps; subsequent mutation raises MapFrozenError.
    self.handler_map.freeze()
    self.behavior_map.freeze()

router

HandlerSubscriptions module-attribute

HandlerSubscriptions = (
    'Mapping[type[IMessage], frozenset[HandlerType]]'
)

RoutingTable dataclass

RoutingTable(
    entries=(),
    type_routes=(lambda: MappingProxyType({}))(),
    endpoint_subscriptions=(lambda: MappingProxyType({}))(),
)
entries class-attribute instance-attribute
entries = ()
type_routes class-attribute instance-attribute
type_routes = field(
    default_factory=lambda: MappingProxyType({})
)
endpoint_subscriptions class-attribute instance-attribute
endpoint_subscriptions = field(
    default_factory=lambda: MappingProxyType({})
)

MessageRouter

MessageRouter(routes, endpoints)
Source code in src/waku/messaging/router.py
def __init__(
    self,
    routes: Mapping[type[IMessage], Sequence[Endpoint]],
    endpoints: Sequence[Endpoint],
) -> None:
    # Per-message-type route table consulted by resolve().
    self._routes = routes
    # All endpoints in start order; exposed via the `endpoints` property.
    self._endpoints = endpoints
endpoints property
endpoints
resolve
resolve(message_type)
Source code in src/waku/messaging/router.py
def resolve(self, message_type: type[IMessage]) -> Sequence[Endpoint]:
    # Empty tuple (never None) when no route is configured for the type.
    return self._routes.get(message_type, ())

RouteDescriptor dataclass

RouteDescriptor(message_type, endpoint_uri)
message_type instance-attribute
message_type
endpoint_uri instance-attribute
endpoint_uri

ModuleRouteDescriptor dataclass

ModuleRouteDescriptor(module_type, endpoint_uri)
module_type instance-attribute
module_type
endpoint_uri instance-attribute
endpoint_uri

RouteBuilder

RouteBuilder(message_type)
Source code in src/waku/messaging/router.py
def __init__(self, message_type: type[IMessage]) -> None:
    # Message type this builder will route; consumed by to().
    self._message_type = message_type
to
to(endpoint_uri)
Source code in src/waku/messaging/router.py
def to(self, endpoint_uri: str) -> RouteDescriptor:
    """Finish the route: bind the message type to *endpoint_uri*."""
    return RouteDescriptor(self._message_type, endpoint_uri)

ModuleRouteBuilder

ModuleRouteBuilder(module_type)
Source code in src/waku/messaging/router.py
def __init__(self, module_type: ModuleType) -> None:
    # Module whose messages this builder will route; consumed by to().
    self._module_type = module_type
to
to(endpoint_uri)
Source code in src/waku/messaging/router.py
def to(self, endpoint_uri: str) -> ModuleRouteDescriptor:
    """Finish the route: bind the module's messages to *endpoint_uri*."""
    return ModuleRouteDescriptor(self._module_type, endpoint_uri)

route

route(message_type)
Source code in src/waku/messaging/router.py
def route(message_type: type[IMessage]) -> RouteBuilder:
    """Entry point of the routing DSL: route(Msg).to('uri')."""
    return RouteBuilder(message_type)

route_module

route_module(module_type)
Source code in src/waku/messaging/router.py
def route_module(module_type: ModuleType) -> ModuleRouteBuilder:
    """Module-level variant of the routing DSL: route_module(Mod).to('uri')."""
    return ModuleRouteBuilder(module_type)

routing_builder

ModuleRoutingMap module-attribute

ModuleRoutingMap = Mapping[
    'ModuleType',
    Mapping[type['IMessage'], Sequence['HandlerType']],
]

RoutingTableBuilder

RoutingTableBuilder(
    config, *, aggregated, module_routing_map
)
Source code in src/waku/messaging/routing_builder.py
def __init__(
    self,
    config: MessagingConfig,
    *,
    aggregated: MessageRegistry,
    module_routing_map: ModuleRoutingMap,
) -> None:
    self._config = config
    self._registry = aggregated
    self._module_routing_map = module_routing_map
    # Accumulators populated during build(): message type -> endpoint uris ...
    self._type_routes: defaultdict[type[IMessage], list[str]] = defaultdict(list)
    # ... and endpoint uri -> message type -> handler types.
    self._endpoint_handlers: defaultdict[str, defaultdict[type[IMessage], set[HandlerType]]] = defaultdict(
        lambda: defaultdict(set)
    )
    # NOTE(review): name suggests message types with explicit per-type route
    # overrides; populated by code outside this excerpt — confirm.
    self._per_type_overrides: set[type[IMessage]] = set()
build
build()
Source code in src/waku/messaging/routing_builder.py
def build(self) -> RoutingTable:
    """Assemble the routing table: collect endpoints, apply routes, default the rest."""
    endpoints = self._collect_endpoint_entries()
    self._apply_routes(endpoints)
    # Message types without an explicit route fall back to the default endpoint.
    self._assign_unrouted_to_default()
    self._ensure_default_endpoint(endpoints)
    return self._assemble(endpoints)

sqla

uow

SqlAlchemyUnitOfWork
SqlAlchemyUnitOfWork(session)

Bases: IUnitOfWork

Source code in src/waku/messaging/sqla/uow.py
def __init__(self, session: AsyncSession) -> None:
    # The SQLAlchemy async session whose transaction this unit of work controls.
    self._session = session
commit async
commit()
Source code in src/waku/messaging/sqla/uow.py
async def commit(self) -> None:
    # Commit the current session transaction.
    await self._session.commit()
rollback async
rollback()
Source code in src/waku/messaging/sqla/uow.py
async def rollback(self) -> None:
    # Roll back the current session transaction.
    await self._session.rollback()

transport

interfaces

ITransport

Bases: Protocol

send async
send(envelope)
Source code in src/waku/messaging/transport/interfaces.py
# Protocol method (structural): deliver a single envelope with send semantics.
async def send(self, envelope: MessageEnvelope[Any]) -> None: ...
publish async
publish(envelope)
Source code in src/waku/messaging/transport/interfaces.py
# Protocol method (structural): deliver a single envelope with publish semantics.
async def publish(self, envelope: MessageEnvelope[Any]) -> None: ...

modules

ModuleType module-attribute

ModuleType = type[object | HasModuleMetadata]

DynamicModule dataclass

DynamicModule(
    *,
    providers=list(),
    imports=list(),
    exports=list(),
    extensions=list(),
    is_global=False,
    id=uuid4(),
    parent_module,
)

Bases: ModuleMetadata

providers class-attribute instance-attribute

providers = field(default_factory=list)

List of providers for dependency injection.

imports class-attribute instance-attribute

imports = field(default_factory=list)

List of modules imported by this module.

exports class-attribute instance-attribute

exports = field(default_factory=list)

List of types or modules exported by this module.

extensions class-attribute instance-attribute

extensions = field(default_factory=list)

List of module extensions for lifecycle hooks.

is_global class-attribute instance-attribute

is_global = False

Whether this module is global or not.

id class-attribute instance-attribute

id = field(default_factory=uuid4)

parent_module instance-attribute

parent_module

HasModuleMetadata

Bases: Protocol

ModuleCompiler

extract_metadata

extract_metadata(module_type)
Source code in src/waku/modules/_metadata.py
def extract_metadata(self, module_type: ModuleType | DynamicModule) -> tuple[ModuleType, ModuleMetadata]:
    try:
        return self._extract_metadata(cast('Hashable', module_type))
    except AttributeError:
        msg = f'{type(module_type).__name__} is not module'
        raise ValueError(msg) from None

ModuleMetadata dataclass

ModuleMetadata(
    *,
    providers=list(),
    imports=list(),
    exports=list(),
    extensions=list(),
    is_global=False,
    id=uuid4(),
)

providers class-attribute instance-attribute

providers = field(default_factory=list)

List of providers for dependency injection.

imports class-attribute instance-attribute

imports = field(default_factory=list)

List of modules imported by this module.

exports class-attribute instance-attribute

exports = field(default_factory=list)

List of types or modules exported by this module.

extensions class-attribute instance-attribute

extensions = field(default_factory=list)

List of module extensions for lifecycle hooks.

is_global class-attribute instance-attribute

is_global = False

Whether this module is global or not.

id class-attribute instance-attribute

id = field(default_factory=uuid4)

ModuleMetadataRegistry

ModuleMetadataRegistry(metadata_by_type, topological_order)

Registry providing access to collected module metadata.

Provides read access to all modules' metadata for aggregation purposes, with controlled write access through explicit methods.

This class is used during the module registration phase to enable cross-module aggregation of providers.

Source code in src/waku/modules/_metadata_registry.py
def __init__(
    self,
    metadata_by_type: dict[ModuleType, ModuleMetadata],
    topological_order: tuple[ModuleType, ...],
) -> None:
    self._metadata_by_type = metadata_by_type
    self._topological_order = topological_order

modules property

modules

All module types in topological order (dependencies first).

get_metadata

get_metadata(module_type)

Get metadata for a specific module.

Source code in src/waku/modules/_metadata_registry.py
def get_metadata(self, module_type: ModuleType) -> ModuleMetadata:
    """Get metadata for a specific module."""
    return self._metadata_by_type[module_type]

find_extensions

find_extensions(protocol)

Find all extensions of a given type across all modules.

Yields (module_type, extension) pairs in topological order. This is useful for aggregating data from extensions across modules.

PARAMETER DESCRIPTION
protocol

The extension protocol/type to search for.

TYPE: type[_ExtT]

YIELDS DESCRIPTION
tuple[ModuleType, _ExtT]

Tuples of (module_type, extension) for each matching extension.

Source code in src/waku/modules/_metadata_registry.py
def find_extensions(self, protocol: type[_ExtT]) -> Iterator[tuple[ModuleType, _ExtT]]:
    """Find all extensions of a given type across all modules.

    Yields (module_type, extension) pairs in topological order.
    This is useful for aggregating data from extensions across modules.

    Args:
        protocol: The extension protocol/type to search for.

    Yields:
        Tuples of (module_type, extension) for each matching extension.
    """
    for module_type in self._topological_order:
        metadata = self._metadata_by_type[module_type]
        for ext in metadata.extensions:
            if isinstance(ext, protocol):
                yield module_type, ext

add_provider

add_provider(module_type, provider)

Add a provider to a module's metadata.

This is the preferred way to add providers during registration hooks. The provider will become part of the owning module.

PARAMETER DESCRIPTION
module_type

The module to add the provider to.

TYPE: ModuleType

provider

The provider specification to add.

TYPE: Provider

RAISES DESCRIPTION
KeyError

If module_type is not in the registry.

Source code in src/waku/modules/_metadata_registry.py
def add_provider(self, module_type: ModuleType, provider: Provider) -> None:
    """Add a provider to a module's metadata.

    This is the preferred way to add providers during registration hooks.
    The provider will become part of the owning module.

    Args:
        module_type: The module to add the provider to.
        provider: The provider specification to add.

    Raises:
        KeyError: If module_type is not in the registry.
    """
    self._metadata_by_type[module_type].providers.append(provider)

Module

Module(module_type, metadata)
Source code in src/waku/modules/_module.py
def __init__(self, module_type: ModuleType, metadata: ModuleMetadata) -> None:
    self.id: Final[UUID] = metadata.id
    self.target: Final[ModuleType] = module_type

    self.providers: Final[Sequence[Provider]] = metadata.providers
    self.imports: Final[Sequence[ModuleType | DynamicModule]] = metadata.imports
    self.exports: Final[Sequence[type[object] | ModuleType | DynamicModule]] = metadata.exports
    self.extensions: Final[Sequence[ModuleExtension]] = metadata.extensions
    self.is_global: Final[bool] = metadata.is_global

    self._provider: BaseProvider | None = None

id instance-attribute

id = id

target instance-attribute

target = module_type

providers instance-attribute

providers = providers

imports instance-attribute

imports = imports

exports instance-attribute

exports = exports

extensions instance-attribute

extensions = extensions

is_global instance-attribute

is_global = is_global

name property

name

provider property

provider

create_provider

create_provider()
Source code in src/waku/modules/_module.py
def create_provider(self) -> BaseProvider:
    cls = cast('type[_ModuleProvider]', type(f'{self.name}Provider', (_ModuleProvider,), {}))
    self._provider = cls(self.providers)
    return self._provider

ModuleRegistry

ModuleRegistry(
    *, compiler, root_module, modules, providers, adjacency
)

Immutable registry and graph for module queries, traversal, and lookups.

Source code in src/waku/modules/_registry.py
def __init__(
    self,
    *,
    compiler: ModuleCompiler,
    root_module: Module,
    modules: dict[UUID, Module],
    providers: list[BaseProvider],
    adjacency: AdjacencyMatrix,
) -> None:
    self._compiler = compiler
    self._root_module = root_module
    self._modules = modules
    self._providers = tuple(providers)
    self._adjacency = adjacency
    self._parent_to_module = self._build_parent_mapping(modules)

root_module property

root_module

modules property

modules

providers property

providers

compiler property

compiler

get

get(module_type)
Source code in src/waku/modules/_registry.py
def get(self, module_type: ModuleType | DynamicModule) -> Module:
    # For plain module classes, check if they're registered via parent mapping first.
    # This handles the case where ConfigModule.register() was imported,
    # but ConfigModule (the class) is being exported.
    if isinstance(module_type, type) and module_type in self._parent_to_module:
        return self._parent_to_module[module_type]

    module_id = self.compiler.extract_metadata(module_type)[1].id
    return self.get_by_id(module_id)

get_by_id

get_by_id(module_id)
Source code in src/waku/modules/_registry.py
def get_by_id(self, module_id: UUID) -> Module:
    module = self._modules.get(module_id)
    if module is None:
        msg = f'Module with ID {module_id} is not registered in the graph.'
        raise KeyError(msg)
    return module

traverse

traverse(from_=None)

Traverse the module graph in depth-first post-order (children before parent) recursively.

PARAMETER DESCRIPTION
from_

Start module (default: root)

TYPE: Module | None DEFAULT: None

YIELDS DESCRIPTION
Module

Each traversed module (post-order)

TYPE: Module

Source code in src/waku/modules/_registry.py
def traverse(self, from_: Module | None = None) -> Iterator[Module]:
    """Traverse the module graph in depth-first post-order (children before parent) recursively.

    Args:
        from_: Start module (default: root)

    Yields:
        Module: Each traversed module (post-order)
    """
    start_module = from_ or self._root_module
    visited: set[UUID] = set()

    def _dfs(module: Module) -> Iterator[Module]:
        if module.id in visited:
            return

        visited.add(module.id)

        # Process children first (maintain original order)
        neighbor_ids = self._adjacency[module.id]
        for neighbor_id in neighbor_ids:
            if neighbor_id == module.id:
                continue
            neighbor = self.get_by_id(neighbor_id)
            yield from _dfs(neighbor)

        # Process current module after children (post-order)
        yield module

    yield from _dfs(start_module)

ModuleRegistryBuilder

ModuleRegistryBuilder(
    root_module_type,
    compiler=None,
    context=None,
    app_extensions=(),
)
Source code in src/waku/modules/_registry_builder.py
def __init__(
    self,
    root_module_type: ModuleType,
    compiler: ModuleCompiler | None = None,
    context: dict[Any, Any] | None = None,
    app_extensions: Sequence[ApplicationExtension] = (),
) -> None:
    self._compiler: Final = compiler or ModuleCompiler()
    self._root_module_type: Final = root_module_type
    self._context: Final = context
    self._app_extensions: Final = app_extensions
    self._modules: dict[UUID, Module] = {}
    self._providers: list[BaseProvider] = []

    self._metadata_cache: dict[ModuleType | DynamicModule, tuple[ModuleType, ModuleMetadata]] = {}

build

build()
Source code in src/waku/modules/_registry_builder.py
def build(self) -> ModuleRegistry:
    modules, adjacency = self._collect_modules()
    self._execute_registration_hooks(modules)
    root_module = self._register_modules(modules)
    return self._build_registry(root_module, adjacency)

module

module(
    *,
    providers=(),
    imports=(),
    exports=(),
    extensions=(),
    is_global=False,
)

Decorator to define a module.

PARAMETER DESCRIPTION
providers

Sequence of providers for dependency injection.

TYPE: Sequence[Provider] DEFAULT: ()

imports

Sequence of modules imported by this module.

TYPE: Sequence[ModuleType | DynamicModule] DEFAULT: ()

exports

Sequence of types or modules exported by this module.

TYPE: Sequence[type[object] | ModuleType | DynamicModule] DEFAULT: ()

extensions

Sequence of module extensions for lifecycle hooks.

TYPE: Sequence[ModuleExtension] DEFAULT: ()

is_global

Whether this module is global or not.

TYPE: bool DEFAULT: False

Source code in src/waku/modules/_metadata.py
def module(
    *,
    providers: Sequence[Provider] = (),
    imports: Sequence[ModuleType | DynamicModule] = (),
    exports: Sequence[type[object] | ModuleType | DynamicModule] = (),
    extensions: Sequence[ModuleExtension] = (),
    is_global: bool = False,
) -> Callable[[type[_T]], type[_T]]:
    """Decorator to define a module.

    Args:
        providers: Sequence of providers for dependency injection.
        imports: Sequence of modules imported by this module.
        exports: Sequence of types or modules exported by this module.
        extensions: Sequence of module extensions for lifecycle hooks.
        is_global: Whether this module is global or not.
    """

    def decorator(cls: type[_T]) -> type[_T]:
        metadata = ModuleMetadata(
            providers=list(providers),
            imports=list(imports),
            exports=list(exports),
            extensions=list(extensions),
            is_global=is_global,
        )
        for extension in metadata.extensions:
            if isinstance(extension, OnModuleConfigure):
                extension.on_module_configure(metadata)

        setattr(cls, _MODULE_METADATA_KEY, metadata)
        return cls

    return decorator

testing

override

override(container, *providers, context=None)

Temporarily override providers and/or context in an AsyncContainer for testing.

PARAMETER DESCRIPTION
container

The container whose providers/context will be overridden.

TYPE: AsyncContainer

*providers

Providers to override in the container.

TYPE: BaseProvider DEFAULT: ()

context

Context values to override.

TYPE: dict[Any, Any] | None DEFAULT: None

YIELDS DESCRIPTION
None

Context in which the container uses the overridden providers/context.

TYPE: Generator[None]

Example
from waku import WakuFactory, module
from waku.di import Scope, singleton
from waku.testing import override


class Service: ...


class ServiceOverride(Service): ...


# Override providers
with override(application.container, singleton(ServiceOverride, provided_type=Service)):
    service = await application.container.get(Service)
    assert isinstance(service, ServiceOverride)

# Override context
with override(application.container, context={int: 123}):
    ...
RAISES DESCRIPTION
ValueError

If container is not at root (APP) scope.

Source code in src/waku/testing.py
@contextmanager
def override(
    container: AsyncContainer,
    *providers: BaseProvider,
    context: dict[Any, Any] | None = None,
) -> Generator[None]:
    """Temporarily override providers and/or context in an AsyncContainer for testing.

    Args:
        container: The container whose providers/context will be overridden.
        *providers: Providers to override in the container.
        context: Context values to override.

    Yields:
        None: Context in which the container uses the overridden providers/context.

    Example:
        ```python
        from waku import WakuFactory, module
        from waku.di import Scope, singleton
        from waku.testing import override


        class Service: ...


        class ServiceOverride(Service): ...


        # Override providers
        with override(application.container, singleton(ServiceOverride, provided_type=Service)):
            service = await application.container.get(Service)
            assert isinstance(service, ServiceOverride)

        # Override context
        with override(application.container, context={int: 123}):
            ...
        ```

    Raises:
        ValueError: If container is not at root (APP) scope.
    """
    if container.scope != Scope.APP:
        msg = (
            f'override() only supports root (APP scope) containers, '
            f'got {container.scope.name} scope. '
            f'Use application.container instead of a scoped container.'
        )
        raise ValueError(msg)

    _mark_as_overrides(providers)

    original_context = container._context or {}  # noqa: SLF001
    merged_context = {**original_context, **(context or {})}
    new_container = make_async_container(
        _container_provider(container),
        *providers,
        context=merged_context,
        start_scope=container.scope,
        validation_settings=STRICT_VALIDATION,
    )

    # Only copy cache when no providers are overridden (context-only override)
    # Provider overrides may have transitive effects, so rebuild everything
    if not providers:
        _copy_cache(container, new_container)

    _swap(container, new_container)
    container._cache[CONTAINER_KEY] = container  # noqa: SLF001
    try:
        yield
    finally:
        _swap(new_container, container)

create_test_app async

create_test_app(
    *,
    base=None,
    providers=(),
    imports=(),
    extensions=(),
    app_extensions=DEFAULT_EXTENSIONS,
    context=None,
)

Create a minimal test application with given configuration.

Useful for testing extensions and module configurations in isolation without needing to set up a full application structure.

PARAMETER DESCRIPTION
base

Base module to build upon. When provided, the test module imports this module and providers act as overrides.

TYPE: ModuleType | DynamicModule | None DEFAULT: None

providers

Providers to register in the test module. When base is provided, these override existing providers.

TYPE: Sequence[Provider] DEFAULT: ()

imports

Additional modules to import into the test module.

TYPE: Sequence[ModuleType | DynamicModule] DEFAULT: ()

extensions

Module extensions to register.

TYPE: Sequence[ModuleExtension] DEFAULT: ()

app_extensions

Application extensions to register (default: DEFAULT_EXTENSIONS).

TYPE: Sequence[ApplicationExtension] DEFAULT: DEFAULT_EXTENSIONS

context

Context values to pass to the container.

TYPE: dict[Any, Any] | None DEFAULT: None

YIELDS DESCRIPTION
AsyncGenerator[WakuApplication]

Initialized WakuApplication.

Example
from waku.testing import create_test_app
from waku.di import singleton


class IRepository(Protocol):
    async def get(self, id: str) -> Entity: ...


class FakeRepository(IRepository):
    async def get(self, id: str) -> Entity:
        return Entity(id=id)


# Create test app from scratch
async def test_my_extension():
    extension = MyExtension().bind(SomeEvent, SomeHandler)

    async with create_test_app(
        extensions=[extension],
        providers=[singleton(IRepository, FakeRepository)],
    ) as app:
        service = await app.container.get(MyService)
        result = await service.do_something()
        assert result == expected


# Create test app based on existing module with overrides
async def test_with_base_module():
    async with create_test_app(
        base=AppModule,
        providers=[singleton(IRepository, FakeRepository)],
    ) as app:
        # FakeRepository replaces the real one from AppModule
        repo = await app.container.get(IRepository)
        assert isinstance(repo, FakeRepository)
Source code in src/waku/testing.py
@asynccontextmanager
async def create_test_app(
    *,
    base: ModuleType | DynamicModule | None = None,
    providers: Sequence[Provider] = (),
    imports: Sequence[ModuleType | DynamicModule] = (),
    extensions: Sequence[ModuleExtension] = (),
    app_extensions: Sequence[ApplicationExtension] = DEFAULT_EXTENSIONS,
    context: dict[Any, Any] | None = None,
) -> AsyncGenerator[WakuApplication]:
    """Create a minimal test application with given configuration.

    Useful for testing extensions and module configurations in isolation
    without needing to set up a full application structure.

    Args:
        base: Base module to build upon. When provided, the test module
            imports this module and providers act as overrides.
        providers: Providers to register in the test module.
            When `base` is provided, these override existing providers.
        imports: Additional modules to import into the test module.
        extensions: Module extensions to register.
        app_extensions: Application extensions to register (default: DEFAULT_EXTENSIONS).
        context: Context values to pass to the container.

    Yields:
        Initialized WakuApplication.

    Example:
        ```python
        from waku.testing import create_test_app
        from waku.di import singleton


        class IRepository(Protocol):
            async def get(self, id: str) -> Entity: ...


        class FakeRepository(IRepository):
            async def get(self, id: str) -> Entity:
                return Entity(id=id)


        # Create test app from scratch
        async def test_my_extension():
            extension = MyExtension().bind(SomeEvent, SomeHandler)

            async with create_test_app(
                extensions=[extension],
                providers=[singleton(IRepository, FakeRepository)],
            ) as app:
                service = await app.container.get(MyService)
                result = await service.do_something()
                assert result == expected


        # Create test app based on existing module with overrides
        async def test_with_base_module():
            async with create_test_app(
                base=AppModule,
                providers=[singleton(IRepository, FakeRepository)],
            ) as app:
                # FakeRepository replaces the real one from AppModule
                repo = await app.container.get(IRepository)
                assert isinstance(repo, FakeRepository)
        ```
    """
    all_imports = list(imports)
    if base is not None:
        all_imports.insert(0, base)

    override_providers = list(providers)
    if base is not None:
        _mark_as_overrides(override_providers)

    @module(
        providers=override_providers,
        imports=all_imports,
        extensions=list(extensions),
    )
    class _TestModule:
        pass

    app = WakuFactory(_TestModule, context=context, extensions=app_extensions).create()
    async with app:
        yield app

uow

IUnitOfWork

Bases: Protocol

commit async

commit()
Source code in src/waku/uow.py
async def commit(self) -> None: ...

rollback async

rollback()
Source code in src/waku/uow.py
async def rollback(self) -> None: ...

validation

ValidationRule

Bases: Protocol

validate

validate(context)
Source code in src/waku/validation/_abc.py
def validate(self, context: ValidationContext) -> list[ValidationError]: ...

ValidationError

Bases: WakuError

ValidationContext dataclass

ValidationContext(*, app)

app instance-attribute

app

ValidationExtension

ValidationExtension(rules, *, strict=True)

Bases: AfterApplicationInit

Source code in src/waku/validation/_extension.py
def __init__(self, rules: Sequence[ValidationRule], *, strict: bool = True) -> None:
    self.rules = rules
    self.strict: Final = strict

rules instance-attribute

rules = rules

strict instance-attribute

strict = strict

after_app_init async

after_app_init(app)
Source code in src/waku/validation/_extension.py
async def after_app_init(self, app: WakuApplication) -> None:
    context = ValidationContext(app=app)

    errors_chain = chain.from_iterable(rule.validate(context) for rule in self.rules)
    if errors := list(errors_chain):
        self._raise(errors)

rules

DependenciesAccessibleRule

DependenciesAccessibleRule(cache_size=1000)

Bases: ValidationRule

Validates that all dependencies required by providers are accessible.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(self, cache_size: int = 1000) -> None:
    self._cache = LRUCache[set[type[object]]](cache_size)
    self._types_extractor = ModuleTypesExtractor(self._cache)
validate
validate(context)
Source code in src/waku/validation/rules/dependency_accessible.py
@override
def validate(self, context: ValidationContext) -> list[ValidationError]:
    self._cache.clear()

    registry = context.app.registry
    modules = list(registry.modules)
    container = context.app.container

    strategies: list[AccessibilityStrategy] = [
        GlobalProvidersStrategy(modules, container, self._types_extractor, registry),
        LocalProvidersStrategy(self._types_extractor),
        ContextVarsStrategy(self._types_extractor),
        ImportedModulesStrategy(registry, self._types_extractor),
    ]

    checker = DependencyAccessChecker(strategies)
    errors: list[ValidationError] = []

    for module in modules:
        for factory in module.provider.factories:
            inaccessible_deps = checker.find_inaccessible_dependencies(
                dependencies=factory.dependencies,
                module=module,
            )
            errors.extend(
                DependencyInaccessibleError(
                    required_type=dep_type,
                    required_by=factory.source,
                    from_module=module,
                )
                for dep_type in inaccessible_deps
            )

    return errors

DependencyInaccessibleError

DependencyInaccessibleError(
    required_type, required_by, from_module
)

Bases: ValidationError

Error indicating a dependency is not accessible to a provider/module.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(
    self,
    required_type: type[object],
    required_by: object,
    from_module: Module,
) -> None:
    self.required_type = required_type
    self.required_by = required_by
    self.from_module = from_module
    super().__init__(str(self))
required_type instance-attribute
required_type = required_type
required_by instance-attribute
required_by = required_by
from_module instance-attribute
from_module = from_module

dependency_accessible

DependencyInaccessibleError
DependencyInaccessibleError(
    required_type, required_by, from_module
)

Bases: ValidationError

Error indicating a dependency is not accessible to a provider/module.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(
    self,
    required_type: type[object],
    required_by: object,
    from_module: Module,
) -> None:
    self.required_type = required_type
    self.required_by = required_by
    self.from_module = from_module
    super().__init__(str(self))
required_type instance-attribute
required_type = required_type
required_by instance-attribute
required_by = required_by
from_module instance-attribute
from_module = from_module
AccessibilityStrategy

Bases: ABC

Strategy for checking if a type is accessible to a module.

is_accessible abstractmethod
is_accessible(required_type, module)

Check if the required type is accessible to the given module.

Source code in src/waku/validation/rules/dependency_accessible.py
@abstractmethod
def is_accessible(self, required_type: type[object], module: Module) -> bool:
    """Check if the required type is accessible to the given module."""
GlobalProvidersStrategy
GlobalProvidersStrategy(
    modules, container, types_extractor, registry
)

Bases: AccessibilityStrategy

Check if type is provided by a global module or APP-scoped context.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(
    self,
    modules: Sequence[Module],
    container: AsyncContainer,
    types_extractor: ModuleTypesExtractor,
    registry: ModuleRegistry,
) -> None:
    self._global_types = self._build_global_types(modules, container, types_extractor, registry)
is_accessible
is_accessible(required_type, module)
Source code in src/waku/validation/rules/dependency_accessible.py
@override
def is_accessible(self, required_type: type[object], module: Module) -> bool:
    return required_type in self._global_types
LocalProvidersStrategy
LocalProvidersStrategy(types_extractor)

Bases: AccessibilityStrategy

Check if type is provided by the module itself.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(self, types_extractor: ModuleTypesExtractor) -> None:
    self._types_extractor = types_extractor
is_accessible
is_accessible(required_type, module)
Source code in src/waku/validation/rules/dependency_accessible.py
@override
def is_accessible(self, required_type: type[object], module: Module) -> bool:
    return required_type in self._types_extractor.get_provided_types(module)
ContextVarsStrategy
ContextVarsStrategy(types_extractor)

Bases: AccessibilityStrategy

Check if type is provided by application or request container context.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(self, types_extractor: ModuleTypesExtractor) -> None:
    self._types_extractor = types_extractor
is_accessible
is_accessible(required_type, module)
Source code in src/waku/validation/rules/dependency_accessible.py
@override
def is_accessible(self, required_type: type[object], module: Module) -> bool:
    return required_type in self._types_extractor.get_context_vars(module)
ImportedModulesStrategy
ImportedModulesStrategy(registry, types_extractor)

Bases: AccessibilityStrategy

Check if type is accessible via imported modules (direct export or re-export).

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(self, registry: ModuleRegistry, types_extractor: ModuleTypesExtractor) -> None:
    self._registry = registry
    self._types_extractor = types_extractor
is_accessible
is_accessible(required_type, module)
Source code in src/waku/validation/rules/dependency_accessible.py
@override
def is_accessible(self, required_type: type[object], module: Module) -> bool:
    for imported in module.imports:
        imported_module = self._registry.get(imported)
        if self._is_directly_exported(required_type, imported_module):
            return True
        if self._is_reexported(required_type, imported_module):
            return True
    return False
DependencyAccessChecker
DependencyAccessChecker(strategies)

Handles dependency accessibility checks between modules.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(self, strategies: Sequence[AccessibilityStrategy]) -> None:
    self._strategies = strategies
find_inaccessible_dependencies
find_inaccessible_dependencies(dependencies, module)
Source code in src/waku/validation/rules/dependency_accessible.py
def find_inaccessible_dependencies(
    self,
    dependencies: Sequence[DependencyKey],
    module: Module,
) -> Iterable[type[object]]:
    for dependency in dependencies:
        if not self._is_accessible(dependency.type_hint, module):
            yield dependency.type_hint
DependenciesAccessibleRule
DependenciesAccessibleRule(cache_size=1000)

Bases: ValidationRule

Validates that all dependencies required by providers are accessible.

Source code in src/waku/validation/rules/dependency_accessible.py
def __init__(self, cache_size: int = 1000) -> None:
    self._cache = LRUCache[set[type[object]]](cache_size)
    self._types_extractor = ModuleTypesExtractor(self._cache)
validate
validate(context)
Source code in src/waku/validation/rules/dependency_accessible.py
@override
def validate(self, context: ValidationContext) -> list[ValidationError]:
    self._cache.clear()

    registry = context.app.registry
    modules = list(registry.modules)
    container = context.app.container

    strategies: list[AccessibilityStrategy] = [
        GlobalProvidersStrategy(modules, container, self._types_extractor, registry),
        LocalProvidersStrategy(self._types_extractor),
        ContextVarsStrategy(self._types_extractor),
        ImportedModulesStrategy(registry, self._types_extractor),
    ]

    checker = DependencyAccessChecker(strategies)
    errors: list[ValidationError] = []

    for module in modules:
        for factory in module.provider.factories:
            inaccessible_deps = checker.find_inaccessible_dependencies(
                dependencies=factory.dependencies,
                module=module,
            )
            errors.extend(
                DependencyInaccessibleError(
                    required_type=dep_type,
                    required_by=factory.source,
                    from_module=module,
                )
                for dep_type in inaccessible_deps
            )

    return errors