# Copyright © The Debusine Developers
# See the AUTHORS file at the top-level directory of this distribution
#
# This file is part of Debusine. It is subject to the license terms
# in the LICENSE file found in the top-level directory of this
# distribution. No part of Debusine, including this file, may be copied,
# modified, propagated, or distributed except according to the terms
# contained in the LICENSE file.
"""
Base Workflow infrastructure.
See :ref:`explanation-workflows` for a high level explanation of concepts used
here.
"""
import logging
from abc import ABCMeta, abstractmethod
from collections.abc import Callable
from functools import partial
from typing import Any, Literal, cast, override
from debusine.artifacts.models import (
ArtifactCategory,
BareDataCategory,
CollectionCategory,
TaskTypes,
)
from debusine.client.models import LookupChildType, RuntimeParameter
from debusine.db.models import (
Collection,
CollectionItem,
TaskDatabase,
WorkRequest,
Workspace,
)
from debusine.db.models.work_requests import (
InternalTaskError,
StatusChangeError,
)
from debusine.server.tasks.base import BaseServerTask
from debusine.server.workflows.models import (
BaseWorkflowData,
WorkRequestWorkflowData,
)
from debusine.tasks import TaskConfigError
from debusine.tasks.models import (
ActionUpdateCollectionWithArtifacts,
ActionUpdateCollectionWithData,
BaseDynamicTaskData,
BaseTaskData,
InputArtifact,
LookupMultiple,
LookupSingle,
OutputData,
OutputDataError,
)
from debusine.tasks.server import TaskDatabaseInterface
logger = logging.getLogger(__name__)
[docs]
class WorkflowValidationError(Exception):
    """A workflow rejected its inputs during validation."""
[docs]
class WorkflowRunError(Exception):
    """Running a workflow orchestrator or callback failed."""

    def __init__(
        self, work_request: "WorkRequest", message: str, code: str
    ) -> None:
        """
        Construct the exception.

        :param work_request: the work request whose orchestration failed
        :param message: user-facing description of the failure
        :param code: machine-readable error code, stored in output data
        """
        # Pass the message to the Exception base class so that str(exc)
        # and default tracebacks show something useful instead of an
        # empty string.
        super().__init__(message)
        self.work_request = work_request
        self.message = message
        self.code = code
[docs]
class Workflow[WD: BaseWorkflowData, DTD: BaseDynamicTaskData](
BaseServerTask[WD, DTD], metaclass=ABCMeta
):
"""
Base class for workflow orchestrators.
This is the base API for running :py:class:`WorkflowInstance` logic.
"""
TASK_TYPE = TaskTypes.WORKFLOW
[docs]
@classmethod
def from_name(cls, name: str) -> type['Workflow[Any, Any]']:
    """Return the workflow orchestrator class registered under *name*."""
    workflow_class = super().class_from_name(TaskTypes.WORKFLOW, name)
    return cast(type[Workflow[Any, Any]], workflow_class)
[docs]
@classmethod
def validate_static_parameters(cls, data: dict[str, Any]) -> None:
    """Validate WorkflowTemplate static_parameters."""
    # Base implementation accepts any static parameters; subclasses may
    # override this to enforce restrictions.
[docs]
@classmethod
def validate_runtime_parameters(
    cls, data: dict[str, Any] | Literal[RuntimeParameter.ANY]
) -> None:
    """Validate WorkflowTemplate runtime_parameters."""
    # ANY means "no restriction at all": nothing to check.
    if data == RuntimeParameter.ANY:
        return
    assert isinstance(data, dict)
    for name, allowed in data.items():
        # Each key must name a real task_data field of this workflow.
        if name not in cls.task_data_type.model_fields:
            raise WorkflowValidationError(
                f"{name} is not a task_data parameter for {cls.__name__}"
            )
        # The value is either ANY or a list of acceptable strings.
        if allowed == RuntimeParameter.ANY:
            continue
        if isinstance(allowed, str):
            raise WorkflowValidationError(
                f"{name} is set to a string ({allowed!r}), not a list of "
                f"valid values."
            )
        if not (
            isinstance(allowed, list)
            and all(isinstance(entry, str) for entry in allowed)
        ):
            raise WorkflowValidationError(
                f"{name} is set to {allowed!r}, not a list of valid values."
            )
[docs]
def ensure_dynamic_data(self, task_database: TaskDatabaseInterface) -> None:
    """Ensure that this workflow's dynamic task data has been computed."""
    if self.work_request.dynamic_task_data is None:
        # First orchestration: compute and persist the dynamic data.
        computed = self.compute_dynamic_data(task_database)
        self.work_request.dynamic_task_data = computed.model_dump(
            mode="json", exclude_unset=True
        )
        self.work_request.save()
    # Always (re)build the typed view from the stored JSON.
    self.dynamic_data = self.dynamic_task_data_type(
        **self.work_request.dynamic_task_data
    )
[docs]
@abstractmethod
def populate(self) -> None:
    """
    Create the initial WorkRequest structure.

    Called once, when the workflow first becomes runnable;
    :py:meth:`validate_input` will already have been called.

    Implementations MUST be idempotent: running this method several
    times with the same argument must leave the same
    :py:class:`WorkRequest` structure behind as running it once.
    """
[docs]
def get_callback(self, step: str) -> Callable[[], bool]:
    """
    Get the callback implementation for a given workflow step name.

    By default, subclasses may provide ``callback_{step.replace('-',
    '_')}`` methods to implement workflow callbacks with the
    corresponding ``step`` set in their workflow data.  Alternatively,
    they may override this method to provide a more complex mapping
    from work request to callback implementation.
    """
    method_name = f"callback_{step.replace('-', '_')}"
    handler = getattr(self, method_name, None)
    if handler is None:
        raise NotImplementedError(
            f"Unhandled workflow callback step: {step}"
        )
    assert callable(handler)
    # https://github.com/python/mypy/issues/20748
    return handler  # type: ignore[no-any-return]
[docs]
def callback(self, work_request: "WorkRequest") -> bool:
    """
    Perform an orchestration step.

    Called with a :py:class:`WorkRequest` of type internal/workflow
    (note that this is not the same as ``self.work_request``) when the
    workflow node becomes ready to execute.

    This method is required to be idempotent: calling it multiple
    times with the same argument MUST result in the same
    :py:class:`WorkRequest` structure as calling it once.

    Subclasses may provide ``callback_{step.replace('-', '_')}``
    methods with the corresponding ``step`` set in their workflow
    data, or override ``get_callback``.

    :return: True if the callback succeeded, otherwise False.
    """
    step = work_request.workflow_data.step
    if step is None:
        raise NotImplementedError(
            "Workflow callback called without setting `step`"
        )
    handler = self.get_callback(step)
    result = handler()
    assert isinstance(result, bool)
    return result
[docs]
def orchestrate_child(self, work_request: WorkRequest) -> None:
    """
    Orchestrate a child work request in whatever way is appropriate.

    Work requests that end up neither pending nor running are silently
    skipped.
    """
    if (
        work_request.status == WorkRequest.Statuses.BLOCKED
        and work_request.can_be_automatically_unblocked()
    ):
        work_request.mark_pending()
    # Re-read the status: mark_pending() above may have changed it.
    runnable = {
        WorkRequest.Statuses.PENDING,
        WorkRequest.Statuses.RUNNING,
    }
    if work_request.status in runnable:
        # Orchestrator errors are logged rather than propagated: the
        # workflow is only marked COMPLETED/ERROR once all of its
        # children have completed or aborted.
        orchestrate_workflow(work_request)
[docs]
@staticmethod
def provides_artifact(
    work_request: WorkRequest,
    category: ArtifactCategory,
    name: str,
    *,
    data: dict[str, Any] | None = None,
    artifact_filters: dict[str, Any] | None = None,  # noqa: U100
) -> None:
    """
    Indicate work_request will provide an artifact.

    Unless the promise or artifact already exists in the workflow's
    internal collection, create an event reaction for ``on_creation``
    adding a promise that this work request will create an artifact,
    and an event reaction for ``on_success`` to update the collection
    with the relevant artifact.

    :param work_request: work request that will provide the artifact
    :param category: category of the artifact that will be provided
    :param name: name of this item in the workflow’s internal collection
    :param data: add it to the data dictionary for the event reaction
    :param artifact_filters: for the
        :ref:`action-update-collection-with-artifacts` action, to allow
        workflows to add filtering
    :raise ValueError: if ``name`` contains "/", or if a key in
        ``data`` starts with ``promise_``
    """
    if "/" in name:
        # "/" separates lookup string segments, so it cannot appear in
        # a collection item name.
        raise ValueError('Collection item name may not contain "/".')
    if data is not None:
        for key in data:
            # promise_* keys are reserved for the fields set below.
            if key.startswith("promise_"):
                raise ValueError(
                    f'Field name "{key}" starting with '
                    f'"promise_" is not allowed.'
                )
    # work_request is part of a workflow
    assert work_request.parent is not None
    try:
        # Idempotency guard: skip if a promise/artifact with this name
        # was already registered by a previous orchestration.
        work_request.lookup_single(
            f"internal@collections/name:{name}",
            expect_type=LookupChildType.ARTIFACT_OR_PROMISE,
        )
    except KeyError:
        work_request.process_update_collection_with_data(
            [
                ActionUpdateCollectionWithData(
                    collection="internal@collections",
                    name_template=name,
                    category=BareDataCategory.PROMISE,
                    data={
                        "promise_work_request_id": work_request.id,
                        "promise_workflow_id": work_request.parent.id,
                        "promise_category": category,
                        **(data or {}),
                    },
                )
            ]
        )
        # NOTE(review): the on_success reaction is added only when the
        # promise did not already exist, so repeated orchestration does
        # not stack duplicate reactions — confirm against callers.
        artifact_filters = artifact_filters or {}
        work_request.add_event_reaction(
            "on_success",
            ActionUpdateCollectionWithArtifacts(
                collection="internal@collections",
                name_template=name,
                variables=data,
                artifact_filters={**artifact_filters, "category": category},
            ),
        )
[docs]
@staticmethod
def requires_artifact(
    work_request: WorkRequest, lookup: LookupSingle | LookupMultiple
) -> None:
    """
    Indicate that work_request requires input (lookup).

    Resolve *lookup* and, for every result that is still a promise,
    make *work_request* depend on the work request that made the
    promise.

    :param work_request: for each lookup result call
        ``work_request.add_dependency(promise["promise_work_request_id"])``
    :param lookup: resolve the lookup and iterate over the results
        (for PROMISES only)
    """
    if isinstance(lookup, LookupMultiple):
        resolved = work_request.lookup_multiple(
            lookup, expect_type=LookupChildType.ARTIFACT_OR_PROMISE
        )
    else:
        resolved = [
            work_request.lookup_single(
                lookup, expect_type=LookupChildType.ARTIFACT_OR_PROMISE
            )
        ]
    for entry in resolved:
        item = entry.collection_item
        if item is None:
            continue
        # Only bare PROMISE items carry a providing work request;
        # concrete artifacts need no dependency.
        if (
            item.child_type == CollectionItem.Types.BARE
            and item.category == BareDataCategory.PROMISE
        ):
            provider = WorkRequest.objects.get(
                id=item.data["promise_work_request_id"]
            )
            work_request.add_dependency(provider)
[docs]
def work_request_ensure_child_worker(
    self,
    *,
    task_name: str,
    task_data: BaseTaskData | None = None,
    workflow_data: WorkRequestWorkflowData,
    relative_priority: int = 0,
) -> WorkRequest:
    """
    Create a child WORKER work request if one does not already exist.

    ``workflow_data.step`` is assumed to be unique among work request
    children with this task type and name, and stable across repeated
    orchestrations of the workflow.

    :param task_name: the task name for the child work request.
    :param task_data: the task data for the child work request.
    :param workflow_data: the workflow data for the child work request.
    :param relative_priority: if creating a new work request, set the
        base priority of the child to the effective priority of this
        workflow plus this relative priority.
    :return: new or existing :py:class:`WorkRequest`.
    """
    lookup = dict(
        task_type=TaskTypes.WORKER,
        task_name=task_name,
        workflow_data_json__step=workflow_data.step,
    )
    try:
        # Reuse the child identified by its step, if it already exists.
        return self.work_request.children.get(**lookup)
    except WorkRequest.DoesNotExist:
        return self.work_request.create_child_worker(
            task_name=task_name,
            task_data=task_data,
            workflow_data=workflow_data,
            relative_priority=relative_priority,
        )
[docs]
def work_request_ensure_child_server(
    self,
    *,
    task_name: str,
    task_data: BaseTaskData | None = None,
    workflow_data: WorkRequestWorkflowData,
) -> WorkRequest:
    """
    Create a child SERVER work request if one does not already exist.

    ``workflow_data.step`` is assumed to be unique among work request
    children with this task type and name, and stable across repeated
    orchestrations of the workflow.

    :param task_name: the task name for the child work request.
    :param task_data: the task data for the child work request.
    :param workflow_data: the workflow data for the child work request.
    :return: new or existing :py:class:`WorkRequest`.
    """
    lookup = dict(
        task_type=TaskTypes.SERVER,
        task_name=task_name,
        workflow_data_json__step=workflow_data.step,
    )
    try:
        # Reuse the child identified by its step, if it already exists.
        return self.work_request.children.get(**lookup)
    except WorkRequest.DoesNotExist:
        return self.work_request.create_child_server(
            task_name=task_name,
            task_data=task_data,
            workflow_data=workflow_data,
        )
[docs]
def work_request_ensure_child_internal(
    self,
    *,
    task_name: str,
    workflow_data: WorkRequestWorkflowData,
    relative_priority: int = 0,
) -> WorkRequest:
    """
    Create a child INTERNAL work request if one does not already exist.

    This assumes that ``workflow_data.step`` is unique among work
    request children with this task type and name, and that it is stable
    even if the workflow is orchestrated multiple times.

    :param task_name: the task name for the child work request.
    :param workflow_data: the workflow data for the child work request.
    :param relative_priority: if creating a new work request, set the
        base priority of the child to the effective priority of this
        workflow plus this relative priority.
    :return: new or existing :py:class:`WorkRequest`.
    """
    # (Unlike the other ensure_child_* helpers, INTERNAL work requests
    # take no task_data, so none is accepted or documented here.)
    try:
        return self.work_request.children.get(
            task_type=TaskTypes.INTERNAL,
            task_name=task_name,
            workflow_data_json__step=workflow_data.step,
        )
    except WorkRequest.DoesNotExist:
        return self.work_request.create_child_internal(
            task_name=task_name,
            workflow_data=workflow_data,
            relative_priority=relative_priority,
        )
[docs]
def work_request_ensure_child_workflow(
    self,
    *,
    task_name: str,
    task_data: BaseTaskData | None = None,
    workflow_data: WorkRequestWorkflowData,
    relative_priority: int = 0,
) -> WorkRequest:
    """
    Create a child WORKFLOW work request if one does not already exist.

    ``workflow_data.step`` is assumed to be unique among work request
    children with this task type and name, and stable across repeated
    orchestrations of the workflow.

    :param task_name: the task name for the child work request.
    :param task_data: the task data for the child work request.
    :param workflow_data: the workflow data for the child work request.
    :param relative_priority: if creating a new work request, set the
        base priority of the child to the effective priority of this
        workflow plus this relative priority.
    :return: new or existing :py:class:`WorkRequest`.
    """
    lookup = dict(
        task_type=TaskTypes.WORKFLOW,
        task_name=task_name,
        workflow_data_json__step=workflow_data.step,
    )
    try:
        # Reuse the child identified by its step, if it already exists.
        return self.work_request.children.get(**lookup)
    except WorkRequest.DoesNotExist:
        return self.work_request.create_child_workflow(
            task_name=task_name,
            task_data=task_data,
            workflow_data=workflow_data,
            relative_priority=relative_priority,
        )
[docs]
def work_request_ensure_child_signing(
    self,
    *,
    task_name: str,
    task_data: BaseTaskData | None = None,
    workflow_data: WorkRequestWorkflowData,
) -> WorkRequest:
    """
    Create a child SIGNING work request if one does not already exist.

    This assumes that ``workflow_data.step`` is unique among work
    request children with this task type and name, and that it is stable
    even if the workflow is orchestrated multiple times.

    :param task_name: the task name for the child work request.
    :param task_data: the task data for the child work request.
    :param workflow_data: the workflow data for the child work request.
    :return: new or existing :py:class:`WorkRequest`.
    """
    # (This helper takes no relative_priority, unlike its WORKER and
    # WORKFLOW siblings; do not document one.)
    try:
        return self.work_request.children.get(
            task_type=TaskTypes.SIGNING,
            task_name=task_name,
            workflow_data_json__step=workflow_data.step,
        )
    except WorkRequest.DoesNotExist:
        return self.work_request.create_child_signing(
            task_name=task_name,
            task_data=task_data,
            workflow_data=workflow_data,
        )
[docs]
def work_request_ensure_child_wait(
    self,
    *,
    task_name: str,
    task_data: BaseTaskData | None = None,
    workflow_data: WorkRequestWorkflowData,
) -> WorkRequest:
    """
    Create a child WAIT work request if one does not already exist.

    ``workflow_data.step`` is assumed to be unique among work request
    children with this task type and name, and stable across repeated
    orchestrations of the workflow.

    :param task_name: the task name for the child work request.
    :param task_data: the task data for the child work request.
    :param workflow_data: the workflow data for the child work request.
    :return: new or existing :py:class:`WorkRequest`.
    """
    lookup = dict(
        task_type=TaskTypes.WAIT,
        task_name=task_name,
        workflow_data_json__step=workflow_data.step,
    )
    try:
        # Reuse the child identified by its step, if it already exists.
        return self.work_request.children.get(**lookup)
    except WorkRequest.DoesNotExist:
        return self.work_request.create_child_wait(
            task_name=task_name,
            task_data=task_data,
            workflow_data=workflow_data,
        )
[docs]
def lookup_singleton_collection(
    self,
    category: CollectionCategory,
    *,
    workspace: Workspace | None = None,
) -> Collection:
    """Look up a singleton collection related to this workflow."""
    result = self.work_request.lookup_single(
        f"_@{category}",
        # Fall back to the workflow's own workspace when none is given.
        workspace=workspace or self.workspace,
        expect_type=LookupChildType.COLLECTION,
    )
    return result.collection
@override
def _execute(self) -> WorkRequest.Results:
    """Unused abstract method from DBTask: populate() is used instead."""
    # Workflows are driven by populate()/callback(), never executed
    # directly, so this must never be reached.
    raise NotImplementedError()
[docs]
def describe_exc(exception: Exception) -> str:
    """User-facing description of exception."""
    # Some exceptions stringify to "" (no args); fall back to repr so
    # the description is never empty.
    return str(exception) or repr(exception)
[docs]
def orchestrate_workflow(work_request: WorkRequest) -> bool:
"""
Orchestrate a workflow in whatever way is appropriate.
For a workflow callback, run ``callback`` and mark the work request as
completed. For a workflow, run ``populate`` and unblock workflow
children, but leave the workflow running until all its children have
finished. For any other work request, log an error.
Return False if any errors were logged, otherwise True.
"""
def run_or_raise_workflow_run_error[R: Any](
func: Callable[[], R], message_template: str, code: str
) -> R:
try:
return func()
except Exception as exc:
message = describe_exc(exc)
raise WorkflowRunError(
work_request, message_template.format(message=message), code
) from exc
try:
run_or_raise_workflow_run_error(
work_request.mark_running,
"Failed to mark work request as running",
"status-change-failed",
)
match (work_request.task_type, work_request.task_name):
case (TaskTypes.INTERNAL, "workflow") | (TaskTypes.WORKFLOW, _):
try:
orchestrator = work_request.get_task()
except InternalTaskError as exc:
raise WorkflowRunError(
work_request, str(exc), "configure-failed"
) from exc
except TaskConfigError as exc:
raise WorkflowRunError(
work_request,
f"Failed to configure: {exc}",
"configure-failed",
) from exc
else:
assert isinstance(orchestrator, Workflow)
case _:
raise WorkflowRunError(
work_request,
"Does not have a workflow orchestrator",
"configure-failed",
)
match (work_request.task_type, work_request.task_name):
case (TaskTypes.INTERNAL, "workflow"):
parent = orchestrator.work_request
# Workflow callbacks compute dynamic data for their parent
# workflow. If this fails, it's probably least confusing to
# log information about both the workflow callback and the
# workflow.
run_or_raise_workflow_run_error(
partial(
orchestrator.ensure_dynamic_data, TaskDatabase(parent)
),
f"Failed to compute dynamic data for "
f"{parent.task_type}/{parent.task_name} ({parent.id}): "
f"{{message}}",
"dynamic-data-failed",
)
succeeded = run_or_raise_workflow_run_error(
partial(orchestrator.callback, work_request),
"Orchestrator failed: {message}",
"orchestrator-failed",
)
work_request.mark_completed(
WorkRequest.Results.SUCCESS
if succeeded
else WorkRequest.Results.FAILURE
)
case (TaskTypes.WORKFLOW, _):
with work_request.running_workflow_orchestrator():
run_or_raise_workflow_run_error(
partial(
orchestrator.ensure_dynamic_data,
TaskDatabase(orchestrator.work_request),
),
"Failed to compute dynamic data: {message}",
"dynamic-data-failed",
)
run_or_raise_workflow_run_error(
orchestrator.populate,
"Orchestrator failed: {message}",
"orchestrator-failed",
)
if work_request.children.exists():
orchestrator.work_request.unblock_workflow_children()
work_request.maybe_finish_workflow()
# The workflow is left running until all its children have
# finished.
case _: # pragma: no cover
# Already checked above.
raise AssertionError(
f"Unexpected work request: "
f"{work_request.task_type}/{work_request.task_name}"
)
return True
except WorkflowRunError as exc:
logger.warning(
"Error running work request %s/%s (%s): %s",
exc.work_request.task_type,
exc.work_request.task_name,
exc.work_request.id,
exc.message,
exc_info=True,
)
try:
work_request.mark_completed(
WorkRequest.Results.ERROR,
output_data=OutputData(
errors=[OutputDataError(message=exc.message, code=exc.code)]
),
)
except StatusChangeError:
pass
return False