Standard Nodes and Effects

Generic Utilities

spawn_node

spawn_node(node_type: type[N], **args: Any) -> NodeBuilder[Any, Any]

A convenience helper to write effect triggering functions.

Attributes:

- node_type: The type of the node to spawn (e.g., Branch).
- args: Arguments to populate the node fields, passed to Node.spawn.
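
For illustration, here is a minimal sketch of a custom effect-triggering function written with spawn_node, mirroring the definition of branch further below. The Guess node and its cands field are hypothetical and not part of the library.

def guess[P, T](cands: Opaque[P, T]) -> dp.Strategy[Guess, P, T]:
    # Spawn a (hypothetical) `Guess` node and return the action that the
    # policy sends back, cast to the expected element type.
    recv = yield spawn_node(Guess, cands=cands)
    return cast(T, recv.action)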

Source code in src/delphyne/stdlib/nodes.py
def spawn_node[N: dp.Node](
    node_type: type[N], **args: Any
) -> dp.NodeBuilder[Any, Any]:
    """
    A convenience helper to write effect triggering functions.

    Attributes:
        node_type: The type of the node to spawn (e.g., `Branch`).
        args: Arguments to populate the node fields, passed to
            [`Node.spawn`][delphyne.core.trees.Node.spawn].
    """
    return dp.NodeBuilder(lambda spawner: node_type.spawn(spawner, **args))

FromPolicy

FromPolicy = Callable[[Any], T]

Type for an inner-policy-dependent data field.

We use Any instead of introducing an inner policy type parameter P, since Node is not parametric either. Thus, this alias is mostly meant for readability and expressing intent.

NodeMeta

Abstract base class for node metadata.

Nodes can feature fields with arbitrary metadata accessible to policies (e.g., meta field of Branch). Typing those fields with NodeMeta instead of object or Any allows for better type safety. In particular, it prevents errors that arise from accidentally passing uninstantiated parametric inner policy fields.
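
For example, a hypothetical metadata class could look as follows and be attached to a Branch node via the meta argument of branch (all names here are illustrative, not part of the library):

@dataclass(frozen=True)
class BranchingHint(NodeMeta):
    # Hypothetical hint telling policies how many candidates to consider.
    max_candidates: int

# Within a strategy:
#     x = yield from branch(cands, meta=lambda _: BranchingHint(max_candidates=4))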

Source code in src/delphyne/stdlib/nodes.py
class NodeMeta:
    """
    Abstract base class for node metadata.

    Nodes can feature fields with arbitrary metadata accessible to
    policies (e.g., `meta` field of `Branch`). Typing those fields with
    `NodeMeta` instead of `object` or `Any` allows for better type
    safety. In particular, it prevents errors that arise from
    accidentally passing uninstantiated parametric inner policy fields.
    """

    pass

TypedSpaceElementRef dataclass

Bases: Generic[T_inv]

A wrapper around a global space element reference that carries phantom type information.

See branch for an example of usage.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class TypedSpaceElementRef(Generic[T_inv]):
    """
    A wrapper around a global space element reference that carries
    phantom type information.

    See `branch` for an example of usage.
    """

    node: dp.refs.GlobalNodeRef
    element: dp.refs.SpaceElementRef

Skippable dataclass

Bases: Node

Base class for skippable nodes that contain meta-information and can be safely skipped by policies.

Many standard policies such as dfs handle such nodes by default. An example child class is Hindsight. We choose not to make Value skippable since eliminating value information must be explicit. The Message node is also not skippable, since doing so would risk elim_messages not being called and the messages being lost.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Skippable(dp.Node):
    """
    Base class for skippable nodes that contain meta-information and can
    be safely skipped by policies.

    Many standard policies such as `dfs` handle such nodes by default.
    An example child class is `Hindsight`. We choose not to make `Value`
    skippable since eliminating value information must be explicit. The
    `Message` node is also not skippable, since doing so would risk
    `elim_messages` not being called and the messages being lost.
    """

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def valid_action(self, action: object) -> bool:
        return action is None

Branch

Branch dataclass

Bases: Node

The standard Branch effect.

Can be triggered using the branch function, which allows branching over elements of an opaque space.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Branch(dp.Node):
    """
    The standard `Branch` effect.

    Can be triggered using the `branch` function, which allows branching
    over elements of an opaque space.
    """

    cands: OpaqueSpace[Any, Any]
    meta: FromPolicy[NodeMeta | None]

    @override
    def navigate(self) -> dp.Navigation:
        return (yield self.cands)

    @override
    def primary_space(self) -> dp.Space[Any]:
        return self.cands

branch

branch(
    cands: Opaque[P, T],
    *,
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
    inner_policy_type: type[P] | None = None,
) -> Strategy[Branch, P, T]
branch(
    cands: Opaque[P, T],
    *,
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
    return_ref: Literal[True],
    inner_policy_type: type[P] | None = None,
) -> Strategy[Branch, P, tuple[T, TypedSpaceElementRef[T]]]
branch(
    cands: Opaque[P, T],
    *,
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
    return_ref: bool = False,
    inner_policy_type: type[P] | None = None,
) -> Strategy[Branch, P, T | tuple[T, TypedSpaceElementRef[T]]]

Branch over the elements of an opaque space.

Parameters:

- cands (Opaque[P, T], required): An opaque space, which can be defined from either a query or a strategy via the using method.
- meta (Callable[[P], NodeMeta | None], default: lambda _: None): An optional mapping from the ambient inner policy to arbitrary metadata accessible to search policies.
- inner_policy_type (type[P] | None, default: None): Ambient inner policy type. This information is not used at runtime but it can be provided to help type inference when necessary.
- return_ref (bool, default: False): Whether to return a typed reference to the space of candidates along with the chosen element.
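
A typical use of branch within a strategy function is sketched below. The MakeGuess query, the check function, and the inner policy type MyIP with its guess field are hypothetical; the opaque space is built with the using method mentioned above.

def pick_guess(problem: str) -> dp.Strategy[Branch | Fail, MyIP, int]:
    # Branch over candidate answers proposed by a (hypothetical) query.
    cand = yield from branch(MakeGuess(problem).using(lambda p: p.guess))
    # Reject invalid candidates (see the `Fail` effect below).
    yield from ensure(check(problem, cand), "invalid_guess")
    return cand
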
Source code in src/delphyne/stdlib/nodes.py
def branch[P, T](
    cands: Opaque[P, T],
    *,
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
    return_ref: bool = False,
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Branch, P, T | tuple[T, TypedSpaceElementRef[T]]]:
    """
    Branch over the elements of an opaque space.

    Arguments:
        cands: An opaque space, which can be defined from either a query
            or a strategy via the `using` method.
        meta: An optional mapping from the ambient inner policy to
            arbitrary metadata accessible to search policies.
        inner_policy_type: Ambient inner policy type. This information
            is not used at runtime but it can be provided to help type
            inference when necessary.
        return_ref: Whether to return a typed reference to the
            space of candidates along with the chosen element.
    """
    recv = yield spawn_node(Branch, cands=cands, meta=meta)
    ret = cast(T, recv.action)
    if not return_ref:
        return ret
    if not isinstance(recv.value_ref, dp.refs.SpaceElementRef):
        # The error below should virtually never happen in practice when
        # using policies that make no assumptions about the specific
        # type of candidates (by a parametricity argument).
        raise ValueError(
            "When calling `branch` with `return_ref=True`, only space "
            "elements can be sent back by the policy (and not nontrivial "
            "assemblies of space elements)."
        )
    ref = TypedSpaceElementRef[T](node=recv.node_ref, element=recv.value_ref)
    return (ret, ref)

Run dataclass

Bases: Branch

A degenerate branching node for which only one candidate is to be explored, allowing an element to be extracted from an opaque space without actual branching.

Spawning such a node instead of a Branch node provides a semantic hint to search policies that no actual branching should be performed (policies can choose to disregard this hint).

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Run(Branch):
    """
    A degenerate branching node for which only one candidate is to be
    explored, allowing an element to be extracted from an opaque space
    without actual branching.

    Spawning such a node instead of a `Branch` node provides a
    semantic hint to search policies that no actual branching should be
    performed (policies can choose to disregard this hint).
    """

run

run(
    cands: Opaque[P, T],
    *,
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
    inner_policy_type: type[P] | None = None,
) -> Strategy[Run, P, T]

Obtain a single element from an opaque space without branching.

See Run for more details.

Parameters:

- cands (Opaque[P, T], required): An opaque space, which can be defined from either a query or a strategy via the using method.
- meta (Callable[[P], NodeMeta | None], default: lambda _: None): An optional mapping from the ambient inner policy to arbitrary metadata accessible to search policies.
- inner_policy_type (type[P] | None, default: None): Ambient inner policy type. This information is not used at runtime but it can be provided to help type inference when necessary.
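
A sketch of using run, assuming a hypothetical Summarize query and an inner policy type MyIP with a summarize field:

def summarize_step(doc: str) -> dp.Strategy[Run, MyIP, str]:
    # Extract a single element from the opaque space, without branching.
    summary = yield from run(Summarize(doc).using(lambda p: p.summarize))
    return summary
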
Source code in src/delphyne/stdlib/nodes.py
def run[P, T](
    cands: Opaque[P, T],
    *,
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Run, P, T]:
    """
    Obtain a single element from an opaque space without branching.

    See `Run` for more details.

    Arguments:
        cands: An opaque space, which can be defined from either a query
            or a strategy via the `using` method.
        meta: An optional mapping from the ambient inner policy to
            arbitrary metadata accessible to search policies.
        inner_policy_type: Ambient inner policy type. This information
            is not used at runtime but it can be provided to help type
            inference when necessary.
    """
    recv = yield spawn_node(Run, cands=cands, meta=meta)
    return cast(T, recv.action)

Fail

Fail dataclass

Bases: Node

The standard Fail effect.

Can be triggered using the fail and ensure functions.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Fail(dp.Node):
    """
    The standard `Fail` effect.

    Can be triggered using the `fail` and `ensure` functions.
    """

    error: dp.Error

    @override
    def leaf_node(self) -> bool:
        return True

    @override
    def navigate(self) -> dp.Navigation:
        assert False
        yield

    @override
    def summary_message(self) -> str:
        return str(self.error)

fail

fail(
    label: str | None = None, *, message: str | None = None, error: Error | None = None
) -> Strategy[Fail, object, NoReturn]

Fail immediately with an error.

The error can be specified using the error keyword argument. As a shortcut, the label and message arguments can also be used to directly specify the corresponding fields of the Error type. Those arguments can only be used if error is not provided.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from fail(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def fail(
    label: str | None = None,
    *,
    message: str | None = None,
    error: dp.Error | None = None,
) -> dp.Strategy[Fail, object, typing.NoReturn]:
    """
    Fail immediately with an error.

    The error can be specified using the `error` keyword argument. As a
    shortcut, the `label` and `message` arguments can also be used to
    directly specify the corresponding fields of the `Error` type. Those
    arguments can only be used if `error` is not provided.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from fail(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Fail, error=_build_error(message, label, error))
    assert False

ensure

ensure(
    prop: bool,
    label: str | None = None,
    *,
    message: str | None = None,
    error: Error | None = None,
) -> Strategy[Fail, object, None]

Ensure that a property holds, otherwise fail with an error.

See fail regarding the label, message and error arguments.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from ensure(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.
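
A short sketch combining ensure and fail (the check function is hypothetical):

def validated(problem: str, cand: int) -> dp.Strategy[Fail, object, int]:
    # Fail the current branch if the candidate does not pass the check.
    yield from ensure(check(problem, cand), "bad_candidate",
                      message="Candidate rejected by the checker.")
    if cand < 0:
        # Alternatively, fail unconditionally with an explicit label.
        yield from fail("negative_candidate")
    return cand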

Source code in src/delphyne/stdlib/nodes.py
def ensure(
    prop: bool,
    label: str | None = None,
    *,
    message: str | None = None,
    error: dp.Error | None = None,
) -> dp.Strategy[Fail, object, None]:
    """
    Ensure that a property holds, otherwise fail with an error.

    See `fail` regarding the `label`, `message` and `error` arguments.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from ensure(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    if not prop:
        yield from fail(label=label, message=message, error=error)

Feedback

Feedback dataclass

Bases: Skippable

The standard Feedback effect.

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class Feedback(nd.Skippable):
    """
    The standard `Feedback` effect.
    """

ThrowFeedback dataclass

Bases: Feedback

Feedback source.

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class ThrowFeedback(Feedback):
    """
    Feedback source.
    """

    label: str
    messages: Iterable[AttachedFeedback[Any]]

BackpropagateFeedback dataclass

Bases: Feedback

Handler for backpropagating feedback.

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class BackpropagateFeedback(Feedback):
    """
    Handler for backpropagating feedback.
    """

    label: str
    back: Callable[[ValueFeedback[Any]], Iterable[AttachedFeedback[Any]]]

AttachedFeedback dataclass

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class AttachedFeedback[T]:
    msg: ValueFeedback[T]
    dst: nd.TypedSpaceElementRef[T]

ValueFeedback dataclass

Bases: Generic[T_co]

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class ValueFeedback(Generic[T_co]):
    pass

GoodValue dataclass

Bases: ValueFeedback[Never]

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class GoodValue(ValueFeedback[Never]):
    pass

BadValue dataclass

Bases: ValueFeedback[Never]

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class BadValue(ValueFeedback[Never]):
    error: dp.Error

BetterValue dataclass

Bases: ValueFeedback[T]

Source code in src/delphyne/stdlib/hindsight_feedback.py
@dataclass(frozen=True)
class BetterValue[T](ValueFeedback[T]):
    value: T

send

Source code in src/delphyne/stdlib/hindsight_feedback.py
def send[T](
    msg: ValueFeedback[T], to: nd.TypedSpaceElementRef[T], /
) -> AttachedFeedback[T]:
    return AttachedFeedback(msg, to)

feedback

feedback(
    label: str, messages: Iterable[AttachedFeedback[Any]]
) -> Strategy[Feedback, object, None]
Source code in src/delphyne/stdlib/hindsight_feedback.py
def feedback(
    label: str, messages: Iterable[AttachedFeedback[Any]]
) -> dp.Strategy[Feedback, object, None]:
    yield nd.spawn_node(ThrowFeedback, label=label, messages=messages)
    return None

backward

backward(
    label: str,
    res: T,
    back: Callable[[ValueFeedback[T]], Iterable[AttachedFeedback[Any]]],
) -> Strategy[Feedback, object, None]
Source code in src/delphyne/stdlib/hindsight_feedback.py
def backward[T](
    label: str,
    res: T,
    back: Callable[[ValueFeedback[T]], Iterable[AttachedFeedback[Any]]],
) -> dp.Strategy[Feedback, object, None]:
    yield nd.spawn_node(BackpropagateFeedback, label=label, back=back)
    return None

elim_feedback

elim_feedback(env: PolicyEnv, policy: Any) -> PureTreeTransformerFn[Feedback, Never]

Eliminate the Feedback effect, by removing all feedback nodes.

Source code in src/delphyne/stdlib/hindsight_feedback.py
@pol.contextual_tree_transformer
def elim_feedback(
    env: pol.PolicyEnv,
    policy: Any,
) -> pol.PureTreeTransformerFn[Feedback, Never]:
    """
    Eliminate the `Feedback` effect, by removing all feedback nodes.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Feedback | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Feedback):
            return transform(tree.child(None))
        return tree.transform(tree.node, transform)

    return transform

Message

Message dataclass

Bases: Node

The standard Message effect.

Message nodes are tree nodes carrying a message. They have a unique child. They can be eliminated using the elim_messages tree transformer, which automatically logs their content.

This effect is useful for debugging strategies. Using print statements in strategies is discouraged since strategy computations are replayed every time a child of the associated tree is computed, causing the same message to be repeatedly printed.

Source code in src/delphyne/stdlib/nodes.py
@dataclass
class Message(dp.Node):
    """
    The standard `Message` effect.

    Message nodes are tree nodes carrying a message. They have a unique
    child. They can be eliminated using the `elim_messages` tree
    transformer, which automatically logs their content.

    This effect is useful for debugging strategies. Using `print`
    statements in strategies is discouraged since strategy computations
    are replayed every time a child of the associated tree is computed,
    causing the same message to be repeatedly printed.
    """

    msg: str
    data: object | None
    level: dp.LogLevel | None

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def summary_message(self) -> str:
        return self.msg

message

message(
    msg: str, data: object | None = None, level: LogLevel | None = None
) -> Strategy[Message, object, None]

Log a debugging message. See Message for more details.

Parameters:

- msg (str, required): The message to log.
- data (object | None, default: None): Optional data to attach to the message.
- level (LogLevel | None, default: None): Optional severity level of the message.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from message(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.
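
For example (the parse function and the Expr type are hypothetical):

def parse_and_log(raw: str) -> dp.Strategy[Message, object, Expr]:
    expr = parse(raw)
    # Log the parsed expression; it is only shown once the `Message`
    # effect is eliminated with `elim_messages`.
    yield from message("Parsed expression", data={"expr": str(expr)}, level="info")
    return expr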

Source code in src/delphyne/stdlib/nodes.py
def message(
    msg: str, data: object | None = None, level: dp.LogLevel | None = None
) -> dp.Strategy[Message, object, None]:
    """
    Log a debugging message. See `Message` for more details.

    Arguments:
        msg: The message to log.
        data: Optional data to attach to the message.
        level: Optional severity level of the message.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from message(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Message, msg=msg, data=data, level=level)

elim_messages

elim_messages(
    env: PolicyEnv,
    policy: Any,
    show_in_log: bool = True,
    default_log_level: LogLevel = "info",
) -> PureTreeTransformerFn[Message, Never]

Eliminate the Message effect.

Parameters:

- show_in_log (bool, default: True): Whether to log the associated content whenever a message node is encountered.
- default_log_level (LogLevel, default: "info"): The default severity level to use when no severity level is specified in the message node.
Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def elim_messages(
    env: PolicyEnv,
    policy: Any,
    show_in_log: bool = True,
    default_log_level: dp.LogLevel = "info",
) -> pol.PureTreeTransformerFn[Message, Never]:
    """
    Eliminate the `Message` effect.

    Arguments:
        show_in_log: Whether to log the associated content whenever a
            message node is encountered.
        default_log_level: The default severity level to use when no
            severity level is specified in the message node.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Message | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Message):
            if show_in_log:
                metadata = {"attached": tree.node.data}
                level = tree.node.level or default_log_level
                env.log(level, tree.node.msg, metadata=metadata, loc=tree)
            return transform(tree.child(None))
        return tree.transform(tree.node, transform)

    return transform

Factor and Value

Factor dataclass

Bases: Node

The standard Factor effect.

A Factor node allows computing a confidence score in the [0, 1] interval. This confidence can be computed by a query or a dedicated strategy but only one element will be generated from the resulting space. Instead of having an oracle compute a numerical value directly, it computes an evaluation object that is then transformed into a number using a policy-provided function. This allows greater flexibility on the policy side. If no such function is given, the whole node is ignored.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Factor(dp.Node):
    """
    The standard `Factor` effect.

    A `Factor` node allows computing a confidence score in the [0, 1]
    interval. This confidence can be computed by a query or a dedicated
    strategy but only one element will be generated from the resulting
    space. Instead of having an oracle compute a numerical value
    directly, it computes an evaluation object that is then transformed
    into a number using a policy-provided function. This allows greater
    flexibility on the policy side. If no such function is given, the
    whole node is ignored.
    """

    eval: OpaqueSpace[Any, Any]
    factor: FromPolicy[Callable[[Any], float] | None]

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def primary_space(self):
        return self.eval

factor

factor(
    eval: Opaque[P, E],
    factor: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> Strategy[Factor, P, None]

Apply a multiplicative penalty to the current search branch.

Parameters:

- eval (Opaque[P, E], required): An opaque space, typically induced by a strategy or a query whose purpose is to evaluate the current search state, returning an evaluation object of type E. Policies typically extract a single element from this space.
- factor (Callable[[P], Callable[[E], float] | None], required): An inner-policy-dependent function that maps an evaluation object of type E to an actual numerical penalty, as a multiplicative factor in the [0, 1] interval. If no function is provided, the whole node is ignored. Separating eval and factor allows greater flexibility for policies to tune how multidimensional state evaluations are reduced into a single number.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from factor(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.
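
A sketch of using factor, assuming a hypothetical EvalState query that returns an evaluation object of type Eval, and an inner policy type MyIP exposing an evaluate prompting policy and a state_penalty function of type Callable[[Eval], float] | None:

def assess(state: State) -> dp.Strategy[Factor, MyIP, None]:
    # Penalize the current branch using a policy-provided function.
    yield from factor(
        EvalState(state).using(lambda p: p.evaluate),
        lambda p: p.state_penalty,
    )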

Source code in src/delphyne/stdlib/nodes.py
def factor[E, P](
    eval: Opaque[P, E],
    factor: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Factor, P, None]:
    """
    Apply a multiplicative penalty to the current search branch.

    Arguments:
        eval: An opaque space, typically induced by a strategy or a
            query whose purpose is to evaluate the current search state,
            returning an evaluation object of type `E`. Policies
            typically extract a single element from this space.
        factor: An inner-policy-dependent function that maps an
            evaluation object of type `E` to an actual numerical
            penalty, as a multiplicative factor in the [0, 1] interval.
            If no function is provided, the whole node is ignored.
            Separating `eval` and `factor` allows greater flexibility
            for policies to tune how multidimensional state evaluations
            are reduced into a single number.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from factor(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Factor, eval=eval, factor=factor)

Value dataclass

Bases: Node

The standard Value effect.

Similar to Factor, except that the resulting number is used to set the whole value of the branch instead of just multiplying it.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Value(dp.Node):
    """
    The standard `Value` effect.

    Similar to `Factor`, except that the resulting number is used to set
    the whole value of the branch instead of just multiplying it.
    """

    eval: OpaqueSpace[Any, Any]
    value: FromPolicy[Callable[[Any], float] | None]

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def primary_space(self):
        return self.eval

value

value(
    eval: Opaque[P, E],
    value: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> Strategy[Value, P, None]

Set the value of the current search branch.

See the similar factor function for more details.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from value(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def value[E, P](
    eval: Opaque[P, E],
    value: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Value, P, None]:
    """
    Set the value of the current search branch.

    See the similar `factor` function for more details.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from value(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Value, eval=eval, value=value)

elim_values

elim_values(env: PolicyEnv, policy: Any) -> PureTreeTransformerFn[Value, Never]

Eliminate the Value effect.

Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def elim_values(
    env: PolicyEnv,
    policy: Any,
) -> pol.PureTreeTransformerFn[Value, Never]:
    """
    Eliminate the `Value` effect.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Value | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Value):
            return transform(tree.child(None))
        return tree.transform(tree.node, transform)

    return transform

binarize_values

binarize_values(
    env: PolicyEnv, policy: Any, *, threshold: float = 0.5
) -> PureTreeTransformerFn[Value, Branch | Fail]

Turn value nodes into assertions based on a threshold.

Parameters:

- threshold: The threshold above which a value is considered acceptable.

Warning

Value nodes are transformed into Branch nodes, so it is important that the associated opaque spaces for computing values only generate one candidate, ensuring that no actual branching can happen on value estimation.

Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def binarize_values(
    env: PolicyEnv,
    policy: Any,
    *,
    threshold: float = 0.5,
) -> pol.PureTreeTransformerFn[Value, Branch | Fail]:
    """
    Turn value nodes into assertions based on a threshold.

    Arguments:
        threshold: The threshold above which a value is considered
            acceptable.

    !!! warning
        `Value` nodes are transformed into `Branch` nodes and so it is
        important that the associated opaque spaces for computing
        strategies only generate one candidate so that no actual
        branching can happen on value estimation.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Value | N, P, T],
    ) -> dp.Tree[Branch | Fail | N, P, T]:
        if isinstance(tree.node, Value):
            node = tree.node

            def branch_child(
                eval: dp.Value,
            ) -> dp.Tree[Branch | Fail | N, P, T]:
                value_comp = node.value(policy)
                if (
                    value_comp is None
                    or value_comp(dp.drop_refs(eval)) >= threshold
                ):
                    return transform(tree.child(None))
                else:
                    return dp.Tree[Branch | Fail | N, P, T](
                        Fail(dp.Error()),
                        # What we pass for `child` does not matter since
                        # it will never be accessed.
                        child=(lambda v: transform(tree.child(None))),
                        ref=tree.ref,
                    )

            return dp.Tree[Branch | Fail | N, P, T](
                Branch(node.eval, meta=lambda _: None), branch_child, tree.ref
            )
        return tree.transform(tree.node, transform)

    return transform

Join

Join dataclass

Bases: Node

The standard Join effect.

This effect can be triggered using the join function. A Join node features a sequence of embedded trees.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Join(dp.Node):
    """
    The standard `Join` effect.

    This effect can be triggered using the `join` function. A `Join`
    node features a sequence of embedded trees.
    """

    subs: Sequence[dp.EmbeddedTree[Any, Any, Any]]
    meta: FromPolicy[NodeMeta | None]

    @override
    def navigate(self) -> dp.Navigation:
        ret: list[Any] = []
        for sub in self.subs:
            ret.append((yield sub))
        return tuple(ret)

join

join(
    subs: Sequence[StrategyComp[N, P, T]],
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
) -> Strategy[N, P, Sequence[T]]

Evaluate a sequence of independent strategy computations, possibly in parallel.

Parameters:

- subs (Sequence[StrategyComp[N, P, T]], required): A sequence of strategy computations to evaluate.
- meta (Callable[[P], NodeMeta | None], default: lambda _: None): An optional mapping from the ambient inner policy to arbitrary metadata accessible to search policies.

Returns:

- Strategy[N, P, Sequence[T]]: A sequence featuring all computation results.
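
A sketch of using join, assuming a hypothetical prove_lemma strategy function whose calls produce strategy computations with node signature Branch | Fail:

def prove_all(lemmas: Sequence[str]) -> dp.Strategy[Branch | Fail, MyIP, Sequence[Proof]]:
    # Evaluate all sub-strategies, possibly in parallel, and collect results.
    proofs = yield from join([prove_lemma(lemma) for lemma in lemmas])
    return proofs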

Source code in src/delphyne/stdlib/nodes.py
def join[N: dp.Node, P, T](
    subs: Sequence[dp.StrategyComp[N, P, T]],
    meta: Callable[[P], NodeMeta | None] = lambda _: None,
) -> dp.Strategy[N, P, Sequence[T]]:
    """
    Evaluate a sequence of independent strategy computations, possibly
    in parallel.

    Arguments:
        subs: A sequence of strategy computations to evaluate.
        meta: An optional mapping from the ambient inner policy to
            arbitrary metadata accessible to search policies.

    Returns:
        A sequence featuring all computation results.
    """
    recv = yield spawn_node(Join, subs=subs, meta=meta)
    return cast(Sequence[T], recv.action)

elim_join

elim_join(env: PolicyEnv, policy: Any) -> PureTreeTransformerFn[Join, Never]
Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def elim_join(
    env: PolicyEnv,
    policy: Any,
) -> pol.PureTreeTransformerFn[Join, Never]:
    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Join | N, P, T],
    ) -> dp.Tree[N, P, T]:
        node = tree.node
        if isinstance(node, Join):

            def aux(computed: tuple[Any, ...]) -> dp.Tree[N, P, T]:
                if len(computed) == len(node.subs):
                    # Once all joined values are computed, continue
                    return transform(tree.child(computed))
                # Otherwise, compute the next joined value and then all
                # remaining ones.
                compute_next = transform(node.subs[len(computed)].spawn_tree())
                return bind_tree(compute_next, lambda v: aux(computed + (v,)))

            return aux(())
        return tree.transform(node, transform)

    return transform

Compute

Compute dataclass

Bases: Node

The standard Compute effect.

For efficiency and replicability reasons, strategies must not directly perform expensive and possibly non-replicable computations. For example, a strategy should not directly call an SMT solver since:

  • The call may be expensive, and strategy computations are replayed from scratch every time a child is computed in the corresponding tree (see documentation for reify).
  • SMT solvers using wall-time timeouts may return different results when called repeatedly on the same input.

The Compute effect allows performing an expensive and possibly non-deterministic computation by issuing a special __Computation__ query that specifies the computation to be performed. Such a query is not answered by an LLM, but by actually performing the described computation. Special support is available in the demonstration interpreter in the form of implicit answers, allowing __Computation__ queries to be automatically answered when running tests. Generated answers can be hardcoded in demonstrations after the fact via proper editor support (i.e. using the Add Implicit Answers code action from Delphyne's VSCode extension).

Source code in src/delphyne/stdlib/computations.py
@dataclass
class Compute(dp.Node):
    """
    The standard `Compute` effect.

    For efficiency and replicability reasons, strategies must not directly
    perform expensive and possibly non-replicable computations. For example,
    a strategy should not directly call an SMT solver since:

    - The call may be expensive, and strategy computations are replayed from
    scratch every time a child is computed in the corresponding tree (see
    documentation for `reify`).
    - SMT solvers using wall-time timeouts may return different results when
    called repeatedly on the same input.

    The `Compute` effect allows performing an expensive and possibly
    non-deterministic computation by issuing a special `__Computation__`
    query that specifies the computation to be performed. Such a query is
    not answered by an LLM, but by _actually_ performing the described
    computation. Special support is available in the demonstration
    interpreter in the form of *implicit answers*, allowing
    `__Computation__` queries to be automatically answered when running
    tests. Generated answers can be hardcoded in demonstrations **after**
    the fact via proper editor support (i.e. using the `Add Implicit
    Answers` code action from Delphyne's VSCode extension).
    """

    query: dp.TransparentQuery[Any]
    override_args: dp.FromPolicy[dict[str, Any] | None] | None

    # Takes as an argument a dict of overriden args
    _comp: Callable[[dict[str, Any] | None], Any]
    _ret_type: ty.TypeAnnot[Any]

    def navigate(self) -> dp.Navigation:
        return (yield self.query)

    def run_computation(
        self, *, overriden_args: dict[str, Any] | None = None
    ) -> str:
        ret = self._comp(overriden_args)
        serialized = dump_yaml(self._ret_type, ret)
        return serialized

    def run_computation_with_cache(
        self,
        *,
        cache: dp.LLMCache | None,
        overriden_args: dict[str, Any] | None = None,
    ) -> str:
        """
        Disguise the computation as an LLM request so as to reuse the
        caching infrastructure.
        """

        def _compute_answer() -> dp.LLMResponse:
            res = self.run_computation(overriden_args=overriden_args)
            return dp.LLMResponse(outputs=[dp.LLMOutput(content=res)])

        query = self.query.attached.query
        assert isinstance(query, __Computation__)
        serialized = dump_yaml(Any, query.__dict__)
        chat = (dp.UserMessage(serialized),)
        req = dp.LLMRequest(
            chat=chat, num_completions=1, options={"model": "__compute__"}
        )
        resp = md.fetch_or_answer(cache, req, lambda _: _compute_answer())
        assert len(resp.outputs) == 1
        answer = resp.outputs[0].content
        assert isinstance(answer, str)
        return answer

compute

compute(
    f: Callable[A, T],
    *,
    override_args: Callable[[P], dict[str, Any] | None] | None = None,
    inner_policy_type: type[P] | None = None,
) -> Callable[A, Strategy[Compute, P, T]]

Triggering function for the Compute effect.

Parameters:

- f (Callable[A, T], required): Function performing an expensive computation. It must feature type annotations and its arguments must be JSON-serializable. It does not need to be deterministic.
- override_args (Callable[[P], dict[str, Any] | None] | None, default: None): Mapping from the ambient inner policy to a dictionary overriding some of the function arguments. These overrides are only visible on the policy side and do not affect the underlying __Computation__ query. This is particularly useful to override timeout parameters in policies.
- inner_policy_type (type[P] | None, default: None): Ambient inner policy type. This information is not used at runtime but it can be provided to help type inference when necessary.
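
A sketch of wrapping an expensive call with compute (the check_sat function and the smt_timeout field of the inner policy type MyIP are hypothetical):

def check_sat(formula: str, timeout: float = 10.0) -> bool:
    # Expensive, possibly non-deterministic call to an external solver.
    # Arguments are JSON-serializable and the signature is fully annotated.
    ...

def verify(formula: str) -> dp.Strategy[Compute, MyIP, bool]:
    sat = yield from compute(
        check_sat,
        override_args=lambda p: {"timeout": p.smt_timeout},
    )(formula)
    return sat
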
Source code in src/delphyne/stdlib/computations.py
def compute[**A, T, P](
    f: Callable[A, T],
    *,
    override_args: Callable[[P], dict[str, Any] | None] | None = None,
    inner_policy_type: type[P] | None = None,
) -> Callable[A, dp.Strategy[Compute, P, T]]:
    """
    Triggering function for the `Compute` effect.

    Arguments:
        f: Function performing an expensive computation. It must feature
            type annotations and its arguments must be
            JSON-serializable. It does not need to be deterministic.
        override_args: Mapping from the ambient inner policy to a
            dictionary overriding some of the function arguments. These
            overrides are only visible on the policy side and do not affect
            the underlying `__Computation__` query. This is particularly
            useful to override timeout parameters in policies.
        inner_policy_type: Ambient inner policy type. This information
            is not used at runtime but it can be provided to help type
            inference when necessary.
    """

    def wrapped(
        *args: A.args, **kwargs: A.kwargs
    ) -> dp.Strategy[Compute, object, T]:
        def comp(overriden_args: dict[str, Any] | None) -> Any:
            return f(*args, **(kwargs | (overriden_args or {})))  # type: ignore

        ret_type = insp.function_return_type(f)
        assert not isinstance(ret_type, ty.NoTypeInfo)
        fun_args = insp.function_args_dict(f, args, kwargs)
        fun = insp.function_name(f)
        assert fun is not None
        query = dp.TransparentQuery.build(__Computation__(fun, fun_args))
        recv = yield spawn_node(
            Compute,
            query=query,
            override_args=override_args,
            _comp=comp,
            _ret_type=ret_type,
        )
        ret = ty.pydantic_load(ret_type, recv.action)
        return cast(T, ret)

    return wrapped

elim_compute

elim_compute(
    env: PolicyEnv,
    policy: Any,
    *,
    force_bypass_cache: bool = False,
    log_computations: LogLevel | None = None,
    log_long_computations: tuple[LogLevel, float] | None = None,
    override_args: dict[str, Any] | None = None,
) -> PureTreeTransformerFn[Compute, Never]

Eliminate the Compute effect by performing the computation when computing tree children (making the child function possibly nondeterministic).

Parameters:

- force_bypass_cache (bool, default: False): If set to True, do not cache computation results, even if a cache is available in the global policy environment.
- log_computations (LogLevel | None, default: None): If set, log every performed computation at the given severity level.
- log_long_computations (tuple[LogLevel, float] | None, default: None): If set, log every computation taking more than the given number of seconds at the given severity level. When set to None, this setting can be overridden by PolicyEnv.log_long_computations.
- override_args (dict[str, Any] | None, default: None): Overridden argument values for all computations. This is particularly useful for setting global timeouts. Argument values specified this way have lower precedence than those specified with the override_args argument of compute.
Source code in src/delphyne/stdlib/computations.py
@dp.contextual_tree_transformer
def elim_compute(
    env: PolicyEnv,
    policy: Any,
    *,
    force_bypass_cache: bool = False,
    log_computations: dp.LogLevel | None = None,
    log_long_computations: tuple[dp.LogLevel, float] | None = None,
    override_args: dict[str, Any] | None = None,
) -> dp.PureTreeTransformerFn[Compute, Never]:
    """
    Eliminate the `Compute` effect by performing the computation when
    computing tree children (making the `child` function possibly
    nondeterministic).

    Arguments:
        force_bypass_cache: If set to `True`, do not cache computation
            results, even if a cache is available in the global policy
            environment.
        log_computations: If set, log every performed computation at the
            given severity level.
        log_long_computations: If set, log every computation taking more
            than the given number of seconds at the given severity
            level. When set to `None`, this setting can be overridden by
            `PolicyEnv.log_long_computations`.
        override_args: Overridden argument values for all computations.
            This is particularly useful for setting global timeouts.
            Argument values specified this way have lower precedence
            than those specified with the `override_args` argument of
            `compute`.
    """

    if log_long_computations is None:
        log_long_computations = env.log_long_computations

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Compute | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Compute):
            cache = None
            if not force_bypass_cache:
                cache = env.cache
            query = tree.node.query.attached.query
            assert isinstance(query, __Computation__), str(type(query))
            lid = None
            if log_computations:
                lid = env.log(
                    log_computations,
                    "computation_started",
                    {"computation": query},
                    loc=tree,
                )
            overriden: dict[str, Any] = override_args or {}
            if tree.node.override_args is not None:
                overriden_local = tree.node.override_args(policy)
                if overriden_local is not None:
                    overriden = overriden | overriden_local
            start = time.time()
            answer = tree.node.run_computation_with_cache(
                cache=cache, overriden_args=overriden
            )
            _elapsed = time.time() - start
            if log_computations:
                assert lid is not None
                env.log(
                    log_computations,
                    "computation_finished",
                    {"elapsed": _elapsed, "result": answer},
                    loc=tree,
                    related=[lid],
                )
            if log_long_computations and _elapsed > log_long_computations[1]:
                env.log(
                    log_long_computations[0],
                    "long_computation",
                    {
                        "computation": query,
                        "elapsed": _elapsed,
                        "result": answer,
                    },
                    loc=tree,
                )
            tracked = tree.node.query.attached.parse_answer(
                dp.Answer(None, answer)
            )
            assert not isinstance(tracked, dp.ParseError)
            return transform(tree.child(tracked))

        return tree.transform(tree.node, transform)

    return transform

__Computation__ dataclass

Bases: PseudoQuery[object]

A special query that represents a computation.

Returns a parsed JSON value.

Attributes:

- fun (str): Name of the function to call.
- args (dict[str, Any]): Arguments to pass to the function, as a dictionary.

Source code in src/delphyne/stdlib/computations.py
@dataclass
class __Computation__(dp.PseudoQuery[object]):
    """
    A special query that represents a computation.

    Returns a parsed JSON value.

    Attributes:
        fun: Name of the function to call.
        args: Arguments to pass to the function, as a dictionary.
    """

    fun: str
    args: dict[str, Any]

    @override
    def parse_answer(self, answer: dp.Answer) -> object:
        # We expect answers to feature a YAML serialization of the
        # computation result, as a string. Also, we do not return
        # `ParseError` when parsing fails since this is definitely a
        # user error and not an LLM error.
        # TODO: transition to using structured answers?
        assert isinstance(answer.content, str)
        return yaml.safe_load(answer.content)

Data

Data dataclass

Bases: Node

The standard "Data" effect.

This effect allows loading external data. Strategies must be monotonic with respect to data, meaning that adding a new data file or adding a new key into a data dictionary must not break an oracular program. Thus, a form of learning can be implemented by growing a database of learned facts.

Source code in src/delphyne/stdlib/data.py
@dataclass
class Data(dp.Node):
    """
    The standard "Data" effect.

    This effect allows loading external data. Strategies must be
    monotonic with respect to data, meaning that adding a new data file
    or adding a new key into a data dictionary must not break an
    oracular program. Thus, a form of learning can be implemented by
    growing a database of learned facts.
    """

    query: dp.TransparentQuery[Any]

    def navigate(self) -> dp.Navigation:
        return (yield self.query)

load_data

load_data(
    refs: Sequence[DataRef], type: type[T]
) -> Strategy[Data, object, Sequence[T]]
load_data(
    refs: Sequence[DataRef], type: Any = NoTypeInfo
) -> Strategy[Data, object, Sequence[Any]]

Load external data.

An exception is raised if any piece of data is not found, thus enforcing monotonicity (assuming this exception is never caught in strategy code).

Parameters:

- refs (Sequence[DataRef], required): References to the data to load. A list of identical length is returned.
- type (Any, default: NoTypeInfo): Optional type information for the elements of the returned list. If provided, Pydantic is used to load the data.

Raises:

- DataNotFound: If any of the data references could not be found.
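
For example, loading a single entry from a hypothetical lemmas.yaml data file:

def use_lemma() -> dp.Strategy[Data, object, str]:
    # Load the value associated with key "triangle_ineq" in lemmas.yaml.
    [lemma] = yield from load_data([("lemmas.yaml", "triangle_ineq")], type=str)
    return lemma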

Source code in src/delphyne/stdlib/data.py
def load_data(
    refs: Sequence[DataRef], type: Any = NoTypeInfo
) -> dp.Strategy[Data, object, Sequence[Any]]:
    """
    Load external data.

    An exception is raised if any piece of data is not found, thus
    enforcing monotonicity (assuming this exception is never caught in
    strategy code).

    Arguments:
        refs: References to the data to load. A list of identical length
            is returned.
        type: Optional type information for the elements of the returned
            list. If provided, Pydantic is used to load the data.

    Raises:
        DataNotFound: If any of the data references could not be found.
    """

    if not refs:
        return []

    query = dp.TransparentQuery.build(__LoadData__(refs))
    recv = yield dp.spawn_node(Data, query=query)
    result = cast(Sequence[Any] | DataNotFound, recv.action)
    if isinstance(result, DataNotFound):
        raise result
    if isinstance(type, NoTypeInfo):
        return result
    else:
        return [pydantic_load(type, item) for item in result]

DataRef

DataRef = str | tuple[str, str]

Reference to some data, which is either:

  • A file name (string), in which case the reference refers to the whole file's content.
  • A (file name, key) pair, in which case the reference refers to the value associated with key in the dictionary contained in the file.

DataNotFound dataclass

Bases: Exception

Exception raised when some data is not found.

Source code in src/delphyne/stdlib/data.py
@dataclass
class DataNotFound(Exception):
    """
    Exception raised when some data is not found.
    """

    ref: DataRef

    def __str__(self):
        return f"Data not found: {_pp_data_ref(self.ref)}"

elim_data

elim_data(env: PolicyEnv, policy: Any) -> PureTreeTransformerFn[Data, Never]
Source code in src/delphyne/stdlib/data.py
@dp.contextual_tree_transformer
def elim_data(
    env: dp.PolicyEnv,
    policy: Any,
) -> dp.PureTreeTransformerFn[Data, Never]:
    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Data | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Data):
            query = tree.node.query.attached.query
            assert isinstance(query, __LoadData__)
            answer = _produce_answer(env.data_manager, query)
            tracked = tree.node.query.attached.parse_answer(answer)
            assert not isinstance(tracked, dp.ParseError)
            return transform(tree.child(tracked))
        return tree.transform(tree.node, transform)

    return transform

Flag

Flag dataclass

Bases: Node

The standard Flag effect.

Flags allow providing several implementations for a strategy component, and have policies select which variant to use (or perform search at runtime for selecting variants).

For each flag, a subclass of FlagQuery must be defined, which admits a finite set of answers (one per allowed flag value), along with a default answer. Type parameter F denotes the type of the flag query that can be issued. To express a signature with several flag queries, use Flag[A] | Flag[B] instead of Flag[A | B], so that both kinds of flags can be eliminated separately.

Behavior in demonstrations

Because flag queries override AbstractQuery.default_answer, default flag values are automatically selected by the demonstration interpreter. This behaviour can be overridden by adding answers for flag queries in the queries section, or by using value-based hints (i.e., #flag_value, which is possible since flag queries implement AbstractQuery.finite_answer_set).
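
A sketch of defining and using a flag (the ProofStyle query, its values, and the surrounding strategy are hypothetical; the first listed value is the default):

from typing import Literal

class ProofStyle(FlagQuery[Literal["direct", "by_contradiction"]]):
    pass

def prove(goal: str) -> dp.Strategy[Flag[ProofStyle] | Branch, MyIP, Proof]:
    style = yield from get_flag(ProofStyle)
    if style == "direct":
        ...  # variant 1: search for a direct proof
    else:
        ...  # variant 2: search for a proof by contradiction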

Source code in src/delphyne/stdlib/flags.py
@dataclass(frozen=True)
class Flag[F: FlagQuery[Any]](dp.Node):
    """
    The standard `Flag` effect.

    Flags allow providing several implementations for a strategy
    component, and have policies select which variant to use (or perform
    search at runtime for selecting variants).

    For each flag, a subclass of `FlagQuery` must be defined, which
    admits a finite set of answers (one per allowed flag value), along
    with a default answer. Type parameter `F` denotes the type of the
    flag query that can be issued. To express a signature with several
    flag queries, use `Flag[A] | Flag[B]` instead of `Flag[A | B]`, so
    that both kinds of flags can be eliminated separately.

    ??? info "Behavior in demonstrations"
        Because flag queries override `AbstractQuery.default_answer`,
        default flag values are automatically selected by the
        demonstration interpreter. This behaviour can be overriden by
        adding answers for flag queries in the `queries` section, or by
        using value-based hints (i.e., `#flag_value`, which is possible
        since flag queries implement `AbstractQuery.finite_answer_set`).
    """

    flag: dp.TransparentQuery[Any]

    def summary_message(self) -> str:
        query = self.flag.attached.query
        assert isinstance(query, FlagQuery)
        name = query.query_name()
        return f"{name}: {', '.join(query.flag_values())}"

    def navigate(self) -> dp.Navigation:
        return (yield self.flag)

get_flag

get_flag(flag: type[FlagQuery[T]]) -> Strategy[Flag[Any], object, T]

Triggering function for the Flag effect.

Takes a flag query type as an argument and returns a flag value.

Info

A more precise type cannot be given for this function since Python does not support higher-kinded types.

Source code in src/delphyne/stdlib/flags.py
def get_flag[T: str](
    flag: type[FlagQuery[T]],
) -> dp.Strategy[Flag[Any], object, T]:
    """
    Triggering function for the `Flag` effect.

    Takes a flag query type as an argument and returns a flag value.

    !!! info
        A more precise type cannot be given for this function since
        Python does not support higher-kinded types.
    """
    query = dp.TransparentQuery.build(flag())
    recv = yield spawn_node(Flag, flag=query)
    return cast(T, recv.action)

elim_flag

elim_flag(
    env: PolicyEnv, policy: Any, flag: type[F], val: str
) -> PureTreeTransformerFn[Flag[F], Never]
Source code in src/delphyne/stdlib/flags.py
@dp.contextual_tree_transformer
def elim_flag[F: FlagQuery[Any]](
    env: PolicyEnv,
    policy: Any,
    flag: type[F],
    val: str,
) -> dp.PureTreeTransformerFn[Flag[F], Never]:
    assert val in flag.flag_values()

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Flag[F] | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Flag):
            tree = cast(dp.Tree[Any, P, T], tree)
            node = cast(Flag[Any], tree.node)
            if isinstance(query := node.flag.attached.query, flag):
                node = cast(Flag[F], node)
                answer = dp.Answer(None, val)
                assert answer in query.finite_answer_set()
                tracked = node.flag.attached.parse_answer(answer)
                assert not isinstance(tracked, dp.ParseError)
                return transform(tree.child(tracked))
        tree = cast(dp.Tree[Any, P, T], tree)
        node = cast(Any, tree.node)
        return tree.transform(node, transform)

    return transform

FlagQuery

Bases: PseudoQuery[T]

Base class for flag queries.

Type parameter T must be of the form Literal[s1, ..., sn] where si are string literals. The first value is considered the default.

Source code in src/delphyne/stdlib/flags.py
class FlagQuery[T: str](PseudoQuery[T]):
    """
    Base class for flag queries.

    Type parameter `T` must be of the form `Literal[s1, ..., sn]` where
    `si` are string literals. The first value is considered the default.
    """

    @classmethod
    def flag_values(cls) -> Sequence[str]:
        ans = insp.first_parameter_of_base_class(cls)
        assert (args := insp.literal_type_args(ans)) is not None
        assert len(args) > 0
        assert all(isinstance(a, str) for a in args)
        return cast(Sequence[str], args)

    @override
    def finite_answer_set(self) -> Sequence[dp.Answer]:
        vals = self.flag_values()
        return [dp.Answer(None, v) for v in vals]

    @override
    def default_answer(self) -> dp.Answer:
        return self.finite_answer_set()[0]

    @override
    def parse_answer(self, answer: dp.Answer) -> T | dp.ParseError:
        if not isinstance(answer.content, str):
            return dp.ParseError(description="Flag answer must be a string.")
        ans = self.flag_values()
        if answer.content not in ans:
            msg = f"Invalid flag value: '{answer.content}'."
            return dp.ParseError(description=msg)
        return cast(T, answer.content)