Standard Nodes and Effects

Generic Utilities

spawn_node

spawn_node(node_type: type[N], **args: Any) -> NodeBuilder[Any, Any]

A convenience helper to write effect triggering functions.

Parameters:

    node_type: The type of the node to spawn (e.g., Branch).
    args: Arguments to populate the node fields, passed to Node.spawn.

Source code in src/delphyne/stdlib/nodes.py
def spawn_node[N: dp.Node](
    node_type: type[N], **args: Any
) -> dp.NodeBuilder[Any, Any]:
    """
    A convenience helper to write effect triggering functions.

    Attributes:
        node_type: The type of the node to spawn (e.g., `Branch`).
        args: Arguments to populate the node fields, passed to
            [`Node.spawn`][delphyne.core.trees.Node.spawn].
    """
    return dp.NodeBuilder(lambda spawner: node_type.spawn(spawner, **args))

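For orientation, here is a minimal sketch of how a custom effect-triggering function can be written with spawn_node, in the same style as the branch function below. The Guess node, the guess function, and the assumption that Node, Navigation, Strategy, Opaque and OpaqueSpace are re-exported from the top-level delphyne package are illustrative only.

from dataclasses import dataclass
from typing import Any, cast

import delphyne as dp
from delphyne.stdlib.nodes import spawn_node


@dataclass(frozen=True)
class Guess(dp.Node):
    # Hypothetical effect: a node exposing a single space of candidates.
    cands: dp.OpaqueSpace[Any, Any]

    def navigate(self) -> dp.Navigation:
        return (yield self.cands)


def guess[P, T](cands: dp.Opaque[P, T]) -> dp.Strategy[Guess, P, T]:
    # Effect-triggering function: spawn a `Guess` node and return the
    # element that the surrounding policy selects from `cands`.
    ret = yield spawn_node(Guess, cands=cands)
    return cast(T, ret)
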
FromPolicy

FromPolicy = Callable[[Any], T]

Type for an inner-policy-dependent data field.

We use Any instead of introducing an inner policy type parameter P, since Node is not parametric either. Thus, this alias is mostly meant for readability and expressing intent.

NodeMeta

Abstract base class for node metadata.

Nodes can feature fields with arbitrary metadata accessible to policies (e.g., meta field of Branch). Typing those fields with NodeMeta instead of object or Any allows for better type safety. In particular, it prevents errors that arise from accidentally passing uninstantiated parametric inner policy fields.

Source code in src/delphyne/stdlib/nodes.py
class NodeMeta:
    """
    Abstract base class for node metadata.

    Nodes can feature fields with arbitrary metadata accessible to
    policies (e.g., `meta` field of `Branch`). Typing those fields with
    `NodeMeta` instead of `object` or `Any` allows for better type
    safety. In particular, it prevents errors that arise from
    accidentally passing uninstantiated parametric inner policy fields.
    """

    pass

Branch

Branch dataclass

Bases: Node

The standard Branch effect.

Can be triggered using the branch function, which allows branching over elements of an opaque space.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Branch(dp.Node):
    """
    The standard `Branch` effect.

    Can be triggered using the `branch` function, which allows branching
    over elements of an opaque space.
    """

    cands: OpaqueSpace[Any, Any]
    meta: FromPolicy[NodeMeta] | None

    @override
    def navigate(self) -> dp.Navigation:
        return (yield self.cands)

    @override
    def primary_space(self) -> dp.Space[Any]:
        return self.cands

branch

branch(
    cands: Opaque[P, T],
    meta: Callable[[P], NodeMeta] | None = None,
    inner_policy_type: type[P] | None = None,
) -> Strategy[Branch, P, T]

Branch over the elements of an opaque space.

Parameters:

    cands (Opaque[P, T], required): An opaque space, which can be defined from either a query or a strategy via the using method.
    meta (Callable[[P], NodeMeta] | None, default: None): An optional mapping from the ambient inner policy to arbitrary metadata accessible to search policies.
    inner_policy_type (type[P] | None, default: None): Ambient inner policy type. This information is not used at runtime but it can be provided to help type inference when necessary.
Source code in src/delphyne/stdlib/nodes.py
def branch[P, T](
    cands: Opaque[P, T],
    meta: Callable[[P], NodeMeta] | None = None,
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Branch, P, T]:
    """
    Branch over the elements of an opaque space.

    Arguments:
        cands: An opaque space, which can be defined from either a query
            or a strategy via the `using` method.
        meta: An optional mapping from the ambient inner policy to
            arbitrary metadata accessible to search policies.
        inner_policy_type: Ambient inner policy type. This information
            is not used at runtime but it can be provided to help type
            inference when necessary.
    """
    ret = yield spawn_node(Branch, cands=cands, meta=meta)
    return cast(T, ret)

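As a usage sketch, the fragment below branches over candidate integers and prunes odd ones using ensure (documented further down). The enclosing strategy and the query or sub-strategy producing cands are omitted; the combined Branch | Fail signature and the top-level re-exports are assumptions for illustration.

import delphyne as dp
from delphyne.stdlib.nodes import Branch, Fail, branch, ensure


def pick_even[P](cands: dp.Opaque[P, int]) -> dp.Strategy[Branch | Fail, P, int]:
    # Branch over the candidates proposed by `cands` (an opaque space
    # obtained from a query or strategy via `using`).
    n = yield from branch(cands)
    # Prune the current branch when the candidate is odd.
    yield from ensure(n % 2 == 0, "candidate-is-even")
    return n
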
Fail

Fail dataclass

Bases: Node

The standard Fail effect.

Can be triggered using the fail and ensure functions.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Fail(dp.Node):
    """
    The standard `Fail` effect.

    Can be triggered using the `fail` and `ensure` functions.
    """

    error: dp.Error

    @override
    def leaf_node(self) -> bool:
        return True

    @override
    def navigate(self) -> dp.Navigation:
        assert False
        yield

    @override
    def summary_message(self) -> str:
        return str(self.error)

fail

fail(
    label: str | None = None, *, message: str | None = None, error: Error | None = None
) -> Strategy[Fail, object, NoReturn]

Fail immediately with an error.

The error can be specified using the error keyword argument. As a shortcut, the label and message arguments can also be used to directly specify the corresponding fields of the Error type. Those arguments can only be used if error is not provided.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from fail(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def fail(
    label: str | None = None,
    *,
    message: str | None = None,
    error: dp.Error | None = None,
) -> dp.Strategy[Fail, object, NoReturn]:
    """
    Fail immediately with an error.

    The error can be specified using the `error` keyword argument. As a
    shortcut, the `label` and `message` arguments can also be used to
    directly specify the corresponding fields of the `Error` type. Those
    arguments can only be used if `error` is not provided.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from fail(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Fail, error=_build_error(message, label, error))
    assert False

ensure

ensure(
    prop: bool,
    label: str | None = None,
    *,
    message: str | None = None,
    error: Error | None = None,
) -> Strategy[Fail, object, None]

Ensure that a property holds, otherwise fail with an error.

See fail regarding the label, message and error arguments.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from ensure(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def ensure(
    prop: bool,
    label: str | None = None,
    *,
    message: str | None = None,
    error: dp.Error | None = None,
) -> dp.Strategy[Fail, object, None]:
    """
    Ensure that a property holds, otherwise fail with an error.

    See `fail` regarding the `label`, `message` and `error` arguments.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from ensure(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    if not prop:
        yield from fail(label=label, message=message, error=error)

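The sketch below illustrates the relationship between fail and ensure: the latter is a conditional version of the former. Import paths follow the source location given above; their availability is an assumption.

import delphyne as dp
from delphyne.stdlib.nodes import Fail, ensure, fail


def checked_sqrt(x: float) -> dp.Strategy[Fail, object, float]:
    # Equivalent to: `if not x >= 0: yield from fail("nonnegative", ...)`.
    yield from ensure(x >= 0, "nonnegative", message="Expected a nonnegative number.")
    return x**0.5
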
Message

Message dataclass

Bases: Node

The standard Message effect.

Message nodes are tree nodes carrying a message. They have a unique child. They can be eliminated using the elim_messages tree transformer, which automatically logs their content.

This effect is useful for debugging strategies. Using print statements in strategies is discouraged since strategy computations are replayed every time a child of the associated tree is computed, causing the same message to be repeatedly printed.

Source code in src/delphyne/stdlib/nodes.py
@dataclass
class Message(dp.Node):
    """
    The standard `Message` effect.

    Message nodes are tree nodes carrying a message. They have a unique
    child. They can be eliminated using the `elim_messages` tree
    transformer, which automatically logs their content.

    This effect is useful for debugging strategies. Using `print`
    statements in strategies is discouraged since strategy computations
    are replayed every time a child of the associated tree is computed,
    causing the same message to be repeatedly printed.
    """

    msg: str
    data: object | None
    level: dp.LogLevel | None

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def summary_message(self) -> str:
        return self.msg

message

message(
    msg: str, data: object | None = None, level: LogLevel | None = None
) -> Strategy[Message, object, None]

Log a debugging message. See Message for more details.

Parameters:

    msg (str, required): The message to log.
    data (object | None, default: None): Optional data to attach to the message.
    level (LogLevel | None, default: None): Optional severity level of the message.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from message(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def message(
    msg: str, data: object | None = None, level: dp.LogLevel | None = None
) -> dp.Strategy[Message, object, None]:
    """
    Log a debugging message. See `Message` for more details.

    Arguments:
        msg: The message to log.
        data: Optional data to attach to the message.
        level: Optional severity level of the message.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from message(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Message, msg=msg, data=data, level=level)

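A minimal sketch of using message instead of print inside a strategy (imports assumed as above):

import delphyne as dp
from delphyne.stdlib.nodes import Message, message


def traced_double(x: int) -> dp.Strategy[Message, object, int]:
    # The message is attached to a dedicated tree node; it is only
    # logged when the `Message` effect is eliminated (see `elim_messages`).
    yield from message("doubling input", data={"x": x}, level="info")
    return 2 * x
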
elim_messages

elim_messages(
    env: PolicyEnv,
    policy: Any,
    show_in_log: bool = True,
    default_log_level: LogLevel = "info",
) -> PureTreeTransformerFn[Message, Never]

Eliminate the Message effect.

Parameters:

    show_in_log (bool, default: True): Whether to log the associated content whenever a message node is encountered.
    default_log_level (LogLevel, default: "info"): The default severity level to use when no severity level is specified in the message node.
Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def elim_messages(
    env: PolicyEnv,
    policy: Any,
    show_in_log: bool = True,
    default_log_level: dp.LogLevel = "info",
) -> pol.PureTreeTransformerFn[Message, Never]:
    """
    Eliminate the `Message` effect.

    Arguments:
        show_in_log: Whether to log the associated content whenever a
            message node is encountered.
        default_log_level: The default severity level to use when no
            severity level is specified in the message node.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Message | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Message):
            if show_in_log:
                metadata = {"attached": tree.node.data}
                level = tree.node.level or default_log_level
                env.log(level, tree.node.msg, metadata=metadata, loc=tree)
            return transform(tree.child(None))
        return tree.transform(tree.node, transform)

    return transform

Factor and Value

Factor dataclass

Bases: Node

The standard Factor effect.

A Factor node allows computing a confidence score in the [0, 1] interval. This confidence can be computed by a query or a dedicated strategy but only one element will be generated from the resulting space. Instead of having an oracle compute a numerical value directly, it computes an evaluation object that is then transformed into a number using a policy-provided function. This allows greater flexibility on the policy side. If no such function is given, the whole node is ignored.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Factor(dp.Node):
    """
    The standard `Factor` effect.

    A `Factor` node allows computing a confidence score in the [0, 1]
    interval. This confidence can be computed by a query or a dedicated
    strategy but only one element will be generated from the resulting
    space. Instead of having an oracle compute a numerical value
    directly, it computes an evaluation object that is then transformed
    into a number using a policy-provided function. This allows greater
    flexibility on the policy side. If no such function is given, the
    whole node is ignored.
    """

    eval: OpaqueSpace[Any, Any]
    factor: FromPolicy[Callable[[Any], float] | None]

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def primary_space(self):
        return self.eval

factor

factor(
    eval: Opaque[P, E],
    factor: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> Strategy[Factor, P, None]

Apply a multiplicative penalty to the current search branch.

Parameters:

    eval (Opaque[P, E], required): An opaque space, typically induced by a strategy or a query whose purpose is to evaluate the current search state, returning an evaluation object of type E. Policies typically extract a single element from this space.
    factor (Callable[[P], Callable[[E], float] | None], required): An inner-policy-dependent function that maps an evaluation object of type E to an actual numerical penalty, as a multiplicative factor in the [0, 1] interval. If no function is provided, the whole node is ignored. Separating eval and factor allows greater flexibility for policies to tune how multidimensional state evaluations are reduced into a single number.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from factor(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def factor[E, P](
    eval: Opaque[P, E],
    factor: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Factor, P, None]:
    """
    Apply a multiplicative penalty to the current search branch.

    Arguments:
        eval: An opaque space, typically induced by a strategy or a
            query whose purpose is to evaluate the current search state,
            returning an evaluation object of type `E`. Policies
            typically extract a single element from this space.
        factor: An inner-policy-dependent function that maps an
            evaluation object of type `E` to an actual numerical
            penalty, as a multiplicative factor in the [0, 1] interval.
            If no function is provided, the whole node is ignored.
            Separating `eval` and `factor` allows greater flexibility
            for policies to tune how multidimensional state evaluations
            are reduced into a single number.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from factor(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Factor, eval=eval, factor=factor)

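The sketch below illustrates the division of labor between eval and factor: the strategy requests a structured evaluation while the inner policy decides how to reduce it to a number. The StateEval type and the way the evaluation space is produced are illustrative assumptions.

from dataclasses import dataclass

import delphyne as dp
from delphyne.stdlib.nodes import Factor, factor


@dataclass
class StateEval:
    # Hypothetical multidimensional evaluation of the current state.
    plausibility: float  # in [0, 1]
    completeness: float  # in [0, 1]


def penalize_state[P](
    evaluation: dp.Opaque[P, StateEval],
) -> dp.Strategy[Factor, P, None]:
    # The second argument maps the inner policy to a reduction function;
    # here the policy argument is ignored and only `plausibility` is used.
    # A policy returning `None` would make the node be ignored entirely.
    yield from factor(evaluation, lambda _pol: (lambda e: e.plausibility))
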
Value dataclass

Bases: Node

The standard Value effect.

Similar to Factor, except that the resulting number is used to set the whole value of the branch instead of just multiplying it.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Value(dp.Node):
    """
    The standard `Value` effect.

    Similar to `Factor`, except that the resulting number is used to set
    the whole value of the branch instead of just multiplying it.
    """

    eval: OpaqueSpace[Any, Any]
    value: FromPolicy[Callable[[Any], float] | None]

    @override
    def navigate(self) -> dp.Navigation:
        return None
        yield

    @override
    def primary_space(self):
        return self.eval

value

value(
    eval: Opaque[P, E],
    value: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> Strategy[Value, P, None]

Set the value of the current search branch.

See the similar factor function for more details.

Warning

Like all effect triggering functions, this function must be invoked as:

yield from value(...)

Forgetting yield from may not result in a type error but will result in a no-op at runtime.

Source code in src/delphyne/stdlib/nodes.py
def value[E, P](
    eval: Opaque[P, E],
    value: Callable[[P], Callable[[E], float] | None],
    inner_policy_type: type[P] | None = None,
) -> dp.Strategy[Value, P, None]:
    """
    Set the value of the current search branch.

    See the similar `factor` function for more details.

    !!! warning
        Like all effect triggering functions, this function must be
        invoked as:

            yield from value(...)

        Forgetting `yield from` may not result in a type error but will
        result in a no-op at runtime.
    """
    yield spawn_node(Value, eval=eval, value=value)

elim_values

elim_values(env: PolicyEnv, policy: Any) -> PureTreeTransformerFn[Value, Never]

Eliminate the Value effect.

Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def elim_values(
    env: PolicyEnv,
    policy: Any,
) -> pol.PureTreeTransformerFn[Value, Never]:
    """
    Eliminate the `Value` effect.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Value | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Value):
            return transform(tree.child(None))
        return tree.transform(tree.node, transform)

    return transform

binarize_values

binarize_values(
    env: PolicyEnv, policy: Any, *, threshold: float = 0.5
) -> PureTreeTransformerFn[Value, Branch | Fail]

Turn value nodes into assertions based on a threshold.

Parameters:

    threshold (float, default: 0.5): The threshold above which a value is considered acceptable.

Warning

Value nodes are transformed into Branch nodes, and so it is important that the associated opaque spaces only generate one candidate, so that no actual branching can happen on value estimation.

Source code in src/delphyne/stdlib/nodes.py
@pol.contextual_tree_transformer
def binarize_values(
    env: PolicyEnv,
    policy: Any,
    *,
    threshold: float = 0.5,
) -> pol.PureTreeTransformerFn[Value, Branch | Fail]:
    """
    Turn value nodes into assertions based on a threshold.

    Arguments:
        threshold: The threshold above which a value is considered
            acceptable.

    !!! warning
        `Value` nodes are transformed into `Branch` nodes and so it is
        important that the associated opaque spaces for computing
        strategies only generate one candidate so that no actual
        branching can happen on value estimation.
    """

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Value | N, P, T],
    ) -> dp.Tree[Branch | Fail | N, P, T]:
        if isinstance(tree.node, Value):
            node = tree.node

            def branch_child(
                eval: dp.Value,
            ) -> dp.Tree[Branch | Fail | N, P, T]:
                value_comp = node.value(policy)
                if (
                    value_comp is None
                    or value_comp(dp.drop_refs(eval)) >= threshold
                ):
                    return transform(tree.child(None))
                else:
                    return dp.Tree[Branch | Fail | N, P, T](
                        Fail(dp.Error()),
                        # What we pass for `child` does not matter since
                        # it will never be accessed.
                        child=(lambda v: transform(tree.child(None))),
                        ref=tree.ref,
                    )

            return dp.Tree[Branch | Fail | N, P, T](
                Branch(node.eval, meta=None), branch_child, tree.ref
            )
        return tree.transform(tree.node, transform)

    return transform

Join

Join dataclass

Bases: Node

The standard Join effect.

This effect can be triggered using the join function. A Join node features a sequence of embedded trees.

Source code in src/delphyne/stdlib/nodes.py
@dataclass(frozen=True)
class Join(dp.Node):
    """
    The standard `Join` effect.

    This effect can be triggered using the `join` function. A `Join`
    node features a sequence of embedded trees.
    """

    subs: Sequence[dp.EmbeddedTree[Any, Any, Any]]
    meta: FromPolicy[NodeMeta] | None

    @override
    def navigate(self) -> dp.Navigation:
        ret: list[Any] = []
        for sub in self.subs:
            ret.append((yield sub))
        return tuple(ret)

join

join(
    subs: Sequence[StrategyComp[N, P, T]],
    meta: Callable[[P], NodeMeta] | None = None,
) -> Strategy[N, P, Sequence[T]]

Evaluate a sequence of independent strategy computations, possibly in parallel.

Parameters:

    subs (Sequence[StrategyComp[N, P, T]], required): A sequence of strategy computations to evaluate.
    meta (Callable[[P], NodeMeta] | None, default: None): An optional mapping from the ambient inner policy to arbitrary metadata accessible to search policies.

Returns:

    Strategy[N, P, Sequence[T]]: A sequence featuring all computation results.

Source code in src/delphyne/stdlib/nodes.py
def join[N: dp.Node, P, T](
    subs: Sequence[dp.StrategyComp[N, P, T]],
    meta: Callable[[P], NodeMeta] | None = None,
) -> dp.Strategy[N, P, Sequence[T]]:
    """
    Evaluate a sequence of independent strategy computations, possibly
    in parallel.

    Arguments:
        subs: A sequence of strategy computations to evaluate.
        meta: An optional mapping from the ambient inner policy to
            arbitrary metadata accessible to search policies.

    Returns:
        A sequence featuring all computation results.
    """
    ret = yield spawn_node(Join, subs=subs, meta=meta)
    return cast(Sequence[T], ret)

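As a sketch, join can be used to evaluate independent sub-computations and combine their results. How the StrategyComp values are produced (e.g., by invoking strategy functions defined elsewhere) is not shown; the signature mirrors the one documented above.

from collections.abc import Sequence

import delphyne as dp
from delphyne.stdlib.nodes import join


def solve_all[N: dp.Node, P](
    subtasks: Sequence[dp.StrategyComp[N, P, int]],
) -> dp.Strategy[N, P, int]:
    # Evaluate all sub-computations (possibly in parallel, depending on
    # the policy) and sum their results.
    results = yield from join(subtasks)
    return sum(results)
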
Compute

Compute dataclass

Bases: Node

The standard Compute effect.

For efficiency and replicability reasons, strategies must not directly perform expensive and possibly non-replicable computations. For example, a strategy should not directly call an SMT solver since:

  • The call may be expensive, and strategy computations are replayed from scratch every time a child is computed in the corresponding tree (see documentation for reify).
  • SMT solvers using wall-time timeouts may return different results when called repeatedly on the same input.

The Compute effect allows performing an expensive and possibly non-deterministic computation by issuing a special __Computation__ query that specifies the computation to be performed. Such a query is not answered by an LLM, but by actually performing the described computation. Special support is available in the demonstration interpreter in the form of implicit answers, allowing __Computation__ queries to be automatically answered when running tests. Generated answers can be hardcoded in demonstrations after the fact via proper editor support (i.e. using the Add Implicit Answers code action from Delphyne's VSCode extension).

Source code in src/delphyne/stdlib/computations.py
@dataclass
class Compute(dp.Node):
    """
    The standard `Compute` effect.

    For efficiency and replicability reasons, strategies must not directly
    perform expensive and possibly non-replicable computations. For example,
    a strategy should not directly call an SMT solver since:

    - The call may be expensive, and strategy computations are replayed from
    scratch every time a child is computed in the corresponding tree (see
    documentation for `reify`).
    - SMT solvers using wall-time timeouts may return different results when
    called repeatedly on the same input.

    The `Compute` effect allows performing an expensive and possibly
    non-deterministic computation by issuing a special `__Computation__`
    query that specifies the computation to be performed. Such a query is
    not answered by an LLM, but by _actually_ performing the described
    computation. Special support is available in the demonstration
    interpreter in the form of *implicit answers*, allowing
    `__Computation__` queries to be automatically answered when running
    tests. Generated answers can be hardcoded in demonstrations **after**
    the fact via proper editor support (i.e. using the `Add Implicit
    Answers` code action from Delphyne's VSCode extension).
    """

    query: dp.TransparentQuery[Any]
    override_args: dp.FromPolicy[dict[str, Any] | None] | None

    # Takes as an argument a dict of overriden args
    _comp: Callable[[dict[str, Any] | None], Any]
    _ret_type: ty.TypeAnnot[Any]

    def navigate(self) -> dp.Navigation:
        return (yield self.query)

    def run_computation(
        self, *, overriden_args: dict[str, Any] | None = None
    ) -> str:
        ret = self._comp(overriden_args)
        serialized = dump_yaml(self._ret_type, ret)
        return serialized

    def run_computation_with_cache(
        self,
        *,
        cache: dp.LLMCache | None,
        overriden_args: dict[str, Any] | None = None,
    ) -> str:
        """
        Run the computation using a fake oracle so that the LLM caching
        mechanism can be reused.
        """
        chat = create_prompt(
            self.query.attached.query,
            examples=[],
            params={},
            mode=None,
            env=None,
        )
        req = dp.LLMRequest(chat=chat, num_completions=1, options={})
        model = ComputationOracle(
            partial(self.run_computation, overriden_args=overriden_args)
        )
        resp = model.send_request(req, cache)
        assert len(resp.outputs) == 1
        answer = resp.outputs[0].content
        assert isinstance(answer, str)
        return answer

compute

compute(
    f: Callable[A, T],
    *,
    override_args: Callable[[P], dict[str, Any] | None] | None = None,
    inner_policy_type: type[P] | None = None,
) -> Callable[A, Strategy[Compute, P, T]]

Triggering function for the Compute effect.

Parameters:

    f (Callable[A, T], required): Function performing an expensive computation. It must feature type annotations and its arguments must be JSON-serializable. It does not need to be deterministic.
    override_args (Callable[[P], dict[str, Any] | None] | None, default: None): Mapping from the ambient inner policy to a dictionary overriding some of the function arguments. These overrides are only visible on the policy side and do not affect the underlying __Computation__ query. This is particularly useful to override timeout parameters in policies.
    inner_policy_type (type[P] | None, default: None): Ambient inner policy type. This information is not used at runtime but it can be provided to help type inference when necessary.
Source code in src/delphyne/stdlib/computations.py
def compute[**A, T, P](
    f: Callable[A, T],
    *,
    override_args: Callable[[P], dict[str, Any] | None] | None = None,
    inner_policy_type: type[P] | None = None,
) -> Callable[A, dp.Strategy[Compute, P, T]]:
    """
    Triggering function for the `Compute` effect.

    Arguments:
        f: Function performing an expensive computation. It must feature
            type annotations and its arguments must be
            JSON-serializable. It does not need to be deterministic.
        override_args: Mapping from the ambient inner policy to a
            dictionary overriding some of the function arguments. These
            overrides are only visible on the policy side and do not affect
            the underlying `__Computation__` query. This is particularly
            useful to override timeout parameters in policies.
        inner_policy_type: Ambient inner policy type. This information
            is not used at runtime but it can be provided to help type
            inference when necessary.
    """

    def wrapped(
        *args: A.args, **kwargs: A.kwargs
    ) -> dp.Strategy[Compute, object, T]:
        def comp(overriden_args: dict[str, Any] | None) -> Any:
            return f(*args, **(kwargs | (overriden_args or {})))  # type: ignore

        ret_type = insp.function_return_type(f)
        assert not isinstance(ret_type, ty.NoTypeInfo)
        fun_args = insp.function_args_dict(f, args, kwargs)
        fun = insp.function_name(f)
        assert fun is not None
        query = dp.TransparentQuery.build(__Computation__(fun, fun_args))
        unparsed = yield spawn_node(
            Compute,
            query=query,
            override_args=override_args,
            _comp=comp,
            _ret_type=ret_type,
        )
        ret = ty.pydantic_load(ret_type, unparsed)
        return cast(T, ret)

    return wrapped

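A sketch of wrapping an expensive function with compute. Here, count_divisors stands in for something like an SMT solver call; the function and its timeout parameter are hypothetical.

import delphyne as dp
from delphyne.stdlib.computations import Compute, compute


def count_divisors(n: int, timeout: float = 10.0) -> int:
    # Stand-in for an expensive, possibly non-replicable computation.
    # Arguments must be annotated and JSON-serializable.
    return sum(1 for d in range(1, n + 1) if n % d == 0)


def num_divisors(n: int) -> dp.Strategy[Compute, object, int]:
    # `compute(f)(...)` issues a `__Computation__` query instead of
    # calling `f` directly, so results can be cached and replayed.
    # Policies may later override `timeout` via `override_args` or
    # `elim_compute`.
    num = yield from compute(count_divisors)(n)
    return num
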
elim_compute

elim_compute(
    env: PolicyEnv,
    policy: Any,
    *,
    force_bypass_cache: bool = False,
    log_computations: LogLevel | None = None,
    log_long_computations: tuple[LogLevel, float] | None = None,
    override_args: dict[str, Any] | None = None,
) -> PureTreeTransformerFn[Compute, Never]

Eliminate the Compute effect by performing the computation when computing tree children (making the child function possibly nondeterministic).

Parameters:

    force_bypass_cache (bool, default: False): If set to True, do not cache computation results, even if a cache is available in the global policy environment.
    log_computations (LogLevel | None, default: None): If set, log every performed computation at the given severity level.
    log_long_computations (tuple[LogLevel, float] | None, default: None): If set, log every computation taking more than the given number of seconds at the given severity level. When set to None, this setting can be overridden by PolicyEnv.log_long_computations.
    override_args (dict[str, Any] | None, default: None): Overridden argument values for all computations. This is particularly useful for setting global timeouts. Argument values specified this way have lower precedence than those specified with the override_args argument of compute.
Source code in src/delphyne/stdlib/computations.py
@dp.contextual_tree_transformer
def elim_compute(
    env: PolicyEnv,
    policy: Any,
    *,
    force_bypass_cache: bool = False,
    log_computations: dp.LogLevel | None = None,
    log_long_computations: tuple[dp.LogLevel, float] | None = None,
    override_args: dict[str, Any] | None = None,
) -> dp.PureTreeTransformerFn[Compute, Never]:
    """
    Eliminate the `Compute` effect by performing the computation when
    computing tree children (making the `child` function possibly
    nondeterministic).

    Arguments:
        force_bypass_cache: If set to `True`, do not cache computation
            results, even if a cache is available in the global policy
            environment.
        log_computations: If set, log every performed computation at the
            given severity level.
        log_long_computations: If set, log every computation taking more
            than the given number of seconds at the given severity
            level. When set to `None`, this setting can be overriden by
            `PolicyEnv.log_long_computations`.
        override_args: Overriden argument values for all computations.
            This is particularly useful for setting global timeouts.
            Argument values specified this way have lower precedence
            than those specified with the `override_args` argument of
            `compute`.
    """

    if log_long_computations is None:
        log_long_computations = env.log_long_computations

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Compute | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Compute):
            cache = None
            if not force_bypass_cache:
                cache = env.cache
            query = tree.node.query.attached.query
            assert isinstance(query, __Computation__), str(type(query))
            digest = json_object_digest(ty.pydantic_dump(Any, query))
            if log_computations:
                env.log(
                    log_computations,
                    "computation_started",
                    {"hash": digest, "details": query},
                    loc=tree,
                )
            overriden: dict[str, Any] = override_args or {}
            if tree.node.override_args is not None:
                overriden_local = tree.node.override_args(policy)
                if overriden_local is not None:
                    overriden = overriden | overriden_local
            start = time.time()
            answer = tree.node.run_computation_with_cache(
                cache=cache, overriden_args=overriden
            )
            _elapsed = time.time() - start
            if log_computations:
                env.log(
                    log_computations,
                    "computation_finished",
                    {"hash": digest, "elapsed": _elapsed, "result": answer},
                    loc=tree,
                )
            if log_long_computations and _elapsed > log_long_computations[1]:
                env.log(
                    log_long_computations[0],
                    "long_computation",
                    {
                        "hash": digest,
                        "details": query,
                        "elapsed": _elapsed,
                        "result": answer,
                    },
                    loc=tree,
                )
            tracked = tree.node.query.attached.parse_answer(
                dp.Answer(None, answer)
            )
            assert not isinstance(tracked, dp.ParseError)
            return transform(tree.child(tracked))

        return tree.transform(tree.node, transform)

    return transform

__Computation__ dataclass

Bases: AbstractQuery[object]

A special query that represents a computation.

Returns a parsed JSON value.

Attributes:

    fun (str): Name of the function to call.
    args (dict[str, Any]): Arguments to pass to the function, as a dictionary.
Source code in src/delphyne/stdlib/computations.py
@dataclass
class __Computation__(dp.AbstractQuery[object]):
    """
    A special query that represents a computation.

    Returns a parsed JSON value.

    Attributes:
        fun: Name of the function to call.
        args: Arguments to pass to the function, as a dictionary.
    """

    fun: str
    args: dict[str, Any]

    @override
    def generate_prompt(
        self,
        *,
        kind: str,
        mode: dp.AnswerMode,
        params: dict[str, object],
        extra_args: dict[str, object] | None = None,
        env: dp.AbstractTemplatesManager | None = None,
    ) -> str:
        # The prompt is never sent to LLMs but it is important that it
        # uniquely identifies the query instance since it is used for
        # caching.
        return dump_yaml(Any, self.__dict__)

    @override
    def query_modes(self):
        return [None]

    @override
    def answer_type(self):
        return object

    @override
    def parse_answer(self, answer: dp.Answer) -> object:
        # We expect answers to feature a YAML serialization of the
        # computation result, as a string. Also, we do not return
        # `ParseError` when parsing fails since this is definitely a
        # user error and not an LLM error.
        # TODO: transition to using structured answers?
        assert isinstance(answer.content, str)
        return yaml.safe_load(answer.content)

Data

Data dataclass

Bases: Node

The standard "Data" effect.

This effect allows loading external data. Strategies must be monotonic with respect to data, meaning that adding a new data file or adding a new key into a data dictionary must not break an oracular program. Thus, a form of learning can be implemented by growing a database of learned facts.

Source code in src/delphyne/stdlib/data.py
@dataclass
class Data(dp.Node):
    """
    The standard "Data" effect.

    This effect allows loading external data. Strategies must be
    monotonic with respect to data, meaning that adding a new data file
    or adding a new key into a data dictionary must not break an
    oracular program. Thus, a form of learning can be implemented by
    growing a database of learned facts.
    """

    query: dp.TransparentQuery[Any]

    def navigate(self) -> dp.Navigation:
        return (yield self.query)

load_data

load_data(
    refs: Sequence[DataRef], type: type[T]
) -> Strategy[Data, object, Sequence[T]]
load_data(
    refs: Sequence[DataRef], type: Any = NoTypeInfo
) -> Strategy[Data, object, Sequence[Any]]

Load external data.

An exception is raised if any piece of data is not found, thus enforcing monotonicity (assuming this exception is never caught in strategy code).

Parameters:

    refs (Sequence[DataRef], required): References to the data to load. A list of the same length is returned.
    type (Any, default: NoTypeInfo): Optional type information for the elements of the returned list. If provided, Pydantic is used to load the data.

Raises:

    DataNotFound: If any of the data references could not be found.
Source code in src/delphyne/stdlib/data.py
def load_data(
    refs: Sequence[DataRef], type: Any = NoTypeInfo
) -> dp.Strategy[Data, object, Sequence[Any]]:
    """
    Load external data.

    An exception is raised if any piece of data is not found, thus
    enforcing monotonicity (assuming this exception is never caught in
    strategy code).

    Arguments:
        refs: References to the data to load. A list of identical length
            is returned.
        type: Optional type information for the elements of the returned
            list. If provided, Pydantic is used to load the data.

    Raises:
        DataNotFound: If any of the data references could not be found.
    """

    if not refs:
        return []

    query = dp.TransparentQuery.build(__LoadData__(refs))
    result = yield dp.spawn_node(Data, query=query)
    result = cast(Sequence[Any] | DataNotFound, result)
    if isinstance(result, DataNotFound):
        raise result
    if isinstance(type, NoTypeInfo):
        return result
    else:
        return [pydantic_load(type, item) for item in result]

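A usage sketch for load_data; the data references ("lemmas" and the "threshold" key of "constants") are hypothetical and assumed to be registered with the policy environment's data manager.

import delphyne as dp
from delphyne.stdlib.data import Data, load_data


def count_known_lemmas() -> dp.Strategy[Data, object, int]:
    # Load a whole file and a single dictionary entry from another file.
    lemmas, threshold = yield from load_data(["lemmas", ("constants", "threshold")])
    # Without a `type` argument, both results are plain parsed values.
    return len(lemmas)
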
DataRef

DataRef = str | tuple[str, str]

A reference to some data, which is either:

  • A file name (string), in which case the reference refers to the whole file's content.
  • A (file name, key) pair, in which case the reference refers to the value associated with key in the dictionary contained in the file.

DataNotFound dataclass

Bases: Exception

Exception raised when some data is not found.

Source code in src/delphyne/stdlib/data.py
@dataclass
class DataNotFound(Exception):
    """
    Exception raised when some data is not found.
    """

    ref: DataRef

    def __str__(self):
        return f"Data not found: {_pp_data_ref(self.ref)}"

elim_data

elim_data(env: PolicyEnv, policy: Any) -> PureTreeTransformerFn[Data, Never]
Source code in src/delphyne/stdlib/data.py
@dp.contextual_tree_transformer
def elim_data(
    env: dp.PolicyEnv,
    policy: Any,
) -> dp.PureTreeTransformerFn[Data, Never]:
    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Data | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Data):
            query = tree.node.query.attached.query
            assert isinstance(query, __LoadData__)
            answer = _produce_answer(env.data_manager, query)
            tracked = tree.node.query.attached.parse_answer(answer)
            assert not isinstance(tracked, dp.ParseError)
            return transform(tree.child(tracked))
        return tree.transform(tree.node, transform)

    return transform

Flag

Flag dataclass

Bases: Node

The standard Flag effect.

Flags allow providing several implementations of a strategy component and letting policies select which variant to use (or search over variants at runtime).

For each flag, a subclass of FlagQuery must be defined, which admits a finite set of answers (one per allowed flag value), along with a default answer. Type parameter F denotes the type of the flag query that can be issued. To express a signature with several flag queries, use Flag[A] | Flag[B] instead of Flag[A | B], so that both kinds of flags can be eliminated separately.

Behavior in demonstrations

Because flag queries override AbstractQuery.default_answer, default flag values are automatically selected by the demonstration interpreter. This behaviour can be overridden by adding answers for flag queries in the queries section, or by using value-based hints (i.e., #flag_value, which is possible since flag queries implement AbstractQuery.finite_answer_set).

Source code in src/delphyne/stdlib/flags.py
@dataclass(frozen=True)
class Flag[F: FlagQuery[Any]](dp.Node):
    """
    The standard `Flag` effect.

    Flags allow providing several implementations for a strategy
    component, and have policies select which variant to use (or perform
    search at runtime for selecting variants).

    For each flag, a subclass of `FlagQuery` must be defined, which
    admits a finite set of answers (one per allowed flag value), along
    with a default answer. Type parameter `F` denotes the type of the
    flag query that can be issued. To express a signature with several
    flag queries, use `Flag[A] | Flag[B]` instead of `Flag[A | B]`, so
    that both kinds of flags can be eliminated separately.

    ??? info "Behavior in demonstrations"
        Because flag queries override `AbstractQuery.default_answer`,
        default flag values are automatically selected by the
        demonstration interpreter. This behaviour can be overriden by
        adding answers for flag queries in the `queries` section, or by
        using value-based hints (i.e., `#flag_value`, which is possible
        since flag queries implement `AbstractQuery.finite_answer_set`).
    """

    flag: dp.TransparentQuery[Any]

    def summary_message(self) -> str:
        query = self.flag.attached.query
        assert isinstance(query, FlagQuery)
        name = query.query_name()
        return f"{name}: {', '.join(query.flag_values())}"

    def navigate(self) -> dp.Navigation:
        return (yield self.flag)

get_flag

get_flag(flag: type[FlagQuery[T]]) -> Strategy[Flag[Any], object, T]

Triggering function for the Flag effect.

Takes a flag query type as an argument and returns a flag value.

Info

A more precise type cannot be given for this function since Python does not support higher-kinded types.

Source code in src/delphyne/stdlib/flags.py
def get_flag[T: str](
    flag: type[FlagQuery[T]],
) -> dp.Strategy[Flag[Any], object, T]:
    """
    Triggering function for the `Flag` effect.

    Takes a flag query type as an argument and returns a flag value.

    !!! info
        A more precise type cannot be given for this function since
        Python does not support higher-kinded types.
    """
    query = dp.TransparentQuery.build(flag())
    ret = yield spawn_node(Flag, flag=query)
    return cast(T, ret)

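A sketch of defining and consuming a flag. ProverVariant is a hypothetical flag query (see FlagQuery below); it is assumed that such a subclass needs no additional fields.

from typing import Literal

import delphyne as dp
from delphyne.stdlib.flags import Flag, FlagQuery, get_flag


class ProverVariant(FlagQuery[Literal["simple", "aggressive"]]):
    """Hypothetical flag selecting which proving routine to use."""


def prove() -> dp.Strategy[Flag[ProverVariant], object, str]:
    # "simple" comes first in the Literal, so it is the default value.
    variant = yield from get_flag(ProverVariant)
    if variant == "simple":
        return "ran the simple prover"
    return "ran the aggressive prover"
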
elim_flag

elim_flag(
    env: PolicyEnv, policy: Any, flag: type[F], val: str
) -> PureTreeTransformerFn[Flag[F], Never]
Source code in src/delphyne/stdlib/flags.py
@dp.contextual_tree_transformer
def elim_flag[F: FlagQuery[Any]](
    env: PolicyEnv,
    policy: Any,
    flag: type[F],
    val: str,
) -> dp.PureTreeTransformerFn[Flag[F], Never]:
    assert val in flag.flag_values()

    def transform[N: dp.Node, P, T](
        tree: dp.Tree[Flag[F] | N, P, T],
    ) -> dp.Tree[N, P, T]:
        if isinstance(tree.node, Flag):
            tree = cast(dp.Tree[Any, P, T], tree)
            node = cast(Flag[Any], tree.node)
            if isinstance(query := node.flag.attached.query, flag):
                node = cast(Flag[F], node)
                answer = dp.Answer(None, val)
                assert answer in query.finite_answer_set()
                tracked = node.flag.attached.parse_answer(answer)
                assert not isinstance(tracked, dp.ParseError)
                return transform(tree.child(tracked))
        tree = cast(dp.Tree[Any, P, T], tree)
        node = cast(Any, tree.node)
        return tree.transform(node, transform)

    return transform

FlagQuery

Bases: Query[T]

Base class for flag queries.

Type parameter T must be of the form Literal[s1, ..., sn] where si are string literals. The first value is considered the default.

Source code in src/delphyne/stdlib/flags.py
class FlagQuery[T: str](Query[T]):
    """
    Base class for flag queries.

    Type parameter `T` must be of the form `Literal[s1, ..., sn]` where
    `si` are string literals. The first value is considered the default.
    """

    @classmethod
    def flag_values(cls) -> Sequence[str]:
        ans = insp.first_parameter_of_base_class(cls)
        assert (args := insp.literal_type_args(ans)) is not None
        assert len(args) > 0
        assert all(isinstance(a, str) for a in args)
        return cast(Sequence[str], args)

    def finite_answer_set(self) -> Sequence[dp.Answer]:
        vals = self.flag_values()
        return [dp.Answer(None, v) for v in vals]

    def default_answer(self) -> dp.Answer:
        return self.finite_answer_set()[0]

    def parse_answer(self, answer: dp.Answer) -> T | dp.ParseError:
        if not isinstance(answer.content, str):
            return dp.ParseError(description="Flag answer must be a string.")
        ans = self.flag_values()
        if answer.content not in ans:
            msg = f"Invalid flag value: '{answer.content}'."
            return dp.ParseError(description=msg)
        return cast(T, answer.content)