Mirror of https://github.com/m-labs/artiq.git
cb3b811fd7
After this commit, the delay instruction (again) does not generate
any LLVM IR: all heavy lifting is relegated to the delay and delay_mu
intrinsics. When the interleave transform needs to adjust the global
timeline, it synthesizes a delay_mu intrinsic. This way,
the interleave transformation becomes composable, as the input and
the output IR invariants are the same.
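A rough source-level sketch of that invariant (the real transform rewrites the compiler IR; f, g, and h are placeholder calls, not names from this commit): interleaving merges parallel branches into one sequential stream whose only timeline manipulation is ordinary delay_mu calls, some of them synthesized by the transform.

# Before interleaving: two parallel branches sharing a start time.
with parallel:
    with sequential:
        f()              # runs at the start of the parallel block
        delay_mu(2)
        g()              # runs 2 mu later
    with sequential:
        h()              # must also run at the start, alongside f()

# After interleaving (conceptually): a single sequential stream in which
# the global timeline advances only through explicit delay_mu calls.
with sequential:
    f()
    h()
    delay_mu(2)          # synthesized by the interleave transform
    g()

Since the output manipulates the timeline only through the same delay_mu intrinsic the input may contain, the result can be fed through the transform again.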
Also, code generation is adjusted so that a basic block is split off
not only after a delay call, but also before one; otherwise, e.g.,
code immediately at the beginning of a `with parallel:` branch
would have no choice but to execute after another branch has already
advanced the timeline.
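A hedged illustration of the splitting rule (the basic blocks live in the generated IR, not in the source; boundaries are shown as comments):

with parallel:
    with sequential:
        print("A", now_mu())   # own basic block: split *before* the delay
        delay_mu(2)
        print("B", now_mu())   # new basic block: split *after* the delay

Because the leading print sits in a basic block of its own, the interleaver can schedule it at the branch's start time rather than after the other branch's delay. The test below exercises exactly this situation.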
This takes care of issue #1 described in 50e7b44 and is a step toward solving issue #2.
26 lines · 533 B · Python
# RUN: %python -m artiq.compiler.testbench.jit %s >%t
# RUN: OutputCheck %s --file-to-check=%t

def g():
    with parallel:
        with sequential:
            print("A", now_mu())
            delay_mu(2)
            #
            print("B", now_mu())
        with sequential:
            print("C", now_mu())
            delay_mu(2)
            #
            print("D", now_mu())
            delay_mu(2)
            #
            print("E", now_mu())

# CHECK-L: A 0
# CHECK-L: C 0
# CHECK-L: B 2
# CHECK-L: D 2
# CHECK-L: E 4

g()
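The CHECK lines spell out the interleaved schedule: both branches observe now_mu() == 0 at their first statement (A and C), each delay_mu(2) advances its own branch's position to 2 (B and D), and the second branch's extra delay places E at 4.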