Rename 'with parallel' to 'with interleave' (#265).

whitequark 2016-02-22 13:24:43 +00:00
parent b0e7fddc32
commit 51a5910002
33 changed files with 79 additions and 79 deletions
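Illustration only (not part of the diff): the rename changes the name of the context manager used in kernel code, so a block previously written with 'with parallel:' is now written with 'with interleave:'. The channel names below are borrowed from the tutorial changes further down in this commit.

    for i in range(1000000):
        with interleave:            # previously: with parallel:
            self.ttl0.pulse(2*us)
            self.ttl1.pulse(4*us)
        delay(4*us)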

View File

@ -144,8 +144,8 @@ def fn_print():
def fn_kernel():
return types.TBuiltinFunction("kernel")
def obj_parallel():
return types.TBuiltin("parallel")
def obj_interleave():
return types.TBuiltin("interleave")
def obj_sequential():
return types.TBuiltin("sequential")

View File

@ -1418,7 +1418,7 @@ class Loop(Terminator):
def opcode(self):
return "loop({} times)".format(self.trip_count)
class Parallel(Terminator):
class Interleave(Terminator):
"""
An instruction that schedules several threads of execution
in parallel.
@ -1428,7 +1428,7 @@ class Parallel(Terminator):
super().__init__(destinations, builtins.TNone(), name)
def opcode(self):
return "parallel"
return "interleave"
def destinations(self):
return self.operands

View File

@ -30,7 +30,7 @@ def globals():
"portable": builtins.fn_kernel(),
# ARTIQ context managers
"parallel": builtins.obj_parallel(),
"interleave": builtins.obj_interleave(),
"sequential": builtins.obj_sequential(),
"watchdog": builtins.fn_watchdog(),

View File

@ -747,8 +747,8 @@ class ARTIQIRGenerator(algorithm.Visitor):
if types.is_builtin(context_expr_node.type, "sequential"):
self.visit(node.body)
return
elif types.is_builtin(context_expr_node.type, "parallel"):
parallel = self.append(ir.Parallel([]))
elif types.is_builtin(context_expr_node.type, "interleave"):
interleave = self.append(ir.Interleave([]))
heads, tails = [], []
for stmt in node.body:
@ -758,7 +758,7 @@ class ARTIQIRGenerator(algorithm.Visitor):
tails.append(self.current_block)
for head in heads:
parallel.add_destination(head)
interleave.add_destination(head)
self.current_block = self.add_block()
for tail in tails:

View File

@ -993,7 +993,7 @@ class Inferencer(algorithm.Visitor):
self.generic_visit(node)
typ = node.context_expr.type
if (types.is_builtin(typ, "parallel") or types.is_builtin(typ, "sequential") or
if (types.is_builtin(typ, "interleave") or types.is_builtin(typ, "sequential") or
(isinstance(node.context_expr, asttyped.CallT) and
types.is_builtin(node.context_expr.func.type, "watchdog"))):
# builtin context managers
@ -1092,7 +1092,7 @@ class Inferencer(algorithm.Visitor):
for item_node in node.items:
typ = item_node.context_expr.type.find()
if (types.is_builtin(typ, "parallel") or types.is_builtin(typ, "sequential")) and \
if (types.is_builtin(typ, "interleave") or types.is_builtin(typ, "sequential")) and \
len(node.items) != 1:
diag = diagnostic.Diagnostic("error",
"the '{kind}' context manager must be the only one in a 'with' statement",

View File

@ -66,7 +66,7 @@ class Interleaver:
postdom_tree = None
for insn in func.instructions():
if not isinstance(insn, ir.Parallel):
if not isinstance(insn, ir.Interleave):
continue
# Lazily compute dominators.
@ -79,7 +79,7 @@ class Interleaver:
source_times = [0 for _ in source_blocks]
if len(source_blocks) == 1:
# Immediate dominator for a parallel instruction with one successor
# Immediate dominator for an interleave instruction with one successor
# is the first instruction in the body of the statement which created
# it, but below we expect that it would be the first instruction after
# the statement itself.
@ -87,7 +87,7 @@ class Interleaver:
continue
interleave_until = postdom_tree.immediate_dominator(insn.basic_block)
assert interleave_until is not None # no nonlocal flow in `with parallel`
assert interleave_until is not None # no nonlocal flow in `with interleave`
assert interleave_until not in source_blocks
while len(source_blocks) > 0:
@ -111,7 +111,7 @@ class Interleaver:
assert target_time_delta >= 0
target_terminator = target_block.terminator()
if isinstance(target_terminator, ir.Parallel):
if isinstance(target_terminator, ir.Interleave):
target_terminator.replace_with(ir.Branch(source_block))
elif isinstance(target_terminator, (ir.Delay, ir.Branch)):
target_terminator.set_target(source_block)
@ -119,7 +119,7 @@ class Interleaver:
assert False
source_terminator = source_block.terminator()
if isinstance(source_terminator, ir.Parallel):
if isinstance(source_terminator, ir.Interleave):
source_terminator.replace_with(ir.Branch(source_terminator.target()))
elif isinstance(source_terminator, ir.Branch):
pass
@ -149,7 +149,7 @@ class Interleaver:
if old_decomp.static_target_function is None:
diag = diagnostic.Diagnostic("fatal",
"it is not possible to interleave this function call within "
"a 'with parallel:' statement because the compiler could not "
"a 'with interleave:' statement because the compiler could not "
"prove that the same function would always be called", {},
old_decomp.loc)
self.engine.process(diag)
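A source-level sketch of what this pass does, assuming the semantics described in the manual changes in this commit: each branch of a 'with interleave:' block is a thread of delays and events, and the pass flattens them into a single sequential schedule ordered by timestamp. The branch contents below are illustrative, not taken from the diff.

    with interleave:
        with sequential:            # branch A: events at t = 0 and t = 2
            print("A", now_mu())
            delay_mu(2)
            print("A", now_mu())
        with sequential:            # branch B: events at t = 1 and t = 4
            delay_mu(1)
            print("B", now_mu())
            delay_mu(3)
            print("B", now_mu())
    # Conceptually, after interleaving this runs as one sequential series:
    #   A@0, B@1, A@2, B@4, with a total delay of 4 mu (the longest branch).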

View File

@ -219,7 +219,7 @@ class IODelayEstimator(algorithm.Visitor):
self.visit(node.items)
context_expr = node.items[0].context_expr
if len(node.items) == 1 and types.is_builtin(context_expr.type, "parallel"):
if len(node.items) == 1 and types.is_builtin(context_expr.type, "interleave"):
try:
delays = []
for stmt in node.body:
@ -235,7 +235,7 @@ class IODelayEstimator(algorithm.Visitor):
# since there's no chance that the code will never actually execute
# inside a `with` statement after all.
note = diagnostic.Diagnostic("note",
"while interleaving this 'with parallel:' statement", {},
"while interleaving this 'with interleave:' statement", {},
node.loc)
error.cause.notes += [note]
self.engine.process(error.cause)
@ -249,11 +249,11 @@ class IODelayEstimator(algorithm.Visitor):
if flow_stmt is not None:
note = diagnostic.Diagnostic("note",
"this '{kind}' statement transfers control out of "
"the 'with parallel:' statement",
"the 'with interleave:' statement",
{"kind": flow_stmt.keyword_loc.source()},
flow_stmt.loc)
diag = diagnostic.Diagnostic("error",
"cannot interleave this 'with parallel:' statement", {},
"cannot interleave this 'with interleave:' statement", {},
node.keyword_loc.join(node.colon_loc), notes=[note])
self.engine.process(diag)
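The net effect on delay inference, restated with the example that the signature lit test in this commit checks: the IO delay of a 'with interleave:' block is the maximum of its branches' delays.

    def f(a, b):
        with interleave:
            delay_mu(a)     # branch 1 advances time by a
            delay_mu(b)     # branch 2 advances time by b
    # inferred: f: (a:int(width=64), b:int(width=64))->NoneType delay(max(a, b) mu)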

View File

@ -13,7 +13,7 @@ __all__ = ["host_int", "int", "host_round", "round",
# global namespace for kernels
kernel_globals = (
"sequential", "parallel",
"sequential", "parallel", "interleave",
"delay_mu", "now_mu", "at_mu", "delay",
"seconds_to_mu", "mu_to_seconds",
"watchdog"
@ -264,7 +264,7 @@ _time_manager = _DummyTimeManager()
def set_time_manager(time_manager):
"""Set the time manager used for simulating kernels by running them
directly inside the Python interpreter. The time manager responds to the
entering and leaving of parallel/sequential blocks, delays, etc. and
entering and leaving of interleave/parallel/sequential blocks, delays, etc. and
provides a time-stamped logging facility for events.
"""
global _time_manager
@ -288,7 +288,7 @@ class _Parallel:
The execution time of a parallel block is the execution time of its longest
statement. A parallel block may contain sequential blocks, which themselves
may contain parallel blocks, etc.
may contain interleave blocks, etc.
"""
def __enter__(self):
_time_manager.enter_parallel()
@ -296,7 +296,7 @@ class _Parallel:
def __exit__(self, type, value, traceback):
_time_manager.exit()
parallel = _Parallel()
interleave = _Parallel() # no difference in semantics on host
def delay_mu(duration):
"""Increases the RTIO time by the given amount (in machine units)."""

View File

@ -12,7 +12,7 @@ class CreateTTLPulse(EnvExperiment):
def run(self):
self.ttl_inout.output()
delay_mu(100)
with parallel:
with interleave:
self.ttl_inout.gate_both_mu(1200)
with sequential:
delay_mu(100)

View File

@ -100,7 +100,7 @@ class _Pulses(EnvExperiment):
@kernel
def run(self):
for i in range(3):
with parallel:
with interleave:
with sequential:
self.a.pulse(100+i, 20*us)
self.b.pulse(200+i, 20*us)

View File

@ -16,7 +16,7 @@ class RTT(EnvExperiment):
def run(self):
self.ttl_inout.output()
delay(1*us)
with parallel:
with interleave:
# make sure not to send two commands into the same RTIO
# channel with the same timestamp
self.ttl_inout.gate_rising(5*us)
@ -37,7 +37,7 @@ class Loopback(EnvExperiment):
def run(self):
self.loop_in.input()
delay(1*us)
with parallel:
with interleave:
self.loop_in.gate_rising(2*us)
with sequential:
delay(1*us)
@ -57,7 +57,7 @@ class ClockGeneratorLoopback(EnvExperiment):
self.loop_clock_in.input()
self.loop_clock_out.stop()
delay(1*us)
with parallel:
with interleave:
self.loop_clock_in.gate_rising(10*us)
with sequential:
delay(200*ns)
@ -110,7 +110,7 @@ class LoopbackCount(EnvExperiment):
def run(self):
self.ttl_inout.output()
delay(5*us)
with parallel:
with interleave:
self.ttl_inout.gate_rising(10*us)
with sequential:
for i in range(self.npulses):

View File

@ -1,6 +1,6 @@
# RUN: %python -m artiq.compiler.testbench.inferencer +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
# CHECK-L: ${LINE:+1}: error: the 'parallel' context manager must be the only one in a 'with' statement
with parallel, sequential:
# CHECK-L: ${LINE:+1}: error: the 'interleave' context manager must be the only one in a 'with' statement
with interleave, sequential:
pass

View File

@ -2,4 +2,4 @@
# RUN: OutputCheck %s --file-to-check=%t
# CHECK-L: as x:NoneType
with parallel as x: pass
with interleave as x: pass

View File

@ -2,7 +2,7 @@
# RUN: OutputCheck %s --file-to-check=%t
def f():
with parallel:
with interleave:
if True:
print(1)
else:

View File

@ -10,7 +10,7 @@ def g():
x = f if True else g
def h():
with parallel:
with interleave:
f()
# CHECK-L: ${LINE:+1}: fatal: it is not possible to interleave this function call within a 'with parallel:' statement because the compiler could not prove that the same function would always be called
# CHECK-L: ${LINE:+1}: fatal: it is not possible to interleave this function call within a 'with interleave:' statement because the compiler could not prove that the same function would always be called
x()

View File

@ -2,16 +2,16 @@
# RUN: OutputCheck %s --file-to-check=%t
def f():
# CHECK-L: ${LINE:+1}: error: cannot interleave this 'with parallel:' statement
with parallel:
# CHECK-L: ${LINE:+1}: note: this 'return' statement transfers control out of the 'with parallel:' statement
# CHECK-L: ${LINE:+1}: error: cannot interleave this 'with interleave:' statement
with interleave:
# CHECK-L: ${LINE:+1}: note: this 'return' statement transfers control out of the 'with interleave:' statement
return
delay(1.0)
def g():
while True:
# CHECK-L: ${LINE:+1}: error: cannot interleave this 'with parallel:' statement
with parallel:
# CHECK-L: ${LINE:+1}: note: this 'break' statement transfers control out of the 'with parallel:' statement
# CHECK-L: ${LINE:+1}: error: cannot interleave this 'with interleave:' statement
with interleave:
# CHECK-L: ${LINE:+1}: note: this 'break' statement transfers control out of the 'with interleave:' statement
break
delay(1.0)

View File

@ -5,7 +5,7 @@ def f():
delay_mu(2)
def g():
with parallel:
with interleave:
with sequential:
print("A", now_mu())
f()

View File

@ -5,7 +5,7 @@ def f(n):
delay_mu(n)
def g():
with parallel:
with interleave:
with sequential:
print("A", now_mu())
f(2)

View File

@ -2,7 +2,7 @@
# RUN: OutputCheck %s --file-to-check=%t
def g():
with parallel:
with interleave:
with sequential:
print("A", now_mu())
delay_mu(2)

View File

@ -2,7 +2,7 @@
# RUN: OutputCheck %s --file-to-check=%t
def g():
with parallel:
with interleave:
with sequential:
print("A", now_mu())
delay_mu(3)

View File

@ -5,7 +5,7 @@ def f():
delay_mu(2)
def g():
with parallel:
with interleave:
f()
delay_mu(2)
print(now_mu())

View File

@ -1,7 +1,7 @@
# RUN: %python -m artiq.compiler.testbench.jit %s >%t
# RUN: OutputCheck %s --file-to-check=%t
with parallel:
with interleave:
for x in range(10):
delay_mu(1)
print("a", x)

View File

@ -2,7 +2,7 @@
# RUN: OutputCheck %s --file-to-check=%t
def f():
with parallel:
with interleave:
# CHECK-L: ${LINE:+1}: error: while statement cannot be interleaved
while True:
delay_mu(1)

View File

@ -3,13 +3,13 @@
# CHECK-L: f: (a:int(width=64), b:int(width=64))->NoneType delay(max(a, b) mu)
def f(a, b):
with parallel:
with interleave:
delay_mu(a)
delay_mu(b)
# CHECK-L: g: (a:int(width=64))->NoneType delay(max(a, 200) mu)
def g(a):
with parallel:
with interleave:
delay_mu(100)
delay_mu(200)
delay_mu(a)

View File

@ -1,6 +1,6 @@
# RUN: %python -m artiq.compiler.testbench.signature %s >%t
with parallel:
with interleave:
delay(1.0)
t0 = now_mu()
print(t0)

View File

@ -156,23 +156,23 @@ The core device records the real-time IO waveforms into a circular buffer. It is
Afterwards, the recorded data can be extracted and written to a VCD file using ``artiq_coreanalyzer -w rtio.vcd`` (see: :ref:`core-device-rtio-analyzer-tool`). VCD files can be viewed using third-party tools such as GtkWave.
Parallel and sequential blocks
------------------------------
Interleave and sequential blocks
--------------------------------
It is often necessary that several pulses overlap one another. This can be expressed through the use of ``with parallel`` constructs, in which all statements execute at the same time. The execution time of the ``parallel`` block is the execution time of its longest statement.
It is often necessary that several pulses overlap one another. This can be expressed through the use of ``with interleave`` constructs, in which all statements execute at the same time. The execution time of the ``interleave`` block is the execution time of its longest statement.
Try the following code and observe the generated pulses on a 2-channel oscilloscope or logic analyzer: ::
for i in range(1000000):
with parallel:
with interleave:
self.ttl0.pulse(2*us)
self.ttl1.pulse(4*us)
delay(4*us)
Within a parallel block, some statements can be made sequential again using a ``with sequential`` construct. Observe the pulses generated by this code: ::
Within an interleave block, some statements can be made sequential again using a ``with sequential`` construct. Observe the pulses generated by this code: ::
for i in range(1000000):
with parallel:
with interleave:
with sequential:
self.ttl0.pulse(2*us)
delay(1*us)
@ -181,4 +181,4 @@ Within a parallel block, some statements can be made sequential again using a ``
delay(4*us)
.. warning::
In its current implementation, ARTIQ only supports those pulse sequences that can be interleaved at compile time into a sequential series of on/off events. Combinations of ``parallel``/``sequential`` blocks that require multithreading (due to the parallel execution of long loops, complex algorithms, or algorithms that depend on external input) will cause the compiler to return an error.
In its current implementation, ARTIQ only supports those pulse sequences that can be interleaved at compile time into a sequential series of on/off events. Combinations of ``interleave``/``sequential`` blocks that require multithreading (due to the parallel execution of long loops, complex algorithms, or algorithms that depend on external input) will cause the compiler to return an error.
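For instance (mirroring the "while statement cannot be interleaved" lit test earlier in this commit), a branch whose duration cannot be determined at compile time makes the block impossible to flatten, and the compiler rejects it:

    with interleave:
        while True:             # error: while statement cannot be interleaved
            delay_mu(1)
        self.ttl1.pulse(4*us)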

View File

@ -117,10 +117,10 @@ dds.pulse(200*MHz, 11*us) # exactly 1 ms after trigger
\footnotesize
\begin{minted}[frame=leftline]{python}
with sequential:
with parallel:
with interleave:
a.pulse(100*MHz, 10*us)
b.pulse(200*MHz, 20*us)
with parallel:
with interleave:
c.pulse(300*MHz, 30*us)
d.pulse(400*MHz, 20*us)
\end{minted}
@ -128,7 +128,7 @@ with sequential:
\begin{itemize}
\item Experiments are inherently parallel:
simultaneous laser pulses, parallel cooling of ions in different trap zones
\item \verb!parallel! and \verb!sequential! contexts with arbitrary nesting
\item \verb!interleave! and \verb!sequential! contexts with arbitrary nesting
\item \verb!a! and \verb!b! pulses both start at the same time
\item \verb!c! and \verb!d! pulses both start when \verb!a! and \verb!b! are both done
(after 20\,µs)
@ -181,7 +181,7 @@ class Experiment:
@kernel
def run(self):
with parallel:
with interleave:
self.ion1.cool(duration=10*us)
self.ion2.cool(frequency=...)
self.transporter.move(speed=...)
@ -222,7 +222,7 @@ class Experiment:
\item RPC and exception mappings are generated
\item Constants and small kernels are inlined
\item Small loops are unrolled
\item Statements in parallel blocks are interleaved
\item Statements in interleave blocks are interleaved
\item Time is converted to RTIO clock cycles
\item The Python AST is converted to LLVM IR
\item The LLVM IR is compiled to OpenRISC machine code

View File

@ -133,10 +133,10 @@ dds.pulse(200*MHz, 11*us) # exactly 1 ms after trigger
\footnotesize
\begin{minted}[frame=leftline]{python}
with sequential:
with parallel:
with interleave:
a.pulse(100*MHz, 10*us)
b.pulse(200*MHz, 20*us)
with parallel:
with interleave:
c.pulse(300*MHz, 30*us)
d.pulse(400*MHz, 20*us)
\end{minted}
@ -144,7 +144,7 @@ with sequential:
\begin{itemize}
\item Experiments are inherently parallel:
simultaneous laser pulses, parallel cooling of ions in different trap zones
\item \verb!parallel! and \verb!sequential! contexts with arbitrary nesting
\item \verb!interleave! and \verb!sequential! contexts with arbitrary nesting
\item \verb!a! and \verb!b! pulses both start at the same time
\item \verb!c! and \verb!d! pulses both start when \verb!a! and \verb!b! are both done
(after 20\,µs)
@ -197,7 +197,7 @@ class Experiment:
@kernel
def run(self):
with parallel:
with interleave:
self.ion1.cool(duration=10*us)
self.ion2.cool(frequency=...)
self.transporter.move(speed=...)
@ -238,7 +238,7 @@ class Experiment:
\item RPC and exception mappings are generated
\item Constants and small kernels are inlined
\item Small loops are unrolled
\item Statements in parallel blocks are interleaved
\item Statements in interleave blocks are interleaved
\item Time is converted to RTIO clock cycles
\item The Python AST is converted to LLVM IR
\item The LLVM IR is compiled to OpenRISC machine code

View File

@ -28,7 +28,7 @@ class PhotonHistogram(EnvExperiment):
@kernel
def cool_detect(self):
with parallel:
with interleave:
self.bd_sw.pulse(1*ms)
self.bdd_sw.pulse(1*ms)
@ -36,7 +36,7 @@ class PhotonHistogram(EnvExperiment):
self.bd_sw.pulse(100*us)
self.bd_dds.set(self.detect_f)
with parallel:
with interleave:
self.bd_sw.pulse(self.detect_t)
self.pmt.gate_rising(self.detect_t)

View File

@ -27,7 +27,7 @@ class DDSTest(EnvExperiment):
self.led.on()
else:
self.led.off()
with parallel:
with interleave:
with sequential:
self.dds0.set(100*MHz + 4*i*kHz)
self.ttl0.pulse(500*us)

View File

@ -62,7 +62,7 @@ class TDR(EnvExperiment):
@kernel
def one(self, t, p):
t0 = now_mu()
with parallel:
with interleave:
self.pmt0.gate_both_mu(2*p)
self.ttl2.pulse_mu(p)
for i in range(len(t)):

View File

@ -24,7 +24,7 @@ class AluminumSpectroscopy(EnvExperiment):
delay(10*us)
self.laser_cooling.pulse(100*MHz, 100*us)
delay(5*us)
with parallel:
with interleave:
self.spectroscopy.pulse(self.spectroscopy_freq, 100*us)
with sequential:
delay(50*us)
@ -32,7 +32,7 @@ class AluminumSpectroscopy(EnvExperiment):
delay(5*us)
while True:
delay(5*us)
with parallel:
with interleave:
self.state_detection.pulse(100*MHz, 10*us)
photon_count = self.pmt.count_gate(10*us)
if (photon_count < self.photon_limit_low

View File

@ -11,7 +11,7 @@ class SimpleSimulation(EnvExperiment):
@kernel
def run(self):
with parallel:
with interleave:
with sequential:
self.a.pulse(100*MHz, 20*us)
self.b.pulse(200*MHz, 20*us)