forked from M-Labs/artiq
1
0
Fork 0

remove old compiler, add nac3 dependency (WIP)

This commit is contained in:
Sebastien Bourdeauducq 2021-10-08 00:30:27 +08:00
parent 59065c4663
commit 97909d7619
256 changed files with 41 additions and 17804 deletions

View File

@ -1,2 +0,0 @@
from .inline import inline
from .unroll import unroll

View File

@ -1,79 +0,0 @@
"""
:func:`inline` inlines a call instruction in ARTIQ IR.
The call instruction must have a statically known callee,
it must be second to last in the basic block, and the basic
block must have exactly one successor.
"""
from .. import types, builtins, iodelay, ir
def inline(call_insn):
    """Inline the statically-known callee of *call_insn* into its caller.

    Preconditions (asserted): the call has a static target function, it is
    the second-to-last instruction of its basic block, and that block has
    exactly one successor.  The callee's blocks are copied into the caller
    with an ``i.`` name prefix, and the call is replaced by a branch into
    the copied entry block.
    """
    assert isinstance(call_insn, ir.Call)
    assert call_insn.static_target_function is not None
    assert len(call_insn.basic_block.successors()) == 1
    assert call_insn.basic_block.index(call_insn) == \
        len(call_insn.basic_block.instructions) - 2

    # Maps callee values/blocks to their copies in the caller.
    value_map = {}
    source_function = call_insn.static_target_function
    target_function = call_insn.basic_block.function
    target_predecessor = call_insn.basic_block
    target_successor = call_insn.basic_block.successors()[0]

    # Unless the callee returns None, a phi prepended to the successor
    # block collects the return value from every copied Return.
    if builtins.is_none(source_function.type.ret):
        target_return_phi = None
    else:
        target_return_phi = target_successor.prepend(ir.Phi(source_function.type.ret))
    # The callee's closure acts as its implicit first argument.
    closure = target_predecessor.insert(ir.GetAttr(call_insn.target_function(), '__closure__'),
                                        before=call_insn)
    for actual_arg, formal_arg in zip([closure] + call_insn.arguments(),
                                      source_function.arguments):
        value_map[formal_arg] = actual_arg

    # Create empty copies of all callee blocks first, so that branch
    # targets can be remapped before the bodies are filled in.
    for source_block in source_function.basic_blocks:
        target_block = ir.BasicBlock([], "i." + source_block.name)
        target_function.add(target_block)
        value_map[source_block] = target_block

    def mapper(value):
        # Constants are shared; everything else must already have a copy.
        if isinstance(value, ir.Constant):
            return value
        else:
            return value_map[value]

    for source_insn in source_function.instructions():
        target_block = value_map[source_insn.basic_block]
        if isinstance(source_insn, ir.Return):
            # Returns become branches to the common successor, feeding the
            # return phi if there is one.
            if target_return_phi is not None:
                target_return_phi.add_incoming(mapper(source_insn.value()), target_block)
            target_insn = ir.Branch(target_successor)
        elif isinstance(source_insn, ir.Phi):
            # Phis are wired up in a second pass, once every incoming
            # value has a copy.
            target_insn = ir.Phi()
        elif isinstance(source_insn, ir.Delay):
            target_insn = source_insn.copy(mapper)
            # Substitute the caller's argument expressions into the
            # callee's symbolic delay information.
            target_insn.interval = source_insn.interval.fold(call_insn.arg_exprs)
        elif isinstance(source_insn, ir.Loop):
            target_insn = source_insn.copy(mapper)
            target_insn.trip_count = source_insn.trip_count.fold(call_insn.arg_exprs)
        elif isinstance(source_insn, ir.Call):
            target_insn = source_insn.copy(mapper)
            target_insn.arg_exprs = \
                { arg: source_insn.arg_exprs[arg].fold(call_insn.arg_exprs)
                  for arg in source_insn.arg_exprs }
        else:
            target_insn = source_insn.copy(mapper)
        target_insn.name = "i." + source_insn.name
        value_map[source_insn] = target_insn
        target_block.append(target_insn)

    # Second pass: fill in the copied phis.
    for source_insn in source_function.instructions():
        if isinstance(source_insn, ir.Phi):
            target_insn = value_map[source_insn]
            for block, value in source_insn.incoming():
                target_insn.add_incoming(value_map[value], value_map[block])

    # Redirect the caller into the inlined entry block and drop the call.
    target_predecessor.terminator().replace_with(ir.Branch(value_map[source_function.entry()]))
    if target_return_phi is not None:
        call_insn.replace_all_uses_with(target_return_phi)
    call_insn.erase()

View File

@ -1,97 +0,0 @@
"""
:func:`unroll` unrolls a loop instruction in ARTIQ IR.
The loop's trip count must be constant.
The loop body must not have any control flow instructions
except for one branch back to the loop head.
The loop body must be executed if the condition to which
the instruction refers is true.
"""
from .. import types, builtins, iodelay, ir
from ..analyses import domination
def _get_body_blocks(root, limit):
postorder = []
visited = set()
def visit(block):
visited.add(block)
for next_block in block.successors():
if next_block not in visited and next_block is not limit:
visit(next_block)
postorder.append(block)
visit(root)
postorder.reverse()
return postorder
def unroll(loop_insn):
    """Fully unroll *loop_insn*, whose trip count must fold to a constant.

    The loop body (everything reachable from the loop's true edge without
    crossing the head) is copied once per iteration with the induction
    variable replaced by the iteration constant; the copies are chained
    together, and the original body is erased.
    """
    loop_head = loop_insn.basic_block
    function = loop_head.function
    assert isinstance(loop_insn, ir.Loop)
    # The head has exactly two predecessors: the preheader and the latch.
    assert len(loop_head.predecessors()) == 2
    assert len(loop_insn.if_false().predecessors()) == 1
    assert iodelay.is_const(loop_insn.trip_count)

    trip_count = loop_insn.trip_count.fold().value
    if trip_count == 0:
        # Nothing to unroll: fall straight through to the exit block.
        loop_insn.replace_with(ir.Branch(loop_insn.if_false()))
        return

    source_blocks = _get_body_blocks(loop_insn.if_true(), loop_head)
    source_indvar = loop_insn.induction_variable()
    source_tail = loop_insn.if_false()
    # Block whose terminator gets redirected into the next body copy.
    unroll_target = loop_head
    for n in range(trip_count):
        # Within the n-th copy, the induction variable is the constant n.
        value_map = {source_indvar: ir.Constant(n, source_indvar.type)}

        for source_block in source_blocks:
            target_block = ir.BasicBlock([], "u{}.{}".format(n, source_block.name))
            function.add(target_block)
            value_map[source_block] = target_block

        def mapper(value):
            if isinstance(value, ir.Constant):
                return value
            elif value in value_map:
                return value_map[value]
            else:
                # Values defined outside the loop body are shared as-is.
                return value

        for source_block in source_blocks:
            target_block = value_map[source_block]
            for source_insn in source_block.instructions:
                if isinstance(source_insn, ir.Phi):
                    # Phis are wired up in the second pass below.
                    target_insn = ir.Phi()
                else:
                    target_insn = source_insn.copy(mapper)
                    target_insn.name = "u{}.{}".format(n, source_insn.name)
                target_block.append(target_insn)
                value_map[source_insn] = target_insn

        for source_block in source_blocks:
            for source_insn in source_block.instructions:
                if isinstance(source_insn, ir.Phi):
                    target_insn = value_map[source_insn]
                    for block, value in source_insn.incoming():
                        target_insn.add_incoming(value_map[value], value_map[block])

        # Chain: the previous copy (or the loop head itself on the first
        # iteration) now branches into this copy's entry block.
        assert isinstance(unroll_target.terminator(), (ir.Branch, ir.Loop))
        unroll_target.terminator().replace_with(ir.Branch(value_map[source_blocks[0]]))
        unroll_target = value_map[source_blocks[-1]]

    # The last copy exits to the original loop exit block.
    assert isinstance(unroll_target.terminator(), ir.Branch)
    assert len(source_blocks[-1].successors()) == 1
    unroll_target.terminator().replace_with(ir.Branch(source_tail))

    # Erase the original body, first removing its values from any phis in
    # the loop head that referenced them.
    for source_block in reversed(source_blocks):
        for source_insn in reversed(source_block.instructions):
            for use in set(source_insn.uses):
                if isinstance(use, ir.Phi):
                    assert use.basic_block == loop_head
                    use.remove_incoming_value(source_insn)
            source_insn.erase()
    for source_block in reversed(source_blocks):
        source_block.erase()

View File

@ -1,3 +0,0 @@
from .domination import DominatorTree
from .devirtualization import Devirtualization
from .invariant_detection import InvariantDetection

View File

@ -1,119 +0,0 @@
"""
:class:`Devirtualization` performs method resolution at
compile time.
Devirtualization is implemented using a lattice
with three states: unknown, assigned once, diverges.
The lattice is computed individually for every
variable in scope as well as every
(instance type, field name) pair.
"""
from pythonparser import algorithm
from .. import asttyped, ir, types
def _advance(target_map, key, value):
if key not in target_map:
target_map[key] = value # unknown → assigned once
else:
target_map[key] = None # assigned once → diverges
class FunctionResolver(algorithm.Visitor):
    """Map name nodes to the unique function definition they refer to.

    For every (scope node, variable name) pair the lattice from
    :func:`_advance` is maintained in ``scope_map``.  Name lookups are
    deferred (queued as thunks) until the whole tree has been visited,
    then resolved innermost-scope-first into ``variable_map``.
    """
    def __init__(self, variable_map):
        self.variable_map = variable_map
        self.scope_map = dict()
        self.queue = []

        # True while visiting the target side of an assignment.
        self.in_assign = False
        self.current_scopes = []

    def finalize(self):
        # Run the deferred lookups now that scope_map is complete.
        for thunk in self.queue:
            thunk()

    def visit_scope(self, node):
        self.current_scopes.append(node)
        self.generic_visit(node)
        self.current_scopes.pop()

    def visit_in_assign(self, node):
        self.in_assign = True
        self.visit(node)
        self.in_assign = False

    def visit_Assign(self, node):
        self.visit(node.value)
        self.visit_in_assign(node.targets)

    def visit_ForT(self, node):
        self.visit(node.iter)
        self.visit_in_assign(node.target)
        self.visit(node.body)
        self.visit(node.orelse)

    def visit_withitemT(self, node):
        self.visit(node.context_expr)
        self.visit_in_assign(node.optional_vars)

    def visit_comprehension(self, node):
        self.visit(node.iter)
        self.visit_in_assign(node.target)
        self.visit(node.ifs)

    def visit_ModuleT(self, node):
        self.visit_scope(node)

    def visit_FunctionDefT(self, node):
        # Defining a function assigns its name in the enclosing scope.
        _advance(self.scope_map, (self.current_scopes[-1], node.name), node)
        self.visit_scope(node)

    def visit_NameT(self, node):
        if self.in_assign:
            # Just give up if we assign anything at all to a variable, and
            # assume it diverges.
            _advance(self.scope_map, (self.current_scopes[-1], node.id), None)
        else:
            # Look up the final value in scope_map and copy it into variable_map.
            keys = [(scope, node.id) for scope in reversed(self.current_scopes)]
            def thunk():
                for key in keys:
                    if key in self.scope_map:
                        self.variable_map[node] = self.scope_map[key]
                        return
            self.queue.append(thunk)
class MethodResolver(algorithm.Visitor):
    """Record, for every (instance type, attribute name) pair, the single
    value assigned to it, or None once it is assigned more than once."""
    def __init__(self, variable_map, method_map):
        self.variable_map = variable_map
        self.method_map = method_map

    # embedding.Stitcher.finalize generates initialization statements
    # of form "constructor.meth = meth_body".
    def visit_Assign(self, node):
        if node.value not in self.variable_map:
            return
        resolved = self.variable_map[node.value]
        for target in node.targets:
            if not isinstance(target, asttyped.AttributeT):
                continue
            receiver_type = target.value.type
            if types.is_constructor(receiver_type):
                instance_type = receiver_type.instance
            elif types.is_instance(receiver_type):
                instance_type = receiver_type
            else:
                continue
            _advance(self.method_map, (instance_type, target.attr), resolved)
class Devirtualization:
    """Aggregate the results of function and method resolution over a
    typed AST.

    :ivar variable_map: maps name nodes to uniquely-assigned values
    :ivar method_map: maps (instance type, attribute) to method bodies
    """
    def __init__(self):
        self.variable_map = {}
        self.method_map = {}

    def visit(self, node):
        # Function resolution must complete (including deferred lookups)
        # before method resolution consumes variable_map.
        function_resolver = FunctionResolver(self.variable_map)
        function_resolver.visit(node)
        function_resolver.finalize()

        MethodResolver(self.variable_map, self.method_map).visit(node)

View File

@ -1,144 +0,0 @@
"""
:class:`DominatorTree` computes the dominance relation over
control flow graphs.
See http://www.cs.rice.edu/~keith/EMBED/dom.pdf.
"""
class GenericDominatorTree:
    """Shared machinery for (post)dominator trees, following the
    Cooper-Harvey-Kennedy iterative algorithm (see module docstring).

    Subclasses supply the graph traversal (:meth:`_traverse_in_postorder`)
    and the predecessor relation (:meth:`_prev_block_names`); blocks are
    referred to internally by their postorder number.
    """
    def __init__(self):
        self._assign_names()
        self._compute()

    def _traverse_in_postorder(self):
        """Return all relevant blocks in postorder (start block last)."""
        raise NotImplementedError

    def _prev_block_names(self, block):
        """Yield the postorder names of *block*'s predecessors."""
        raise NotImplementedError

    def _assign_names(self):
        postorder = self._traverse_in_postorder()

        # The start block is last in postorder, so it gets the highest name.
        self._start_name = len(postorder) - 1
        self._block_of_name = postorder
        self._name_of_block = {}
        for block_name, block in enumerate(postorder):
            self._name_of_block[block] = block_name

    def _intersect(self, block_name_1, block_name_2):
        # Walk both "fingers" up the partially-built dominator tree until
        # they meet; lower postorder number means deeper in the tree.
        finger_1, finger_2 = block_name_1, block_name_2
        while finger_1 != finger_2:
            while finger_1 < finger_2:
                finger_1 = self._doms[finger_1]
            while finger_2 < finger_1:
                finger_2 = self._doms[finger_2]
        return finger_1

    def _compute(self):
        # _doms maps a block name to the name of its immediate dominator.
        self._doms = {}

        # Start block dominates itself.
        self._doms[self._start_name] = self._start_name

        # We don't yet know what blocks dominate all other blocks.
        for block_name in range(self._start_name):
            self._doms[block_name] = None

        changed = True
        while changed:
            changed = False

            # For all blocks except start block, in reverse postorder...
            for block_name in reversed(range(self._start_name)):
                # Select a new immediate dominator from the blocks we have
                # already processed, and remember all others.
                # We've already processed at least one previous block because
                # of the graph traverse order.
                new_idom, prev_block_names = None, []
                for prev_block_name in self._prev_block_names(block_name):
                    if new_idom is None and self._doms[prev_block_name] is not None:
                        new_idom = prev_block_name
                    else:
                        prev_block_names.append(prev_block_name)

                # Find a common previous block
                for prev_block_name in prev_block_names:
                    if self._doms[prev_block_name] is not None:
                        new_idom = self._intersect(prev_block_name, new_idom)

                if self._doms[block_name] != new_idom:
                    self._doms[block_name] = new_idom
                    changed = True

    def immediate_dominator(self, block):
        """Return the unique immediate (post)dominator of *block*."""
        return self._block_of_name[self._doms[self._name_of_block[block]]]

    def dominators(self, block):
        """Yield all (post)dominators of *block*, innermost first."""
        # Blocks that are statically unreachable from entry are considered
        # dominated by every other block.
        if block not in self._name_of_block:
            yield from self._block_of_name
            return

        block_name = self._name_of_block[block]
        yield self._block_of_name[block_name]
        while block_name != self._doms[block_name]:
            block_name = self._doms[block_name]
            yield self._block_of_name[block_name]
class DominatorTree(GenericDominatorTree):
    """Dominator tree over the forward CFG of *function*."""
    def __init__(self, function):
        self.function = function
        super().__init__()

    def _traverse_in_postorder(self):
        # Depth-first walk from the entry block; a block is appended only
        # after all its unvisited successors, yielding postorder.
        order, seen = [], set()

        def walk(blk):
            seen.add(blk)
            for succ in blk.successors():
                if succ not in seen:
                    walk(succ)
            order.append(blk)

        walk(self.function.entry())
        return order

    def _prev_block_names(self, block_name):
        for pred in self._block_of_name[block_name].predecessors():
            # Only return predecessors that are statically reachable from entry.
            if pred in self._name_of_block:
                yield self._name_of_block[pred]
class PostDominatorTree(GenericDominatorTree):
    """Post-dominator tree: dominance over the reversed CFG, with a
    virtual exit block (represented as None) uniting every function
    exit point."""
    def __init__(self, function):
        self.function = function
        super().__init__()

    def _traverse_in_postorder(self):
        order, seen = [], set()

        def walk(blk):
            seen.add(blk)
            for pred in blk.predecessors():
                if pred not in seen:
                    walk(pred)
            order.append(blk)

        # Start the reversed traversal from every block with no successors.
        for blk in self.function.basic_blocks:
            if not any(blk.successors()):
                walk(blk)

        order.append(None)  # virtual exit block
        return order

    def _prev_block_names(self, block_name):
        successors = self._block_of_name[block_name].successors()
        if successors:
            for succ in successors:
                yield self._name_of_block[succ]
        else:
            # In the reversed CFG, exit blocks are preceded only by the
            # virtual exit, which is the start node of this tree.
            yield self._start_name

View File

@ -1,49 +0,0 @@
"""
:class:`InvariantDetection` determines which attributes can be safely
marked kernel invariant.
"""
from pythonparser import diagnostic
from .. import ir, types
class InvariantDetection:
    """Report instance attributes that are read but never written, and so
    could be marked kernel invariant by the user."""
    def __init__(self, engine):
        # Diagnostic engine through which the notes are reported.
        self.engine = engine

    def process(self, functions):
        """Scan *functions* and emit one note per read-only attribute."""
        # (instance type, attribute) -> location of the first read
        self.attr_locs = dict()
        # set of (instance type, attribute) pairs written somewhere
        self.attr_written = set()

        for func in functions:
            self.process_function(func)

        for key in self.attr_locs:
            if key not in self.attr_written:
                typ, attr = key
                # Already declared invariant: nothing to suggest.
                if attr in typ.constant_attributes:
                    continue
                diag = diagnostic.Diagnostic("note",
                    "attribute '{attr}' of type '{type}' is never written to; " +
                    "it could be marked as kernel invariant to potentially increase performance",
                    {"attr": attr,
                     "type": typ.name},
                    self.attr_locs[key])
                self.engine.process(diag)

    def process_function(self, func):
        """Record attribute reads and writes occurring in *func*."""
        for block in func.basic_blocks:
            for insn in block.instructions:
                if not isinstance(insn, (ir.GetAttr, ir.SetAttr)):
                    continue
                if not types.is_instance(insn.object().type):
                    continue
                key = (insn.object().type, insn.attr)
                if isinstance(insn, ir.GetAttr):
                    # Method lookups do not count as reads of mutable state.
                    if types.is_method(insn.type):
                        continue
                    if key not in self.attr_locs and insn.loc is not None:
                        self.attr_locs[key] = insn.loc
                elif isinstance(insn, ir.SetAttr):
                    self.attr_written.add(key)

View File

@ -1,113 +0,0 @@
"""
The typedtree module exports the pythonparser AST enriched with
typing information.
"""
from pythonparser import ast
class commontyped(ast.commonloc):
    """A mixin for typed AST nodes."""
    # Names of the extra slots holding inferred types; subclasses may
    # override (e.g. "signature_type").  Included in repr() alongside
    # the regular fields and source locations.
    _types = ("type",)

    def _reprfields(self):
        return self._fields + self._locs + self._types
class scoped(object):
    """
    A mixin for AST nodes that introduce a variable scope.

    :ivar typing_env: (dict with string keys and :class:`.types.Type` values)
        map of variable names to variable types
    :ivar globals_in_scope: (set of string keys)
        set of variables resolved as globals
    """
# Typed versions of untyped nodes
class argT(ast.arg, commontyped):
    pass

class ClassDefT(ast.ClassDef):
    # Type of calling the class object, i.e. of the constructor.
    _types = ("constructor_type",)

class FunctionDefT(ast.FunctionDef, scoped):
    _types = ("signature_type",)

class QuotedFunctionDefT(FunctionDefT):
    """
    :ivar flags: (set of str) Code generation flags (see :class:`ir.Function`).
    """

class ModuleT(ast.Module, scoped):
    pass

class ExceptHandlerT(ast.ExceptHandler):
    # The "type" slot is taken by commontyped's inferred type, so the
    # matched exception class lives in "filter" instead.
    _fields = ("filter", "name", "body") # rename ast.ExceptHandler.type to filter
    _types = ("name_type",)

class ForT(ast.For):
    """
    :ivar trip_count: (:class:`iodelay.Expr`)
    :ivar trip_interval: (:class:`iodelay.Expr`)
    """

class withitemT(ast.withitem):
    _types = ("enter_type", "exit_type")

class SliceT(ast.Slice, commontyped):
    pass

class AttributeT(ast.Attribute, commontyped):
    pass
# The remaining expression nodes simply gain an inferred type via
# commontyped; comprehension-like nodes additionally open a scope.
class BinOpT(ast.BinOp, commontyped):
    pass

class BoolOpT(ast.BoolOp, commontyped):
    pass

class CallT(ast.Call, commontyped):
    """
    :ivar iodelay: (:class:`iodelay.Expr`)
    :ivar arg_exprs: (dict of str to :class:`iodelay.Expr`)
    """

class CompareT(ast.Compare, commontyped):
    pass

class DictT(ast.Dict, commontyped):
    pass

class DictCompT(ast.DictComp, commontyped, scoped):
    pass

class EllipsisT(ast.Ellipsis, commontyped):
    pass

class GeneratorExpT(ast.GeneratorExp, commontyped, scoped):
    pass

class IfExpT(ast.IfExp, commontyped):
    pass

class LambdaT(ast.Lambda, commontyped, scoped):
    pass

class ListT(ast.List, commontyped):
    pass

class ListCompT(ast.ListComp, commontyped, scoped):
    pass

class NameT(ast.Name, commontyped):
    pass

class NameConstantT(ast.NameConstant, commontyped):
    pass

class NumT(ast.Num, commontyped):
    pass

class SetT(ast.Set, commontyped):
    pass

class SetCompT(ast.SetComp, commontyped, scoped):
    pass

class StrT(ast.Str, commontyped):
    pass

class StarredT(ast.Starred, commontyped):
    pass

class SubscriptT(ast.Subscript, commontyped):
    pass

class TupleT(ast.Tuple, commontyped):
    pass

class UnaryOpT(ast.UnaryOp, commontyped):
    pass

class YieldT(ast.Yield, commontyped):
    pass

class YieldFromT(ast.YieldFrom, commontyped):
    pass

# Novel typed nodes
class CoerceT(ast.expr, commontyped):
    _fields = ('value',) # other_value deliberately not in _fields

class QuoteT(ast.expr, commontyped):
    _fields = ('value',)

View File

@ -1,339 +0,0 @@
"""
The :mod:`builtins` module contains the builtin Python
and ARTIQ types, such as int or float.
"""
from collections import OrderedDict
from . import types
# Types
class TNone(types.TMono):
    """The type of ``None``."""
    def __init__(self):
        super().__init__("NoneType")

class TBool(types.TMono):
    """The boolean type."""
    def __init__(self):
        super().__init__("bool")

    # zero()/one() supply the literal additive/multiplicative identity
    # values of the type.
    @staticmethod
    def zero():
        return False

    @staticmethod
    def one():
        return True

class TInt(types.TMono):
    """A machine integer; the bit width is a type parameter (a fresh type
    variable when not yet inferred)."""
    def __init__(self, width=None):
        if width is None:
            width = types.TVar()
        super().__init__("int", {"width": width})

    @staticmethod
    def zero():
        return 0

    @staticmethod
    def one():
        return 1
def TInt32():
    """Shorthand for a 32-bit wide :class:`TInt`."""
    return TInt(types.TValue(32))

def TInt64():
    """Shorthand for a 64-bit wide :class:`TInt`."""
    return TInt(types.TValue(64))

def _int_printer(typ, printer, depth, max_depth):
    # Render as numpy.int32/numpy.int64, or "numpy.int?" while the width
    # is still an unconstrained type variable.
    if types.is_var(typ["width"]):
        return "numpy.int?"
    else:
        return "numpy.int{}".format(types.get_value(typ.find()["width"]))
types.TypePrinter.custom_printers["int"] = _int_printer
class TFloat(types.TMono):
    """The double-precision floating point type."""
    def __init__(self):
        super().__init__("float")

    @staticmethod
    def zero():
        return 0.0

    @staticmethod
    def one():
        return 1.0

class TStr(types.TMono):
    """The string type."""
    def __init__(self):
        super().__init__("str")

class TBytes(types.TMono):
    """The immutable byte string type."""
    def __init__(self):
        super().__init__("bytes")

class TByteArray(types.TMono):
    """The mutable byte array type."""
    def __init__(self):
        super().__init__("bytearray")

class TList(types.TMono):
    """A homogeneous list; the element type is a type parameter (a fresh
    type variable when not yet inferred)."""
    def __init__(self, elt=None):
        if elt is None:
            elt = types.TVar()
        super().__init__("list", {"elt": elt})
class TArray(types.TMono):
    """A NumPy-style multidimensional array type, parameterized by element
    type and (statically known) number of dimensions."""
    def __init__(self, elt=None, num_dims=1):
        if elt is None:
            elt = types.TVar()
        if isinstance(num_dims, int):
            # Make TArray more convenient to instantiate from (ARTIQ) user code.
            num_dims = types.TValue(num_dims)
        # For now, enforce number of dimensions to be known, as we'd otherwise
        # need to implement custom unification logic for the type of `shape`.
        # Default to 1 to keep compatibility with old user code from before
        # multidimensional array support.
        assert isinstance(num_dims.value, int), "Number of dimensions must be resolved"
        super().__init__("array", {"elt": elt, "num_dims": num_dims})
        self.attributes = OrderedDict([
            # Flat storage pointer plus one extent per dimension.
            ("buffer", types._TPointer(elt)),
            ("shape", types.TTuple([TInt32()] * num_dims.value)),
        ])

def _array_printer(typ, printer, depth, max_depth):
    return "numpy.array(elt={}, num_dims={})".format(
        printer.name(typ["elt"], depth, max_depth), typ["num_dims"].value)
types.TypePrinter.custom_printers["array"] = _array_printer
class TRange(types.TMono):
    """The range type; start/stop/step all share the element type."""
    def __init__(self, elt=None):
        if elt is None:
            elt = types.TVar()
        super().__init__("range", {"elt": elt})
        self.attributes = OrderedDict([
            ("start", elt),
            ("stop", elt),
            ("step", elt),
        ])
class TException(types.TMono):
    """An exception type, identified by its name and a numeric id."""
    # All exceptions share the same internal layout:
    #  * Pointer to the unique global with the name of the exception (str)
    #    (which also serves as the EHABI type_info).
    #  * File, line and column where it was raised (str, int, int).
    #  * Message, which can contain substitutions {0}, {1} and {2} (str).
    #  * Three 64-bit integers, parameterizing the message (numpy.int64).
    # Keep this in sync with the function ARTIQIRGenerator.alloc_exn.
    # NOTE: deliberately a class-level attribute — every exception type
    # shares this single layout.
    attributes = OrderedDict([
        ("__name__",    TStr()),
        ("__file__",    TStr()),
        ("__line__",    TInt32()),
        ("__col__",     TInt32()),
        ("__func__",    TStr()),
        ("__message__", TStr()),
        ("__param0__",  TInt64()),
        ("__param1__",  TInt64()),
        ("__param2__",  TInt64()),
    ])

    def __init__(self, name="Exception", id=0):
        super().__init__(name)
        # Numeric id distinguishing exception classes at runtime.
        self.id = id
# Types of the builtin callables: fn_* return the type of calling the
# corresponding builtin, obj_* the type of a builtin singleton object.
def fn_bool():
    return types.TConstructor(TBool())

def fn_int():
    return types.TConstructor(TInt())

def fn_int32():
    return types.TBuiltinFunction("int32")

def fn_int64():
    return types.TBuiltinFunction("int64")

def fn_float():
    return types.TConstructor(TFloat())

def fn_str():
    return types.TConstructor(TStr())

def fn_bytes():
    return types.TConstructor(TBytes())

def fn_bytearray():
    return types.TConstructor(TByteArray())

def fn_list():
    return types.TConstructor(TList())

def fn_array():
    return types.TConstructor(TArray())

# Builtin exception constructors.
def fn_Exception():
    return types.TExceptionConstructor(TException("Exception"))

def fn_IndexError():
    return types.TExceptionConstructor(TException("IndexError"))

def fn_ValueError():
    return types.TExceptionConstructor(TException("ValueError"))

def fn_ZeroDivisionError():
    return types.TExceptionConstructor(TException("ZeroDivisionError"))

def fn_RuntimeError():
    return types.TExceptionConstructor(TException("RuntimeError"))

def fn_range():
    return types.TBuiltinFunction("range")

def fn_len():
    return types.TBuiltinFunction("len")

def fn_round():
    return types.TBuiltinFunction("round")

def fn_abs():
    return types.TBuiltinFunction("abs")

def fn_min():
    return types.TBuiltinFunction("min")

def fn_max():
    return types.TBuiltinFunction("max")

def fn_make_array():
    return types.TBuiltinFunction("make_array")

def fn_print():
    return types.TBuiltinFunction("print")

def fn_kernel():
    return types.TBuiltinFunction("kernel")

# ARTIQ context managers and RTIO timing builtins.
def obj_parallel():
    return types.TBuiltin("parallel")

def obj_interleave():
    return types.TBuiltin("interleave")

def obj_sequential():
    return types.TBuiltin("sequential")

def fn_delay():
    return types.TBuiltinFunction("delay")

def fn_now_mu():
    return types.TBuiltinFunction("now_mu")

def fn_delay_mu():
    return types.TBuiltinFunction("delay_mu")

def fn_at_mu():
    return types.TBuiltinFunction("at_mu")

def fn_rtio_log():
    return types.TBuiltinFunction("rtio_log")
# Accessors
def is_none(typ):
    return types.is_mono(typ, "NoneType")

def is_bool(typ):
    return types.is_mono(typ, "bool")

def is_int(typ, width=None):
    """Check for the int type, optionally of a specific *width* (a
    :class:`types.TValue`)."""
    if width is not None:
        return types.is_mono(typ, "int", width=width)
    else:
        return types.is_mono(typ, "int")

def is_int32(typ):
    return is_int(typ, types.TValue(32))

def is_int64(typ):
    return is_int(typ, types.TValue(64))

def get_int_width(typ):
    # Returns None (implicitly) when typ is not an int.
    if is_int(typ):
        return types.get_value(typ.find()["width"])

def is_float(typ):
    return types.is_mono(typ, "float")

def is_str(typ):
    return types.is_mono(typ, "str")

def is_bytes(typ):
    return types.is_mono(typ, "bytes")

def is_bytearray(typ):
    return types.is_mono(typ, "bytearray")
def is_numeric(typ):
    typ = typ.find()
    return isinstance(typ, types.TMono) and \
        typ.name in ('int', 'float')

def is_list(typ, elt=None):
    if elt is not None:
        return types.is_mono(typ, "list", elt=elt)
    else:
        return types.is_mono(typ, "list")

def is_array(typ, elt=None):
    if elt is not None:
        return types.is_mono(typ, "array", elt=elt)
    else:
        return types.is_mono(typ, "array")

def is_listish(typ, elt=None):
    """Check for list-like types: lists, arrays, and — only when no
    element type is requested — str/bytes/bytearray."""
    if is_list(typ, elt) or is_array(typ, elt):
        return True
    elif elt is None:
        return is_str(typ) or is_bytes(typ) or is_bytearray(typ)
    else:
        return False
def is_range(typ, elt=None):
    """Check for the range type, optionally with element type *elt*.

    Fix: pass ``elt`` as a keyword parameter, mirroring is_list/is_array;
    the previous positional dict ``{"elt": elt}`` did not match the
    keyword-parameter interface of types.is_mono.
    """
    if elt is not None:
        return types.is_mono(typ, "range", elt=elt)
    else:
        return types.is_mono(typ, "range")
def is_exception(typ, name=None):
    """Check whether *typ* is an exception type, optionally one with the
    given *name*."""
    typ = typ.find()
    if name is None:
        return isinstance(typ, TException)
    # Fix: compare the name of the *resolved* type.  The previous code
    # read `typ.name` on the unresolved type, which is wrong whenever typ
    # is a type variable bound to a TException.
    return isinstance(typ, TException) and typ.name == name
def is_iterable(typ):
    return is_listish(typ) or is_range(typ)

def get_iterable_elt(typ):
    # TODO: Arrays count as listish, but this returns the innermost element type for
    # n-dimensional arrays, rather than the n-1 dimensional result of iterating over
    # the first axis, which makes the name a bit misleading.
    if is_str(typ) or is_bytes(typ) or is_bytearray(typ):
        # Character/byte sequences iterate as 8-bit integers.
        return TInt(types.TValue(8))
    elif types._is_pointer(typ) or is_iterable(typ):
        return typ.find()["elt"].find()
    else:
        assert False

def is_collection(typ):
    typ = typ.find()
    return isinstance(typ, types.TTuple) or \
        types.is_mono(typ, "list")

def is_allocated(typ):
    # True unless typ is one of the simple by-value kinds listed below.
    return not (is_none(typ) or is_bool(typ) or is_int(typ) or
                is_float(typ) or is_range(typ) or
                types._is_pointer(typ) or types.is_function(typ) or
                types.is_external_function(typ) or types.is_rpc(typ) or
                types.is_method(typ) or types.is_tuple(typ) or
                types.is_value(typ))

File diff suppressed because it is too large Load Diff

View File

@ -1,58 +0,0 @@
import sys
import builtins
import linecache
import tokenize
import logging
import importlib.machinery as im
from artiq.experiment import kernel, portable
__all__ = ["install_hook"]

logger = logging.getLogger(__name__)

# filename -> list of source lines, captured at module import time.
cache = dict()

# Originals saved by install_hook() so the hooks can delegate to them.
im_exec_module = None
linecache_getlines = None
def hook_exec_module(self, module):
    """Wrapper around SourceFileLoader.exec_module that snapshots the
    source of modules which look like they may contain ARTIQ kernels, so
    diagnostics can later show the code exactly as it was imported."""
    im_exec_module(self, module)
    if (hasattr(module, "__file__")
        # Heuristic to determine if the module may contain ARTIQ kernels.
        # This breaks if kernel is not imported the usual way.
        and ((getattr(module, "kernel", None) is kernel)
             or (getattr(module, "portable", None) is portable))):
        fn = module.__file__
        try:
            with tokenize.open(fn) as fp:
                lines = fp.readlines()
                if lines and not lines[-1].endswith("\n"):
                    lines[-1] += "\n"
                cache[fn] = lines
        # Fix: narrow the bare `except:` so KeyboardInterrupt/SystemExit
        # propagate; caching is best-effort, so log and continue.
        except Exception:
            logger.warning("failed to add '%s' to cache", fn, exc_info=True)
        else:
            logger.debug("added '%s' to cache", fn)
def hook_getlines(filename, module_globals=None):
    """linecache.getlines replacement that prefers the import-time
    snapshot over reading the (possibly changed) file from disk."""
    snapshot = cache.get(filename)
    if snapshot is not None:
        return snapshot
    return linecache_getlines(filename, module_globals)
def install_hook():
    """Monkey-patch the import machinery and linecache with the caching
    hooks above.  Intended to be called once at startup."""
    global im_exec_module, linecache_getlines
    # Save the originals so the hooks can delegate to them.
    im_exec_module = im.SourceFileLoader.exec_module
    im.SourceFileLoader.exec_module = hook_exec_module
    linecache_getlines = linecache.getlines
    linecache.getlines = hook_getlines
    logger.debug("hook installed")

View File

@ -1,249 +0,0 @@
"""
The :mod:`iodelay` module contains the classes describing
the statically inferred RTIO delay arising from executing
a function.
"""
from functools import reduce
class Expr:
    """Base class of symbolic delay expressions.

    The operator overloads build expression trees; the in-place variants
    (``+=`` etc.) deliberately alias the binary ones, so augmented
    assignment rebinds the name to a new node instead of mutating.
    """
    def __add__(lhs, rhs):
        assert isinstance(rhs, Expr)
        return Add(lhs, rhs)
    __iadd__ = __add__

    def __sub__(lhs, rhs):
        assert isinstance(rhs, Expr)
        return Sub(lhs, rhs)
    __isub__ = __sub__

    def __mul__(lhs, rhs):
        assert isinstance(rhs, Expr)
        return Mul(lhs, rhs)
    __imul__ = __mul__

    def __truediv__(lhs, rhs):
        assert isinstance(rhs, Expr)
        return TrueDiv(lhs, rhs)
    __itruediv__ = __truediv__

    def __floordiv__(lhs, rhs):
        assert isinstance(rhs, Expr)
        return FloorDiv(lhs, rhs)
    __ifloordiv__ = __floordiv__

    def __ne__(lhs, rhs):
        # Subclasses define __eq__; derive != from it.
        return not (lhs == rhs)

    def free_vars(self):
        """Return the set of variable names occurring in the expression."""
        return set()

    def fold(self, vars=None):
        """Constant-fold, optionally substituting *vars* (name -> Expr)."""
        return self
class Const(Expr):
    """A constant (int or float) delay expression."""
    _priority = 1

    def __init__(self, value):
        assert isinstance(value, (int, float))
        self.value = value

    def __str__(self):
        return str(self.value)

    def __eq__(lhs, rhs):
        return lhs.__class__ == rhs.__class__ and lhs.value == rhs.value

    def eval(self, env):
        # Constants evaluate to themselves, whatever the environment.
        return self.value
class Var(Expr):
    """A free variable inside a delay expression."""
    _priority = 1

    def __init__(self, name):
        assert isinstance(name, str)
        self.name = name

    def __str__(self):
        return self.name

    def __eq__(lhs, rhs):
        return lhs.__class__ == rhs.__class__ and lhs.name == rhs.name

    def free_vars(self):
        return {self.name}

    def fold(self, vars=None):
        # Substitute the variable when a binding for it is supplied.
        if vars is None or self.name not in vars:
            return self
        return vars[self.name]
class Conv(Expr):
    """Base class of unit conversions, parameterized by the reference
    RTIO period used for the conversion."""
    _priority = 1

    def __init__(self, operand, ref_period):
        assert isinstance(operand, Expr)
        assert isinstance(ref_period, float)
        self.operand, self.ref_period = operand, ref_period

    def __eq__(lhs, rhs):
        return rhs.__class__ == lhs.__class__ and \
            lhs.ref_period == rhs.ref_period and \
            lhs.operand == rhs.operand

    def free_vars(self):
        return self.operand.free_vars()
class MUToS(Conv):
    """Conversion from machine units to seconds."""
    def __str__(self):
        return "mu->s({})".format(self.operand)

    def eval(self, env):
        return self.operand.eval(env) * self.ref_period

    def fold(self, vars=None):
        inner = self.operand.fold(vars)
        if not isinstance(inner, Const):
            return MUToS(inner, ref_period=self.ref_period)
        # Constant operand: perform the conversion immediately.
        return Const(inner.value * self.ref_period)
class SToMU(Conv):
    """Conversion from seconds to (integer) machine units."""
    def __str__(self):
        return "s->mu({})".format(self.operand)

    def eval(self, env):
        return int(self.operand.eval(env) / self.ref_period)

    def fold(self, vars=None):
        inner = self.operand.fold(vars)
        if not isinstance(inner, Const):
            return SToMU(inner, ref_period=self.ref_period)
        # Constant operand: perform the (truncating) conversion now.
        return Const(int(inner.value / self.ref_period))
class BinOp(Expr):
    """Base class of binary operations; subclasses provide ``_op``,
    ``_symbol`` and ``_priority``."""
    def __init__(self, lhs, rhs):
        self.lhs, self.rhs = lhs, rhs

    def __str__(self):
        # Parenthesize operands whose operator binds more loosely
        # (higher _priority value) than this one.
        lhs = "({})".format(self.lhs) if self.lhs._priority > self._priority else str(self.lhs)
        rhs = "({})".format(self.rhs) if self.rhs._priority > self._priority else str(self.rhs)
        return "{} {} {}".format(lhs, self._symbol, rhs)

    def __eq__(lhs, rhs):
        return rhs.__class__ == lhs.__class__ and lhs.lhs == rhs.lhs and lhs.rhs == rhs.rhs

    def eval(self, env):
        return self.__class__._op(self.lhs.eval(env), self.rhs.eval(env))

    def free_vars(self):
        return self.lhs.free_vars() | self.rhs.free_vars()

    def _fold_binop(self, lhs, rhs):
        if isinstance(lhs, Const) and lhs.__class__ == rhs.__class__:
            # Both operands constant: evaluate now.
            return Const(self.__class__._op(lhs.value, rhs.value))
        elif isinstance(lhs, (MUToS, SToMU)) and lhs.__class__ == rhs.__class__:
            # Same unit conversion on both sides: push the operation
            # inside the conversion and fold again.
            return lhs.__class__(self.__class__(lhs.operand, rhs.operand),
                                 ref_period=lhs.ref_period).fold()
        else:
            return self.__class__(lhs, rhs)

    def fold(self, vars=None):
        return self._fold_binop(self.lhs.fold(vars), self.rhs.fold(vars))
class BinOpFixpoint(BinOp):
    """A binary operation with an identity element (``_fixpoint``) that
    folding can elide, e.g. x + 0 -> x and x * 1 -> x."""
    def _fold_binop(self, lhs, rhs):
        # Check the left operand first to match the usual fold order.
        for candidate, other in ((lhs, rhs), (rhs, lhs)):
            if isinstance(candidate, Const) and candidate.value == self._fixpoint:
                return other
        return super()._fold_binop(lhs, rhs)
class Add(BinOpFixpoint):
    _priority = 2
    _symbol = "+"
    _op = lambda a, b: a + b
    # Identity element: x + 0 folds to x.
    _fixpoint = 0

class Mul(BinOpFixpoint):
    _priority = 1
    _symbol = "*"
    _op = lambda a, b: a * b
    # Identity element: x * 1 folds to x.
    _fixpoint = 1
class Sub(BinOp):
    _priority = 2
    _symbol = "-"
    _op = lambda a, b: a - b

    def _fold_binop(self, lhs, rhs):
        # x - 0 folds to x (0 - x does not fold).
        if isinstance(rhs, Const) and rhs.value == 0:
            return lhs
        else:
            return super()._fold_binop(lhs, rhs)

class Div(BinOp):
    def _fold_binop(self, lhs, rhs):
        # x / 1 folds to x.
        if isinstance(rhs, Const) and rhs.value == 1:
            return lhs
        else:
            return super()._fold_binop(lhs, rhs)

class TrueDiv(Div):
    _priority = 1
    _symbol = "/"
    # NOTE: division by zero deliberately evaluates to 0, not an error.
    _op = lambda a, b: a / b if b != 0 else 0

class FloorDiv(Div):
    _priority = 1
    _symbol = "//"
    _op = lambda a, b: a // b if b != 0 else 0
class Max(Expr):
    """n-ary maximum of delay expressions."""
    _priority = 1

    def __init__(self, operands):
        assert isinstance(operands, list)
        assert all([isinstance(operand, Expr) for operand in operands])
        assert operands != []
        self.operands = operands

    def __str__(self):
        return "max({})".format(", ".join([str(operand) for operand in self.operands]))

    def __eq__(lhs, rhs):
        return rhs.__class__ == lhs.__class__ and lhs.operands == rhs.operands

    def free_vars(self):
        return reduce(lambda a, b: a | b, [operand.free_vars() for operand in self.operands])

    def eval(self, env):
        # Fix: forward the environment to the operands.  The previous
        # `operand.eval()` omitted `env` and raised TypeError, since every
        # Expr subclass defines eval(self, env).
        return max([operand.eval(env) for operand in self.operands])

    def fold(self, vars=None):
        # Fold all operands, merge constants into a single one, and
        # deduplicate structurally equal expressions.
        consts, exprs = [], []
        for operand in self.operands:
            operand = operand.fold(vars)
            if isinstance(operand, Const):
                consts.append(operand.value)
            elif operand not in exprs:
                exprs.append(operand)
        if len(consts) > 0:
            exprs.append(Const(max(consts)))
        if len(exprs) == 1:
            return exprs[0]
        else:
            return Max(exprs)
def is_const(expr, value=None):
expr = expr.fold()
if value is None:
return isinstance(expr, Const)
else:
return isinstance(expr, Const) and expr.value == value
def is_zero(expr):
    """Return True if *expr* folds to the constant 0."""
    return is_const(expr, value=0)

File diff suppressed because it is too large Load Diff

View File

@ -1,53 +0,0 @@
/* Force ld to make the ELF header as loadable. */
PHDRS
{
    headers PT_LOAD FILEHDR PHDRS ;
    text PT_LOAD ;
    data PT_LOAD ;
    dynamic PT_DYNAMIC ;
    eh_frame PT_GNU_EH_FRAME ;
}

SECTIONS
{
    /* Push back .text section enough so that ld.lld not complain */
    . = SIZEOF_HEADERS;

    .text :
    {
        *(.text .text.*)
    } : text

    .rodata :
    {
        *(.rodata .rodata.*)
    }

    /* Unwinding tables; KEEP prevents --gc-sections-style elision. */
    .eh_frame :
    {
        KEEP(*(.eh_frame))
    } : text

    .eh_frame_hdr :
    {
        KEEP(*(.eh_frame_hdr))
    } : text : eh_frame

    .data :
    {
        *(.data)
    } : data

    .dynamic :
    {
        *(.dynamic)
    } : data : dynamic

    /* NOTE(review): __bss_start/_end presumably delimit the region cleared
       at startup — confirm against the runtime's startup code. */
    .bss (NOLOAD) : ALIGN(4)
    {
        __bss_start = .;
        *(.sbss .sbss.* .bss .bss.*);
        . = ALIGN(4);
        _end = .;
    }
}

View File

@ -1,132 +0,0 @@
r"""
The :mod:`math_fns` module lists math-related functions from NumPy recognized
by the ARTIQ compiler so host function objects can be :func:`match`\ ed to
the compiler type metadata describing their core device analogue.
"""
from collections import OrderedDict
import numpy
from . import builtins, types
# Some special mathematical functions are exposed via their scipy.special
# equivalents. Since the rest of the ARTIQ core does not depend on SciPy,
# gracefully handle it not being present, making the functions simply not
# available.
try:
import scipy.special as scipy_special
except ImportError:
scipy_special = None
#: float -> float numpy.* math functions for which llvm.* intrinsics exist.
unary_fp_intrinsics = [(name, "llvm." + name + ".f64") for name in [
    "sin",
    "cos",
    "exp",
    "exp2",
    "log",
    "log10",
    "log2",
    "fabs",
    "floor",
    "ceil",
    "trunc",
    "sqrt",
]] + [
    # numpy.rint() seems to (NumPy 1.19.0, Python 3.8.5, Linux x86_64)
    # implement round-to-even, but unfortunately, rust-lang/libm only
    # provides round(), which always rounds away from zero.
    #
    # As there is no equivalent of the latter in NumPy (nor any other
    # basic rounding function), expose round() as numpy.rint anyway,
    # even if the rounding modes don't match up, so there is some way
    # to do rounding on the core device. (numpy.round() has entirely
    # different semantics; it rounds to a configurable number of
    # decimals.)
    ("rint", "llvm.round.f64"),
]
#: float -> float numpy.* math functions lowered to runtime calls.
# Bug fix: this identical list was previously assigned twice back-to-back;
# the redundant duplicate definition has been removed.
unary_fp_runtime_calls = [
    ("tan", "tan"),
    ("arcsin", "asin"),
    ("arccos", "acos"),
    ("arctan", "atan"),
    ("sinh", "sinh"),
    ("cosh", "cosh"),
    ("tanh", "tanh"),
    ("arcsinh", "asinh"),
    ("arccosh", "acosh"),
    ("arctanh", "atanh"),
    ("expm1", "expm1"),
    ("cbrt", "cbrt"),
]
#: float -> float scipy.special.* functions lowered to runtime calls.
scipy_special_unary_runtime_calls = [
    ("erf", "erf"),
    ("erfc", "erfc"),
    ("gamma", "tgamma"),
    ("gammaln", "lgamma"),
    ("j0", "j0"),
    ("j1", "j1"),
    ("y0", "y0"),
    ("y1", "y1"),
]
# Not mapped: jv/yv, libm only supports integer orders.

#: (float, float) -> float numpy.* math functions lowered to runtime calls.
binary_fp_runtime_calls = [
    ("arctan2", "atan2"),
    ("copysign", "copysign"),
    ("fmax", "fmax"),
    ("fmin", "fmin"),
    # ("ldexp", "ldexp"), # One argument is an int; would need a bit more plumbing.
    ("hypot", "hypot"),
    ("nextafter", "nextafter"),
]

#: Array handling builtins (special treatment due to allocations).
numpy_builtins = ["transpose"]
def fp_runtime_type(name, arity):
    """Build the ARTIQ type of a float ** arity -> float runtime call
    named *name*."""
    parameters = OrderedDict(
        ("arg{}".format(i), builtins.TFloat()) for i in range(arity))
    return types.TExternalFunction(
        parameters,
        builtins.TFloat(),
        name,
        # errno isn't observable from ARTIQ Python.
        flags={"nounwind", "nowrite"},
        broadcast_across_arrays=True)
# Map host function objects (numpy/scipy callables) to their core device
# type metadata.
math_fn_map = {
    getattr(numpy, symbol): fp_runtime_type(mangle, arity=1)
    for symbol, mangle in unary_fp_intrinsics + unary_fp_runtime_calls
}
math_fn_map.update(
    (getattr(numpy, symbol), fp_runtime_type(mangle, arity=2))
    for symbol, mangle in binary_fp_runtime_calls)
math_fn_map.update(
    (getattr(numpy, name), types.TBuiltinFunction("numpy." + name))
    for name in numpy_builtins)
if scipy_special is not None:
    math_fn_map.update(
        (getattr(scipy_special, symbol), fp_runtime_type(mangle, arity=1))
        for symbol, mangle in scipy_special_unary_runtime_calls)


def match(obj):
    """Return the compiler type for host math function *obj*, or None if it
    is not recognized."""
    return math_fn_map.get(obj)

View File

@ -1,98 +0,0 @@
"""
The :class:`Module` class encapsulates a single Python module,
which corresponds to a single ARTIQ translation unit (one LLVM
bitcode file and one object file, unless LTO is used).
A :class:`Module` can be created from a typed AST.
The :class:`Source` class parses a single source file or
string and infers types for it using a trivial :module:`prelude`.
"""
import os
from pythonparser import source, diagnostic, parse_buffer
from . import prelude, types, transforms, analyses, validators
class Source:
    """A single parsed and type-annotated source file (or string).

    :ivar engine: the diagnostic engine used by all analyses of this source
    :ivar typedtree: the type-annotated AST
    :ivar globals: mapping of module-level names to their inferred types
    """
    def __init__(self, source_buffer, engine=None):
        if engine is None:
            self.engine = diagnostic.Engine(all_errors_are_fatal=True)
        else:
            self.engine = engine
        self.embedding_map = None
        self.name, _ = os.path.splitext(os.path.basename(source_buffer.name))

        # Bug fix: forward self.engine (never None) to the passes below.
        # Previously the raw `engine` argument was forwarded, so when it was
        # None the freshly created default engine above was silently bypassed.
        asttyped_rewriter = transforms.ASTTypedRewriter(engine=self.engine,
                                                        prelude=prelude.globals())
        inferencer = transforms.Inferencer(engine=self.engine)

        self.parsetree, self.comments = parse_buffer(source_buffer, engine=self.engine)
        self.typedtree = asttyped_rewriter.visit(self.parsetree)
        self.globals = asttyped_rewriter.globals
        inferencer.visit(self.typedtree)

    @classmethod
    def from_string(cls, source_string, name="input.py", first_line=1, engine=None):
        """Parse *source_string*; a trailing newline is appended."""
        return cls(source.Buffer(source_string + "\n", name, first_line), engine=engine)

    @classmethod
    def from_filename(cls, filename, engine=None):
        """Parse the contents of *filename*."""
        with open(filename) as f:
            return cls(source.Buffer(f.read(), filename, 1), engine=engine)
class Module:
    """A single ARTIQ translation unit.

    Runs the full transform/validation pipeline over a :class:`Source` and
    keeps the resulting ARTIQ IR in :attr:`artiq_ir`.
    """
    def __init__(self, src, ref_period=1e-6, attribute_writeback=True, remarks=False):
        self.attribute_writeback = attribute_writeback
        self.engine = src.engine
        self.embedding_map = src.embedding_map
        self.name = src.name
        self.globals = src.globals

        # Construct all passes up front, then run them in pipeline order
        # below; the ordering of the visit/process calls is significant.
        int_monomorphizer = transforms.IntMonomorphizer(engine=self.engine)
        cast_monomorphizer = transforms.CastMonomorphizer(engine=self.engine)
        inferencer = transforms.Inferencer(engine=self.engine)
        monomorphism_validator = validators.MonomorphismValidator(engine=self.engine)
        escape_validator = validators.EscapeValidator(engine=self.engine)
        iodelay_estimator = transforms.IODelayEstimator(engine=self.engine,
                                                        ref_period=ref_period)
        constness_validator = validators.ConstnessValidator(engine=self.engine)
        artiq_ir_generator = transforms.ARTIQIRGenerator(engine=self.engine,
                                                         module_name=src.name,
                                                         ref_period=ref_period)
        dead_code_eliminator = transforms.DeadCodeEliminator(engine=self.engine)
        local_access_validator = validators.LocalAccessValidator(engine=self.engine)
        local_demoter = transforms.LocalDemoter()
        constant_hoister = transforms.ConstantHoister()
        devirtualization = analyses.Devirtualization()
        interleaver = transforms.Interleaver(engine=self.engine)
        invariant_detection = analyses.InvariantDetection(engine=self.engine)

        # AST-level passes and validators.
        int_monomorphizer.visit(src.typedtree)
        cast_monomorphizer.visit(src.typedtree)
        inferencer.visit(src.typedtree)
        monomorphism_validator.visit(src.typedtree)
        escape_validator.visit(src.typedtree)
        iodelay_estimator.visit_fixpoint(src.typedtree)
        constness_validator.visit(src.typedtree)
        devirtualization.visit(src.typedtree)

        # Lower to ARTIQ IR and run the IR-level passes.
        self.artiq_ir = artiq_ir_generator.visit(src.typedtree)
        artiq_ir_generator.annotate_calls(devirtualization)
        dead_code_eliminator.process(self.artiq_ir)
        interleaver.process(self.artiq_ir)
        local_access_validator.process(self.artiq_ir)
        local_demoter.process(self.artiq_ir)
        constant_hoister.process(self.artiq_ir)
        if remarks:
            # Optional diagnostics about detected invariants.
            invariant_detection.process(self.artiq_ir)

    def build_llvm_ir(self, target):
        """Compile the module to LLVM IR for the specified target."""
        llvm_ir_generator = transforms.LLVMIRGenerator(
            engine=self.engine, module_name=self.name, target=target,
            embedding_map=self.embedding_map)
        return llvm_ir_generator.process(self.artiq_ir,
            attribute_writeback=self.attribute_writeback)

    def __repr__(self):
        printer = types.TypePrinter()
        globals = ["%s: %s" % (var, printer.name(self.globals[var])) for var in self.globals]
        return "<artiq.compiler.Module %s {\n %s\n}>" % (repr(self.name), ",\n ".join(globals))

View File

@ -1,57 +0,0 @@
"""
The :mod:`prelude` module contains the initial global environment
in which ARTIQ kernels are evaluated.
"""
from . import builtins
def globals():
    """Return the initial (prelude) global environment in which ARTIQ
    kernels are evaluated."""
    # Value constructors
    constructors = {
        "bool": builtins.fn_bool(),
        "int": builtins.fn_int(),
        "float": builtins.fn_float(),
        "str": builtins.fn_str(),
        "bytes": builtins.fn_bytes(),
        "bytearray": builtins.fn_bytearray(),
        "list": builtins.fn_list(),
        "array": builtins.fn_array(),
        "range": builtins.fn_range(),
        "int32": builtins.fn_int32(),
        "int64": builtins.fn_int64(),
    }
    # Exception constructors
    exceptions = {
        "Exception": builtins.fn_Exception(),
        "IndexError": builtins.fn_IndexError(),
        "ValueError": builtins.fn_ValueError(),
        "ZeroDivisionError": builtins.fn_ZeroDivisionError(),
        "RuntimeError": builtins.fn_RuntimeError(),
    }
    # Built-in Python functions
    functions = {
        "len": builtins.fn_len(),
        "round": builtins.fn_round(),
        "abs": builtins.fn_abs(),
        "min": builtins.fn_min(),
        "max": builtins.fn_max(),
        "print": builtins.fn_print(),
    }
    # ARTIQ decorators (portable and rpc share the kernel type)
    decorators = {
        "kernel": builtins.fn_kernel(),
        "portable": builtins.fn_kernel(),
        "rpc": builtins.fn_kernel(),
    }
    # ARTIQ context managers
    context_managers = {
        "parallel": builtins.obj_parallel(),
        "interleave": builtins.obj_interleave(),
        "sequential": builtins.obj_sequential(),
    }
    # ARTIQ time management functions
    timing = {
        "delay": builtins.fn_delay(),
        "now_mu": builtins.fn_now_mu(),
        "delay_mu": builtins.fn_delay_mu(),
        "at_mu": builtins.fn_at_mu(),
    }
    # ARTIQ utility functions
    utility = {
        "rtio_log": builtins.fn_rtio_log(),
        "core_log": builtins.fn_print(),
    }
    return {**constructors, **exceptions, **functions, **decorators,
            **context_managers, **timing, **utility}

View File

@ -1,277 +0,0 @@
import os, sys, tempfile, subprocess, io
from artiq.compiler import types, ir
from llvmlite import ir as ll, binding as llvm
llvm.initialize()
llvm.initialize_all_targets()
llvm.initialize_all_asmprinters()
class RunTool:
    """Run an external tool over temporary files.

    ``pattern`` is the argv list; ``{key}`` placeholders in its elements are
    substituted with generated temporary file names. Each ``tempdata`` entry
    maps a key to either bytes (written to a temp file before the run) or
    None (an empty temp file the tool may write to, reopened for reading
    after the run).

    Used as a context manager; yields a dict of open file objects for the
    None-valued keys, plus ``__stdout__`` holding the tool's captured stdout.
    """
    def __init__(self, pattern, **tempdata):
        self._pattern = pattern
        self._tempdata = tempdata
        self._tempnames = {}
        self._tempfiles = {}

    def __enter__(self):
        for key, data in self._tempdata.items():
            if data is None:
                # Output placeholder: create an empty file for the tool.
                fd, filename = tempfile.mkstemp()
                os.close(fd)
                self._tempnames[key] = filename
            else:
                # Input payload: write it to a named temp file.
                with tempfile.NamedTemporaryFile(delete=False) as f:
                    f.write(data)
                    self._tempnames[key] = f.name

        cmdline = []
        for argument in self._pattern:
            cmdline.append(argument.format(**self._tempnames))

        # https://bugs.python.org/issue17023
        windows = os.name == "nt"
        process = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                   universal_newlines=True, shell=windows)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            # NOTE(review): raising here leaks the temp files created above,
            # since __exit__ never runs when __enter__ raises — confirm this
            # is acceptable for a debugging-oriented tool wrapper.
            raise Exception("{} invocation failed: {}".
                            format(cmdline[0], stderr))

        self._tempfiles["__stdout__"] = io.StringIO(stdout)
        for key in self._tempdata:
            if self._tempdata[key] is None:
                self._tempfiles[key] = open(self._tempnames[key], "rb")
        return self._tempfiles

    def __exit__(self, exc_typ, exc_value, exc_trace):
        for file in self._tempfiles.values():
            file.close()
        for filename in self._tempnames.values():
            os.unlink(filename)
def _dump(target, kind, suffix, content):
    """Write a compiler intermediate to a file for debugging.

    :param target: None to skip dumping entirely; "" to dump into a fresh
        temporary file; otherwise the output path prefix (``suffix`` is
        appended).
    :param kind: human-readable description, used in stderr messages.
    :param suffix: file name suffix, e.g. ``".ll"``.
    :param content: zero-argument callable producing the data (str or
        bytes); only invoked when a dump actually happens.
    """
    if target is None:
        return
    print("====== {} DUMP ======".format(kind.upper()), file=sys.stderr)
    content_value = content()
    if isinstance(content_value, str):
        content_value = bytes(content_value, 'utf-8')
    if target == "":
        file = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
    else:
        file = open(target + suffix, "wb")
    # `with` guarantees the file is closed even if the write fails;
    # `file.name` remains accessible after close.
    with file:
        file.write(content_value)
    print("{} dumped as {}".format(kind, file.name), file=sys.stderr)
class Target:
    """
    A description of the target environment where the binaries
    generated by the ARTIQ compiler will be deployed.

    :var triple: (string)
        LLVM target triple, e.g. ``"riscv32"``
    :var data_layout: (string)
        LLVM target data layout, e.g. ``"E-m:e-p:32:32-i64:32-f64:32-v64:32-v128:32-a:0:32-n32"``
    :var features: (list of string)
        LLVM target CPU features, e.g. ``["mul", "div", "ffl1"]``
    :var print_function: (string)
        Name of a formatted print functions (with the signature of ``printf``)
        provided by the target, e.g. ``"printf"``.
    :var now_pinning: (boolean)
        Whether the target implements the now-pinning RTIO optimization.
    """
    triple = "unknown"
    data_layout = ""
    features = []
    print_function = "printf"
    now_pinning = True

    # Names of the LLVM binutils used for linking and post-processing.
    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
    tool_addr2line = "llvm-addr2line"
    tool_cxxfilt = "llvm-cxxfilt"

    def __init__(self):
        self.llcontext = ll.Context()

    def target_machine(self):
        # Build an LLVM TargetMachine for this target (PIC, default codemodel).
        lltarget = llvm.Target.from_triple(self.triple)
        llmachine = lltarget.create_target_machine(
            features=",".join(["+{}".format(f) for f in self.features]),
            reloc="pic", codemodel="default")
        llmachine.set_asm_verbosity(True)
        return llmachine

    def optimize(self, llmodule):
        """Run the module-level LLVM optimization pipeline in place."""
        llpassmgr = llvm.create_module_pass_manager()

        # Register our alias analysis passes.
        llpassmgr.add_basic_alias_analysis_pass()
        llpassmgr.add_type_based_alias_analysis_pass()

        # Start by cleaning up after our codegen and exposing as much
        # information to LLVM as possible.
        llpassmgr.add_constant_merge_pass()
        llpassmgr.add_cfg_simplification_pass()
        llpassmgr.add_instruction_combining_pass()
        llpassmgr.add_sroa_pass()
        llpassmgr.add_dead_code_elimination_pass()
        llpassmgr.add_function_attrs_pass()
        llpassmgr.add_global_optimizer_pass()

        # Now, actually optimize the code.
        llpassmgr.add_function_inlining_pass(275)
        llpassmgr.add_ipsccp_pass()
        llpassmgr.add_instruction_combining_pass()
        llpassmgr.add_gvn_pass()
        llpassmgr.add_cfg_simplification_pass()
        llpassmgr.add_licm_pass()

        # Clean up after optimizing.
        llpassmgr.add_dead_arg_elimination_pass()
        llpassmgr.add_global_dce_pass()

        llpassmgr.run(llmodule)

    def compile(self, module):
        """Compile the module to a relocatable object for this target."""
        # Optional debugging dumps are controlled by ARTIQ_DUMP_* env vars.
        if os.getenv("ARTIQ_DUMP_SIG"):
            print("====== MODULE_SIGNATURE DUMP ======", file=sys.stderr)
            print(module, file=sys.stderr)

        if os.getenv("ARTIQ_IR_NO_LOC") is not None:
            ir.BasicBlock._dump_loc = False

        type_printer = types.TypePrinter()
        _dump(os.getenv("ARTIQ_DUMP_IR"), "ARTIQ IR", ".txt",
              lambda: "\n".join(fn.as_entity(type_printer) for fn in module.artiq_ir))

        llmod = module.build_llvm_ir(self)

        try:
            llparsedmod = llvm.parse_assembly(str(llmod))
            llparsedmod.verify()
        except RuntimeError:
            # Keep a copy of the invalid IR around for post-mortem debugging.
            _dump("", "LLVM IR (broken)", ".ll", lambda: str(llmod))
            raise

        _dump(os.getenv("ARTIQ_DUMP_UNOPT_LLVM"), "LLVM IR (generated)", "_unopt.ll",
              lambda: str(llparsedmod))

        self.optimize(llparsedmod)
        _dump(os.getenv("ARTIQ_DUMP_LLVM"), "LLVM IR (optimized)", ".ll",
              lambda: str(llparsedmod))

        return llparsedmod

    def assemble(self, llmodule):
        """Emit a relocatable object (bytes) for the given LLVM module."""
        llmachine = self.target_machine()

        _dump(os.getenv("ARTIQ_DUMP_ASM"), "Assembly", ".s",
              lambda: llmachine.emit_assembly(llmodule))
        _dump(os.getenv("ARTIQ_DUMP_OBJ"), "Object file", ".o",
              lambda: llmachine.emit_object(llmodule))

        return llmachine.emit_object(llmodule)

    def link(self, objects):
        """Link the relocatable objects into a shared library for this target."""
        with RunTool([self.tool_ld, "-shared", "--eh-frame-hdr"] +
                     ["-T" + os.path.join(os.path.dirname(__file__), "kernel.ld")] +
                     ["{{obj{}}}".format(index) for index in range(len(objects))] +
                     ["-x"] +
                     ["-o", "{output}"],
                     output=None,
                     **{"obj{}".format(index): obj for index, obj in enumerate(objects)}) \
                as results:
            library = results["output"].read()

            _dump(os.getenv("ARTIQ_DUMP_ELF"), "Shared library", ".elf",
                  lambda: library)

            return library

    def compile_and_link(self, modules):
        """Convenience wrapper: compile, assemble, and link all *modules*."""
        return self.link([self.assemble(self.compile(module)) for module in modules])

    def strip(self, library):
        """Return *library* with debug information stripped."""
        with RunTool([self.tool_strip, "--strip-debug", "{library}", "-o", "{output}"],
                     library=library, output=None) \
                as results:
            return results["output"].read()

    def symbolize(self, library, addresses):
        """Resolve return addresses to (filename, line, column, function,
        address) backtrace entries using addr2line."""
        if addresses == []:
            return []

        # We got a list of return addresses, i.e. addresses of instructions
        # just after the call. Offset them back to get an address somewhere
        # inside the call instruction (or its delay slot), since that's what
        # the backtrace entry should point at.
        offset_addresses = [hex(addr - 1) for addr in addresses]
        with RunTool([self.tool_addr2line, "--addresses", "--functions", "--inlines",
                      "--demangle", "--exe={library}"] + offset_addresses,
                     library=library) \
                as results:
            lines = iter(results["__stdout__"].read().rstrip().split("\n"))
            backtrace = []
            while True:
                try:
                    address_or_function = next(lines)
                except StopIteration:
                    break
                if address_or_function[:2] == "0x":
                    address = int(address_or_function[2:], 16) + 1 # remove offset
                    function = next(lines)
                else:
                    address = backtrace[-1][4] # inlined
                    function = address_or_function
                location = next(lines)

                filename, line = location.rsplit(":", 1)
                if filename == "??" or filename == "<synthesized>":
                    continue
                if line == "?":
                    line = -1
                else:
                    line = int(line)
                # can't get column out of addr2line D:
                backtrace.append((filename, line, -1, function, address))
            return backtrace

    def demangle(self, names):
        """Demangle a list of C++ symbol names via c++filt."""
        with RunTool([self.tool_cxxfilt] + names) as results:
            return results["__stdout__"].read().rstrip().split("\n")
class NativeTarget(Target):
    """A target matching the host machine, used for JIT execution."""
    def __init__(self):
        super().__init__()
        self.triple = llvm.get_default_triple()
        # Bug fix: the host data layout was computed into a local variable
        # and then discarded, leaving the base class's empty data_layout in
        # effect. Store it so generated modules use the host layout.
        host_data_layout = str(llvm.targets.Target.from_default_triple().create_target_machine().target_data)
        self.data_layout = host_data_layout
class RISCVTarget(Target):
    """RV32IMA core device target (e.g. the RISC-V softcore gateware)."""
    triple = "riscv32-unknown-linux"
    data_layout = "e-m:e-p:32:32-i64:64-n32-S128"
    features = ["m", "a"]
    print_function = "core_log"
    now_pinning = True

    # NOTE(review): these four repeat the base-class defaults verbatim;
    # presumably kept for explicitness — they could be removed.
    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
    tool_addr2line = "llvm-addr2line"
    tool_cxxfilt = "llvm-cxxfilt"
class CortexA9Target(Target):
    """ARM Cortex-A9 core device target (e.g. Zynq-based hardware)."""
    triple = "armv7-unknown-linux-gnueabihf"
    data_layout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
    features = ["dsp", "fp16", "neon", "vfp3"]
    print_function = "core_log"
    now_pinning = False

    # NOTE(review): these four repeat the base-class defaults verbatim;
    # presumably kept for explicitness — they could be removed.
    tool_ld = "ld.lld"
    tool_strip = "llvm-strip"
    tool_addr2line = "llvm-addr2line"
    tool_cxxfilt = "llvm-cxxfilt"

View File

@ -1,21 +0,0 @@
import time, cProfile as profile, pstats
def benchmark(f, name):
    """Profile *f* under cProfile, repeating it for at least 5 seconds of
    wall-clock time and at least 10 runs, then print timing and the top 10
    entries by own time."""
    profiler = profile.Profile()
    profiler.enable()

    started = time.perf_counter()
    finished = 0
    iterations = 0
    while finished - started < 5 or iterations < 10:
        f()
        iterations += 1
        finished = time.perf_counter()

    profiler.create_stats()

    elapsed = finished - started
    print("{} {} runs: {:.2f}s, {:.2f}ms/run".format(
        iterations, name, elapsed, elapsed / iterations * 1000))

    stats = pstats.Stats(profiler)
    stats.strip_dirs().sort_stats('time').print_stats(10)

View File

@ -1,46 +0,0 @@
import sys, os, tokenize
from artiq.master.databases import DeviceDB
from artiq.master.worker_db import DeviceManager
import artiq.coredevice.core
from artiq.coredevice.core import Core, CompileError
def _render_diagnostic(diagnostic, colored):
    # Render each diagnostic line on its own row; `colored` is accepted for
    # interface compatibility with the function this replaces but is unused.
    rendered_lines = diagnostic.render(only_line=True)
    return "\n".join(rendered_lines)
artiq.coredevice.core._render_diagnostic = _render_diagnostic
def main():
    """Run (or, with +compile, only compile) a testcase's `entrypoint`
    kernel against the device database located next to the testcase file.

    Flags must precede the filename: `+diag` suppresses the failure exit on
    compile errors, `+compile` skips execution on hardware.
    """
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False

    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False

    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    dmgr = DeviceManager(DeviceDB(ddb_path))

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        # The testcase gets the device manager injected as `dmgr`.
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)

    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
    except CompileError as error:
        # With +diag the rendered diagnostics are the expected output;
        # otherwise a compile error fails the run.
        if not diag:
            exit(1)

if __name__ == "__main__":
    main()

View File

@ -1,103 +0,0 @@
import sys, fileinput, os
from pythonparser import source, diagnostic, algorithm, parse_buffer
from .. import prelude, types
from ..transforms import ASTTypedRewriter, Inferencer, IntMonomorphizer, CastMonomorphizer
from ..transforms import IODelayEstimator
from ..validators import ConstnessValidator
class Printer(algorithm.Visitor):
    """
    :class:`Printer` prints ``:`` and the node type after every typed node,
    and ``->`` and the node type before the colon in a function definition.

    In almost all cases (except function definition) this does not result
    in valid Python syntax.

    :ivar rewriter: (:class:`pythonparser.source.Rewriter`) rewriter instance
    """

    def __init__(self, buf):
        self.rewriter = source.Rewriter(buf)
        self.type_printer = types.TypePrinter()

    def rewrite(self):
        # Produce the annotated source text.
        return self.rewriter.rewrite()

    def visit_FunctionDefT(self, node):
        super().generic_visit(node)

        # Annotate the return type before the `:` of the def.
        self.rewriter.insert_before(node.colon_loc,
                                    "->{}".format(self.type_printer.name(node.return_type)))

    def visit_ExceptHandlerT(self, node):
        super().generic_visit(node)

        # Only `except E as name:` handlers have a name to annotate.
        if node.name_loc:
            self.rewriter.insert_after(node.name_loc,
                                       ":{}".format(self.type_printer.name(node.name_type)))

    def visit_ForT(self, node):
        super().generic_visit(node)

        # Annotate loops whose trip count and interval were inferred.
        if node.trip_count is not None and node.trip_interval is not None:
            self.rewriter.insert_after(node.keyword_loc,
                                       "[{} x {} mu]".format(node.trip_count.fold(),
                                                             node.trip_interval.fold()))

    def generic_visit(self, node):
        super().generic_visit(node)

        # Any other node carrying a type gets `:type` appended.
        if hasattr(node, "type"):
            self.rewriter.insert_after(node.loc,
                                       ":{}".format(self.type_printer.name(node.type)))
def main():
    """Pretty-print the typedtree for source read from stdin or argv files.

    Leading flags (order-sensitive, consumed left to right): `+mono` runs the
    monomorphization passes, `+iodelay` runs the IO delay estimator, `+diag`
    renders diagnostics one-per-line and exits cleanly on fatal ones.
    """
    if len(sys.argv) > 1 and sys.argv[1] == "+mono":
        del sys.argv[1]
        monomorphize = True
    else:
        monomorphize = False

    if len(sys.argv) > 1 and sys.argv[1] == "+iodelay":
        del sys.argv[1]
        iodelay = True
    else:
        iodelay = False

    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        def process_diagnostic(diag):
            print("\n".join(diag.render(only_line=True)))
            if diag.level == "fatal":
                exit()
    else:
        def process_diagnostic(diag):
            print("\n".join(diag.render()))
            if diag.level in ("fatal", "error"):
                exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    buf = source.Buffer("".join(fileinput.input()).expandtabs(),
                        os.path.basename(fileinput.filename()))
    parsed, comments = parse_buffer(buf, engine=engine)
    typed = ASTTypedRewriter(engine=engine, prelude=prelude.globals()).visit(parsed)
    Inferencer(engine=engine).visit(typed)
    ConstnessValidator(engine=engine).visit(typed)

    if monomorphize:
        CastMonomorphizer(engine=engine).visit(typed)
        IntMonomorphizer(engine=engine).visit(typed)
        # Re-run inference after monomorphization.
        Inferencer(engine=engine).visit(typed)
    if iodelay:
        IODelayEstimator(engine=engine, ref_period=1e6).visit_fixpoint(typed)

    printer = Printer(buf)
    printer.visit(typed)

    # Strip CHECK comments so expected-output markers do not appear in the dump.
    for comment in comments:
        if comment.text.find("CHECK") >= 0:
            printer.rewriter.remove(comment.loc)

    print(printer.rewrite().source)

if __name__ == "__main__":
    main()

View File

@ -1,23 +0,0 @@
import sys, os, fileinput
from pythonparser import diagnostic
from .. import ir
from ..module import Module, Source
def main():
    """Dump ARTIQ IR for Python source read from stdin or argv files."""
    if os.getenv("ARTIQ_IR_NO_LOC") is not None:
        # Suppress source locations in the dump (stable output for tests).
        ir.BasicBlock._dump_loc = False

    def process_diagnostic(diag):
        print("\n".join(diag.render()))
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    mod = Module(Source.from_string("".join(fileinput.input()).expandtabs(), engine=engine))

    for fn in mod.artiq_ir:
        print(fn)

if __name__ == "__main__":
    main()

View File

@ -1,35 +0,0 @@
import os, sys, fileinput, ctypes
from pythonparser import diagnostic
from llvmlite import binding as llvm
from ..module import Module, Source
from ..targets import NativeTarget
def main():
    """JIT-compile source read from stdin/argv and run its __modinit__.

    If LIBARTIQ_SUPPORT is set, the support library is loaded so runtime
    symbols resolve.
    """
    libartiq_support = os.getenv("LIBARTIQ_SUPPORT")
    if libartiq_support is not None:
        llvm.load_library_permanently(libartiq_support)

    def process_diagnostic(diag):
        print("\n".join(diag.render()))
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    source = "".join(fileinput.input())
    # Strip the #ARTIQ# markers used by testcases.
    source = source.replace("#ARTIQ#", "")
    mod = Module(Source.from_string(source.expandtabs(), engine=engine))

    target = NativeTarget()
    llmod = mod.build_llvm_ir(target)
    llparsedmod = llvm.parse_assembly(str(llmod))
    llparsedmod.verify()

    llmachine = llvm.Target.from_triple(target.triple).create_target_machine()
    lljit = llvm.create_mcjit_compiler(llparsedmod, llmachine)
    llmain = lljit.get_function_address(llmod.name + ".__modinit__")
    # Invoke the module initializer through a no-argument C function pointer.
    ctypes.CFUNCTYPE(None)(llmain)()

if __name__ == "__main__":
    main()

View File

@ -1,31 +0,0 @@
import sys, fileinput
from pythonparser import diagnostic
from llvmlite import ir as ll
from ..module import Module, Source
from ..targets import NativeTarget
def main():
    """Print LLVM IR for source read from stdin/argv, with a `main` wrapper
    added so the output can be executed directly with lli."""
    def process_diagnostic(diag):
        print("\n".join(diag.render()))
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    mod = Module(Source.from_string("".join(fileinput.input()).expandtabs(), engine=engine))

    target = NativeTarget()
    llmod = mod.build_llvm_ir(target=target)

    # Add main so that the result can be executed with lli
    llmain = ll.Function(llmod, ll.FunctionType(ll.VoidType(), []), "main")
    llbuilder = ll.IRBuilder(llmain.append_basic_block("entry"))
    llbuilder.call(llmod.get_global(llmod.name + ".__modinit__"), [
        ll.Constant(ll.IntType(8).as_pointer(), None)])
    llbuilder.ret_void()

    print(llmod)

if __name__ == "__main__":
    main()

View File

@ -1,37 +0,0 @@
import sys, os
from pythonparser import diagnostic
from ..module import Module, Source
from ..targets import RISCVTarget
from . import benchmark
def main():
    """Benchmark parsing/inference, transforms/validators, and LLVM
    optimization/linking for a single module file."""
    if not len(sys.argv) == 2:
        print("Expected exactly one module filename", file=sys.stderr)
        exit(1)

    def process_diagnostic(diag):
        print("\n".join(diag.render()), file=sys.stderr)
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    # Make sure everything's valid
    filename = sys.argv[1]
    with open(filename) as f:
        code = f.read()
    source = Source.from_string(code, filename, engine=engine)
    module = Module(source)

    benchmark(lambda: Source.from_string(code, filename),
              "ARTIQ parsing and inference")
    benchmark(lambda: Module(source),
              "ARTIQ transforms and validators")
    benchmark(lambda: RISCVTarget().compile_and_link([module]),
              "LLVM optimization and linking")

if __name__ == "__main__":
    main()

View File

@ -1,72 +0,0 @@
import sys, os, tokenize
from pythonparser import diagnostic
from ...language.environment import ProcessArgumentManager
from ...master.databases import DeviceDB, DatasetDB
from ...master.worker_db import DeviceManager, DatasetManager
from ..module import Module
from ..embedding import Stitcher
from ..targets import RISCVTarget
from . import benchmark
def main():
    """Benchmark each compilation stage (embedding, transforms, LLVM
    optimization, code emission, linking, stripping) for a testcase that
    defines a `Benchmark` experiment class."""
    if not len(sys.argv) == 2:
        print("Expected exactly one module filename", file=sys.stderr)
        exit(1)

    def process_diagnostic(diag):
        print("\n".join(diag.render()), file=sys.stderr)
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench'}
        exec(testcase_code, testcase_vars)

    # Device and dataset databases are looked up next to the testcase file.
    device_db_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    device_mgr = DeviceManager(DeviceDB(device_db_path))

    dataset_db_path = os.path.join(os.path.dirname(sys.argv[1]), "dataset_db.pyon")
    dataset_mgr = DatasetManager(DatasetDB(dataset_db_path))

    argument_mgr = ProcessArgumentManager({})

    def embed():
        # Stitch the experiment's run() kernel into a compilable unit.
        experiment = testcase_vars["Benchmark"]((device_mgr, dataset_mgr, argument_mgr))

        stitcher = Stitcher(core=experiment.core, dmgr=device_mgr)
        stitcher.stitch_call(experiment.run, (), {})
        stitcher.finalize()
        return stitcher

    # Run the full pipeline once so each stage's inputs are available.
    stitcher = embed()
    module = Module(stitcher)
    target = RISCVTarget()
    llvm_ir = target.compile(module)
    elf_obj = target.assemble(llvm_ir)
    elf_shlib = target.link([elf_obj])

    benchmark(lambda: embed(),
              "ARTIQ embedding")
    benchmark(lambda: Module(stitcher),
              "ARTIQ transforms and validators")
    benchmark(lambda: target.compile(module),
              "LLVM optimizations")
    benchmark(lambda: target.assemble(llvm_ir),
              "LLVM machine code emission")
    benchmark(lambda: target.link([elf_obj]),
              "Linking")
    benchmark(lambda: target.strip(elf_shlib),
              "Stripping debug information")

if __name__ == "__main__":
    main()

View File

@ -1,30 +0,0 @@
import sys, os
from pythonparser import diagnostic
from ..module import Module, Source
from ..targets import RISCVTarget
def main():
    """Compile one or more module files and link them into a shared library
    written next to the last input file (with a .so extension)."""
    if not len(sys.argv) > 1:
        print("Expected at least one module filename", file=sys.stderr)
        exit(1)

    def process_diagnostic(diag):
        print("\n".join(diag.render()), file=sys.stderr)
        if diag.level in ("fatal", "error"):
            exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    modules = []
    for filename in sys.argv[1:]:
        modules.append(Module(Source.from_filename(filename, engine=engine)))

    llobj = RISCVTarget().compile_and_link(modules)

    basename, ext = os.path.splitext(sys.argv[-1])
    with open(basename + ".so", "wb") as f:
        f.write(llobj)

if __name__ == "__main__":
    main()

View File

@ -1,44 +0,0 @@
import sys, fileinput
from pythonparser import diagnostic
from ..module import Module, Source
from .. import types, iodelay
def main():
    """Print the module signature (repr of the Module) for source read from
    stdin/argv.

    With `+diag`, diagnostics are rendered one-per-line and failures exit
    cleanly; with `+delay`, indeterminate delay causes are also reported.
    """
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
        def process_diagnostic(diag):
            print("\n".join(diag.render(only_line=True)))
            if diag.level == "fatal":
                exit()
    else:
        diag = False
        def process_diagnostic(diag):
            print("\n".join(diag.render(colored=False)))
            if diag.level in ("fatal", "error"):
                exit(1)

    if len(sys.argv) > 1 and sys.argv[1] == "+delay":
        del sys.argv[1]
        force_delays = True
    else:
        force_delays = False

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    try:
        mod = Module(Source.from_string("".join(fileinput.input()).expandtabs(), engine=engine))

        if force_delays:
            for var in mod.globals:
                typ = mod.globals[var].find()
                if types.is_function(typ) and types.is_indeterminate_delay(typ.delay):
                    process_diagnostic(typ.delay.find().cause)

        print(repr(mod))
    except:
        # NOTE(review): bare except — presumably intended to also swallow the
        # SystemExit raised by the +diag handler so the tool ends cleanly
        # after rendering; everything is re-raised in non-diagnostic mode.
        if not diag: raise

if __name__ == "__main__":
    main()

View File

@ -1,12 +0,0 @@
from .asttyped_rewriter import ASTTypedRewriter
from .inferencer import Inferencer
from .int_monomorphizer import IntMonomorphizer
from .cast_monomorphizer import CastMonomorphizer
from .iodelay_estimator import IODelayEstimator
from .artiq_ir_generator import ARTIQIRGenerator
from .dead_code_eliminator import DeadCodeEliminator
from .local_demoter import LocalDemoter
from .constant_hoister import ConstantHoister
from .interleaver import Interleaver
from .typedtree_printer import TypedtreePrinter
from .llvm_ir_generator import LLVMIRGenerator

File diff suppressed because it is too large Load Diff

View File

@ -1,526 +0,0 @@
"""
:class:`ASTTypedRewriter` rewrites a parsetree (:mod:`pythonparser.ast`)
to a typedtree (:mod:`..asttyped`).
"""
from collections import OrderedDict
from pythonparser import ast, algorithm, diagnostic
from .. import asttyped, types, builtins
# This visitor will be called for every node with a scope,
# i.e.: class, function, comprehension, lambda
class LocalExtractor(algorithm.Visitor):
    def __init__(self, env_stack, engine):
        """Collect names bound in a single scope.

        :param env_stack: stack of enclosing typing environments; index 0 is
            the outermost scope
        :param engine: diagnostic engine for reporting scoping errors
        """
        super().__init__()
        self.env_stack = env_stack
        self.engine = engine
        self.in_root = False
        self.in_assign = False
        self.typing_env = OrderedDict()

        # which names are global have to be recorded in the current scope
        self.global_ = set()

        # which names are nonlocal only affects whether the current scope
        # gets a new binding or not, so we throw this away
        self.nonlocal_ = set()

        # parameters can't be declared as global or nonlocal
        self.params = set()
def visit_in_assign(self, node, in_assign):
try:
old_in_assign, self.in_assign = self.in_assign, in_assign
return self.visit(node)
finally:
self.in_assign = old_in_assign
    def visit_Assign(self, node):
        # Value first (plain reads), then targets (which bind names).
        self.visit(node.value)
        self.visit_in_assign(node.targets, in_assign=True)

    def visit_For(self, node):
        self.visit(node.iter)
        # The loop target binds a name in this scope.
        self.visit_in_assign(node.target, in_assign=True)
        self.visit(node.body)
        self.visit(node.orelse)

    def visit_withitem(self, node):
        self.visit(node.context_expr)
        # `with ... as x:` binds x.
        self.visit_in_assign(node.optional_vars, in_assign=True)

    def visit_comprehension(self, node):
        self.visit(node.iter)
        # The comprehension target binds a name.
        self.visit_in_assign(node.target, in_assign=True)
        self.visit(node.ifs)
    def visit_generator(self, node):
        # Comprehensions form their own scope: only descend when this
        # extractor was created for that scope (first visit), then stop.
        if self.in_root:
            return
        self.in_root = True
        # Generators are visited innermost-last in source order.
        self.visit(list(reversed(node.generators)))
        self.visit(node.elt)

    visit_ListComp = visit_generator
    visit_SetComp = visit_generator
    visit_GeneratorExp = visit_generator

    def visit_DictComp(self, node):
        # Same as visit_generator, but a dict comprehension has a key and a
        # value expression instead of a single element.
        if self.in_root:
            return
        self.in_root = True
        self.visit(list(reversed(node.generators)))
        self.visit(node.key)
        self.visit(node.value)
def visit_root(self, node):
if self.in_root:
return
self.in_root = True
self.generic_visit(node)
visit_Module = visit_root # don't look at inner scopes
visit_ClassDef = visit_root
visit_Lambda = visit_root
def visit_FunctionDef(self, node):
if self.in_root:
self._assignable(node.name)
self.visit_root(node)
def _assignable(self, name):
assert name is not None
if name not in self.typing_env and name not in self.nonlocal_:
self.typing_env[name] = types.TVar()
def visit_arg(self, node):
if node.arg in self.params:
diag = diagnostic.Diagnostic("error",
"duplicate parameter '{name}'", {"name": node.arg},
node.loc)
self.engine.process(diag)
return
self._assignable(node.arg)
self.params.add(node.arg)
def visit_Name(self, node):
if self.in_assign:
# code like:
# x = 1
# def f():
# x = 1
# creates a new binding for x in f's scope
self._assignable(node.id)
def visit_Attribute(self, node):
self.visit_in_assign(node.value, in_assign=False)
def visit_Subscript(self, node):
self.visit_in_assign(node.value, in_assign=False)
self.visit_in_assign(node.slice, in_assign=False)
def _check_not_in(self, name, names, curkind, newkind, loc):
if name in names:
diag = diagnostic.Diagnostic("error",
"name '{name}' cannot be {curkind} and {newkind} simultaneously",
{"name": name, "curkind": curkind, "newkind": newkind}, loc)
self.engine.process(diag)
return True
return False
def visit_Global(self, node):
for name, loc in zip(node.names, node.name_locs):
if self._check_not_in(name, self.nonlocal_, "nonlocal", "global", loc) or \
self._check_not_in(name, self.params, "a parameter", "global", loc):
continue
self.global_.add(name)
if len(self.env_stack) == 1:
self._assignable(name) # already in global scope
else:
if name not in self.env_stack[1]:
self.env_stack[1][name] = types.TVar()
self.typing_env[name] = self.env_stack[1][name]
def visit_Nonlocal(self, node):
for name, loc in zip(node.names, node.name_locs):
if self._check_not_in(name, self.global_, "global", "nonlocal", loc) or \
self._check_not_in(name, self.params, "a parameter", "nonlocal", loc):
continue
# nonlocal does not search prelude and global scopes
found = False
for outer_env in reversed(self.env_stack[2:]):
if name in outer_env:
found = True
break
if not found:
diag = diagnostic.Diagnostic("error",
"cannot declare name '{name}' as nonlocal: it is not bound in any outer scope",
{"name": name},
loc, [node.keyword_loc])
self.engine.process(diag)
continue
self.nonlocal_.add(name)
def visit_ExceptHandler(self, node):
self.visit(node.type)
if node.name is not None:
self._assignable(node.name)
for stmt in node.body:
self.visit(stmt)
class ASTTypedRewriter(algorithm.Transformer):
    """
    :class:`ASTTypedRewriter` converts an untyped AST to a typed AST
    where all type fields of non-literals are filled with fresh type variables,
    and type fields of literals are filled with corresponding types.
    :class:`ASTTypedRewriter` also discovers the scope of variable bindings
    via :class:`LocalExtractor`.
    """
    def __init__(self, engine, prelude):
        self.engine = engine
        # Filled in by visit_Module with the module's typing environment.
        self.globals = None
        self.env_stack = [prelude]
        # The ClassDefT being visited, if any; used for attribute lookup.
        self.in_class = None
    def _try_find_name(self, name):
        # Look up *name*: class attributes first (when inside a class body),
        # then the enclosing environments, innermost first.
        if self.in_class is not None:
            typ = self.in_class.constructor_type.attributes.get(name)
            if typ is not None:
                return typ
        for typing_env in reversed(self.env_stack):
            if name in typing_env:
                return typing_env[name]
    def _find_name(self, name, loc):
        # Like _try_find_name, but a failed lookup is a fatal diagnostic.
        typ = self._try_find_name(name)
        if typ is not None:
            return typ
        diag = diagnostic.Diagnostic("fatal",
            "undefined variable '{name}'", {"name":name}, loc)
        self.engine.process(diag)
    # Visitors that replace node with a typed node
    #
    def visit_Module(self, node):
        extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
        extractor.visit(node)
        node = asttyped.ModuleT(
            typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
            body=node.body, loc=node.loc)
        self.globals = node.typing_env
        try:
            self.env_stack.append(node.typing_env)
            return self.generic_visit(node)
        finally:
            self.env_stack.pop()
    def visit_FunctionDef(self, node):
        extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
        extractor.visit(node)
        # The function's own name must already be bound in the outer scope.
        signature_type = self._find_name(node.name, node.name_loc)
        node = asttyped.FunctionDefT(
            typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
            signature_type=signature_type, return_type=types.TVar(),
            name=node.name, args=node.args, returns=node.returns,
            body=node.body, decorator_list=node.decorator_list,
            keyword_loc=node.keyword_loc, name_loc=node.name_loc,
            arrow_loc=node.arrow_loc, colon_loc=node.colon_loc, at_locs=node.at_locs,
            loc=node.loc)
        try:
            self.env_stack.append(node.typing_env)
            return self.generic_visit(node)
        finally:
            self.env_stack.pop()
    def visit_ClassDef(self, node):
        if any(node.bases) or any(node.keywords) or \
                node.starargs is not None or node.kwargs is not None:
            diag = diagnostic.Diagnostic("error",
                "inheritance is not supported", {},
                node.lparen_loc.join(node.rparen_loc))
            self.engine.process(diag)
        for child in node.body:
            if isinstance(child, (ast.Assign, ast.FunctionDef, ast.Pass)):
                continue
            diag = diagnostic.Diagnostic("fatal",
                "class body must contain only assignments and function definitions", {},
                child.loc)
            self.engine.process(diag)
        if node.name in self.env_stack[-1]:
            diag = diagnostic.Diagnostic("fatal",
                "variable '{name}' is already defined", {"name":node.name}, node.name_loc)
            self.engine.process(diag)
        extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
        extractor.visit(node)
        # Now we create two types.
        # The first type is the type of instances created by the constructor.
        # Its attributes are those of the class environment, but wrapped
        # appropriately so that they are linked to the class from which they
        # originate.
        instance_type = types.TInstance(node.name, OrderedDict())
        # The second type is the type of the constructor itself (in other words,
        # the class object): it is simply a singleton type that has the class
        # environment as attributes.
        constructor_type = types.TConstructor(instance_type)
        constructor_type.attributes = extractor.typing_env
        instance_type.constructor = constructor_type
        self.env_stack[-1][node.name] = constructor_type
        node = asttyped.ClassDefT(
            constructor_type=constructor_type,
            name=node.name,
            bases=self.visit(node.bases), keywords=self.visit(node.keywords),
            starargs=self.visit(node.starargs), kwargs=self.visit(node.kwargs),
            body=node.body,
            decorator_list=self.visit(node.decorator_list),
            keyword_loc=node.keyword_loc, name_loc=node.name_loc,
            lparen_loc=node.lparen_loc, star_loc=node.star_loc,
            dstar_loc=node.dstar_loc, rparen_loc=node.rparen_loc,
            colon_loc=node.colon_loc, at_locs=node.at_locs,
            loc=node.loc)
        try:
            old_in_class, self.in_class = self.in_class, node
            return self.generic_visit(node)
        finally:
            self.in_class = old_in_class
    def visit_arg(self, node):
        if node.annotation is not None:
            diag = diagnostic.Diagnostic("fatal",
                "type annotations are not supported here", {},
                node.annotation.loc)
            self.engine.process(diag)
        return asttyped.argT(type=self._find_name(node.arg, node.loc),
                             arg=node.arg, annotation=None,
                             arg_loc=node.arg_loc, colon_loc=node.colon_loc, loc=node.loc)
    def visit_Num(self, node):
        if isinstance(node.n, int):
            typ = builtins.TInt()
        elif isinstance(node.n, float):
            typ = builtins.TFloat()
        else:
            # e.g. complex literals.
            diag = diagnostic.Diagnostic("fatal",
                "numeric type {type} is not supported", {"type": node.n.__class__.__name__},
                node.loc)
            self.engine.process(diag)
        return asttyped.NumT(type=typ,
                             n=node.n, loc=node.loc)
    def visit_Str(self, node):
        if isinstance(node.s, str):
            typ = builtins.TStr()
        elif isinstance(node.s, bytes):
            typ = builtins.TBytes()
        else:
            assert False
        return asttyped.StrT(type=typ, s=node.s,
                             begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
    def visit_Name(self, node):
        return asttyped.NameT(type=self._find_name(node.id, node.loc),
                              id=node.id, ctx=node.ctx, loc=node.loc)
    def visit_NameConstant(self, node):
        # NameConstant is only ever True/False/None per the grammar.
        if node.value is True or node.value is False:
            typ = builtins.TBool()
        elif node.value is None:
            typ = builtins.TNone()
        return asttyped.NameConstantT(type=typ, value=node.value, loc=node.loc)
    def visit_Tuple(self, node):
        node = self.generic_visit(node)
        return asttyped.TupleT(type=types.TTuple([x.type for x in node.elts]),
                               elts=node.elts, ctx=node.ctx, loc=node.loc)
    def visit_List(self, node):
        node = self.generic_visit(node)
        node = asttyped.ListT(type=builtins.TList(),
                              elts=node.elts, ctx=node.ctx,
                              begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
        return self.visit(node)
    def visit_Attribute(self, node):
        node = self.generic_visit(node)
        node = asttyped.AttributeT(type=types.TVar(),
                                   value=node.value, attr=node.attr, ctx=node.ctx,
                                   dot_loc=node.dot_loc, attr_loc=node.attr_loc, loc=node.loc)
        return self.visit(node)
    def visit_Slice(self, node):
        node = self.generic_visit(node)
        node = asttyped.SliceT(type=types.TVar(),
                               lower=node.lower, upper=node.upper, step=node.step,
                               bound_colon_loc=node.bound_colon_loc,
                               step_colon_loc=node.step_colon_loc,
                               loc=node.loc)
        return self.visit(node)
    def visit_Subscript(self, node):
        node = self.generic_visit(node)
        node = asttyped.SubscriptT(type=types.TVar(),
                                   value=node.value, slice=node.slice, ctx=node.ctx,
                                   begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
        return self.visit(node)
    def visit_BoolOp(self, node):
        node = self.generic_visit(node)
        node = asttyped.BoolOpT(type=types.TVar(),
                                op=node.op, values=node.values,
                                op_locs=node.op_locs, loc=node.loc)
        return self.visit(node)
    def visit_UnaryOp(self, node):
        node = self.generic_visit(node)
        node = asttyped.UnaryOpT(type=types.TVar(),
                                 op=node.op, operand=node.operand,
                                 loc=node.loc)
        return self.visit(node)
    def visit_BinOp(self, node):
        node = self.generic_visit(node)
        node = asttyped.BinOpT(type=types.TVar(),
                               left=node.left, op=node.op, right=node.right,
                               loc=node.loc)
        return self.visit(node)
    def visit_Compare(self, node):
        node = self.generic_visit(node)
        node = asttyped.CompareT(type=types.TVar(),
                                 left=node.left, ops=node.ops, comparators=node.comparators,
                                 loc=node.loc)
        return self.visit(node)
    def visit_IfExp(self, node):
        node = self.generic_visit(node)
        node = asttyped.IfExpT(type=types.TVar(),
                               test=node.test, body=node.body, orelse=node.orelse,
                               if_loc=node.if_loc, else_loc=node.else_loc, loc=node.loc)
        return self.visit(node)
    def visit_ListComp(self, node):
        # List comprehensions open a new scope.
        extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
        extractor.visit(node)
        node = asttyped.ListCompT(
            typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
            type=types.TVar(),
            elt=node.elt, generators=node.generators,
            begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
        try:
            self.env_stack.append(node.typing_env)
            return self.generic_visit(node)
        finally:
            self.env_stack.pop()
    def visit_Call(self, node):
        node = self.generic_visit(node)
        node = asttyped.CallT(type=types.TVar(), iodelay=None, arg_exprs={},
                              func=node.func, args=node.args, keywords=node.keywords,
                              starargs=node.starargs, kwargs=node.kwargs,
                              star_loc=node.star_loc, dstar_loc=node.dstar_loc,
                              begin_loc=node.begin_loc, end_loc=node.end_loc, loc=node.loc)
        return node
    def visit_Lambda(self, node):
        # Lambdas open a new scope, like function definitions.
        extractor = LocalExtractor(env_stack=self.env_stack, engine=self.engine)
        extractor.visit(node)
        node = asttyped.LambdaT(
            typing_env=extractor.typing_env, globals_in_scope=extractor.global_,
            type=types.TVar(),
            args=node.args, body=node.body,
            lambda_loc=node.lambda_loc, colon_loc=node.colon_loc, loc=node.loc)
        try:
            self.env_stack.append(node.typing_env)
            return self.generic_visit(node)
        finally:
            self.env_stack.pop()
    def visit_ExceptHandler(self, node):
        node = self.generic_visit(node)
        if node.name is not None:
            name_type = self._find_name(node.name, node.name_loc)
        else:
            name_type = types.TVar()
        node = asttyped.ExceptHandlerT(
            name_type=name_type,
            filter=node.type, name=node.name, body=node.body,
            except_loc=node.except_loc, as_loc=node.as_loc, name_loc=node.name_loc,
            colon_loc=node.colon_loc, loc=node.loc)
        return node
    def visit_Raise(self, node):
        node = self.generic_visit(node)
        if node.cause:
            diag = diagnostic.Diagnostic("error",
                "'raise from' syntax is not supported", {},
                node.from_loc)
            self.engine.process(diag)
        return node
    def visit_For(self, node):
        node = self.generic_visit(node)
        node = asttyped.ForT(
            target=node.target, iter=node.iter, body=node.body, orelse=node.orelse,
            trip_count=None, trip_interval=None,
            keyword_loc=node.keyword_loc, in_loc=node.in_loc, for_colon_loc=node.for_colon_loc,
            else_loc=node.else_loc, else_colon_loc=node.else_colon_loc, loc=node.loc)
        return node
    def visit_withitem(self, node):
        node = self.generic_visit(node)
        node = asttyped.withitemT(
            context_expr=node.context_expr, optional_vars=node.optional_vars,
            enter_type=types.TVar(), exit_type=types.TVar(),
            as_loc=node.as_loc, loc=node.loc)
        return node
    # Unsupported visitors
    #
    def visit_unsupported(self, node):
        diag = diagnostic.Diagnostic("fatal",
            "this syntax is not supported", {},
            node.loc)
        self.engine.process(diag)
    # expr
    visit_Dict = visit_unsupported
    visit_DictComp = visit_unsupported
    visit_Ellipsis = visit_unsupported
    visit_GeneratorExp = visit_unsupported
    # visit_Set = visit_unsupported
    visit_SetComp = visit_unsupported
    visit_Starred = visit_unsupported
    visit_Yield = visit_unsupported
    visit_YieldFrom = visit_unsupported
    # stmt
    visit_Delete = visit_unsupported
    visit_Import = visit_unsupported
    visit_ImportFrom = visit_unsupported

View File

@ -1,47 +0,0 @@
"""
:class:`CastMonomorphizer` uses explicit casts to monomorphize
expressions of undetermined integer type to either 32 or 64 bits.
"""
from pythonparser import algorithm, diagnostic
from .. import types, builtins, asttyped
class CastMonomorphizer(algorithm.Visitor):
    """Propagate the concrete width of explicit int()/int32()/int64()
    casts into their arguments' still-variable integer widths."""
    def __init__(self, engine):
        self.engine = engine
    def visit_CallT(self, node):
        if (types.is_builtin(node.func.type, "int") or
                types.is_builtin(node.func.type, "int32") or
                types.is_builtin(node.func.type, "int64")):
            typ = node.type.find()
            # Only propagate when the cast's result width is already fixed
            # and the single argument is an int of unknown width.
            if (not types.is_var(typ["width"]) and
                    len(node.args) == 1 and
                    builtins.is_int(node.args[0].type) and
                    types.is_var(node.args[0].type.find()["width"])):
                if isinstance(node.args[0], asttyped.BinOpT):
                    # Binary operations are a bit special: they can widen, and so their
                    # return type is indeterminate until both argument types are fully known.
                    # In case we first monomorphize the return type, and then some argument type,
                    # and argument type is wider than return type, we'll introduce a conflict.
                    return
                node.args[0].type.unify(typ)
        if types.is_builtin(node.func.type, "int") or \
                types.is_builtin(node.func.type, "round"):
            # int()/round() with no constraint defaults to 32 bits.
            typ = node.type.find()
            if types.is_var(typ["width"]):
                typ["width"].unify(types.TValue(32))
        self.generic_visit(node)
    def visit_CoerceT(self, node):
        # A literal coerced to a fixed-width int takes that width directly.
        if isinstance(node.value, asttyped.NumT) and \
                builtins.is_int(node.type) and \
                builtins.is_int(node.value.type) and \
                not types.is_var(node.type["width"]) and \
                types.is_var(node.value.type["width"]):
            node.value.type.unify(node.type)
        self.generic_visit(node)

View File

@ -1,43 +0,0 @@
"""
:class:`ConstantHoister` is a code motion transform:
it moves any invariant loads to the earliest point where
they may be executed.
"""
from .. import types, ir
class ConstantHoister:
    """Hoist loads of constant instance attributes into the function's
    entry block, placing each just after the last of its operands."""
    def process(self, functions):
        for func in functions:
            self.process_function(func)
    def process_function(self, func):
        entry = func.entry()
        # Worklist of instructions that may have become hoistable.
        worklist = set(func.instructions())
        moved = set()
        while len(worklist) > 0:
            insn = worklist.pop()
            # Only GetAttr of an attribute declared constant on an
            # instance type is invariant and safe to move.
            if (isinstance(insn, ir.GetAttr) and insn not in moved and
                    types.is_instance(insn.object().type) and
                    insn.attr in insn.object().type.constant_attributes):
                has_variant_operands = False
                # Insertion point: after the latest entry-block operand.
                index_in_entry = 0
                for operand in insn.operands:
                    if isinstance(operand, ir.Argument):
                        pass
                    elif isinstance(operand, ir.Instruction) and operand.basic_block == entry:
                        index_in_entry = entry.index(operand) + 1
                    else:
                        # Depends on a value not available in the entry
                        # block; cannot hoist.
                        has_variant_operands = True
                        break
                if has_variant_operands:
                    continue
                insn.remove_from_parent()
                entry.instructions.insert(index_in_entry, insn)
                moved.add(insn)
                # Users of the moved load may now be hoistable too.
                for use in insn.uses:
                    worklist.add(use)

View File

@ -1,66 +0,0 @@
"""
:class:`DeadCodeEliminator` is a dead code elimination transform:
it removes basic blocks with no predecessors as well as unused
instructions without side effects.
"""
from .. import ir
class DeadCodeEliminator:
    """Iteratively remove unreachable basic blocks and unused pure
    instructions until a fixpoint is reached."""
    def __init__(self, engine):
        self.engine = engine
    def process(self, functions):
        for func in functions:
            self.process_function(func)
    def process_function(self, func):
        # Phase 1: remove blocks with no predecessors (except the entry).
        modified = True
        while modified:
            modified = False
            for block in list(func.basic_blocks):
                if not any(block.predecessors()) and block != func.entry():
                    self.remove_block(block)
                    modified = True
        # Phase 2: remove unused side-effect-free instructions.
        modified = True
        while modified:
            modified = False
            for insn in func.instructions():
                # Note that GetLocal is treated as an impure operation:
                # the local access validator has to observe it to emit
                # a diagnostic for reads of uninitialized locals, and
                # it also has to run after the interleaver, but interleaver
                # doesn't like to work with IR before DCE.
                if isinstance(insn, (ir.Phi, ir.Alloc, ir.GetAttr, ir.GetElem, ir.Coerce,
                                     ir.Arith, ir.Compare, ir.Select, ir.Quote, ir.Closure,
                                     ir.Offset)) \
                        and not any(insn.uses):
                    insn.erase()
                    modified = True
    def remove_block(self, block):
        # block.uses are updated while iterating
        for use in set(block.uses):
            if isinstance(use, ir.Phi):
                use.remove_incoming_block(block)
                # A phi left with no incoming values is itself dead.
                if not any(use.operands):
                    self.remove_instruction(use)
            elif isinstance(use, ir.SetLocal):
                # setlocal %env, %block is only used for lowering finally
                use.erase()
            else:
                assert False
        block.erase()
    def remove_instruction(self, insn):
        # Recursively drop phis that become empty when *insn* goes away.
        for use in set(insn.uses):
            if isinstance(use, ir.Phi):
                use.remove_incoming_value(insn)
                if not any(use.operands):
                    self.remove_instruction(use)
            else:
                assert False
        insn.erase()

File diff suppressed because it is too large Load Diff

View File

@ -1,28 +0,0 @@
"""
:class:`IntMonomorphizer` collapses the integer literals of undetermined
width to 32 bits, assuming they fit into 32 bits, or 64 bits if they
do not.
"""
from pythonparser import algorithm, diagnostic
from .. import types, builtins, asttyped
class IntMonomorphizer(algorithm.Visitor):
    """Collapse integer literals of undetermined width to 32 bits when the
    value fits into a signed 32-bit integer, or to 64 bits otherwise;
    literals outside the signed 64-bit range are diagnosed as errors."""
    def __init__(self, engine):
        self.engine = engine
    def visit_NumT(self, node):
        """Unify the width of an int literal's type with 32 or 64 bits."""
        if builtins.is_int(node.type):
            if types.is_var(node.type["width"]):
                # Bounds must be inclusive: -2**31 and 2**31-1 are valid
                # 32-bit values (the previous exclusive comparisons pushed
                # them to 64 bits and rejected the 64-bit extremes).
                if -2**31 <= node.n <= 2**31-1:
                    width = 32
                elif -2**63 <= node.n <= 2**63-1:
                    width = 64
                else:
                    diag = diagnostic.Diagnostic("error",
                        "integer literal out of range for a signed 64-bit value", {},
                        node.loc)
                    self.engine.process(diag)
                    return
                node.type["width"].unify(types.TValue(width))

View File

@ -1,185 +0,0 @@
"""
:class:`Interleaver` reorders requests to the RTIO core so that
the timestamp would always monotonically nondecrease.
"""
from pythonparser import diagnostic
from .. import types, builtins, ir, iodelay
from ..analyses import domination
from ..algorithms import inline, unroll
def delay_free_subgraph(root, limit):
    """Return True when no block reachable from *root* (stopping the walk
    at *limit*) terminates in a delay instruction."""
    seen = set()
    pending = root.successors()
    while pending:
        current = pending.pop()
        seen.add(current)
        if current is limit:
            continue
        if isinstance(current.terminator(), ir.Delay):
            return False
        pending.extend(succ for succ in current.successors() if succ not in seen)
    return True
def iodelay_of_block(block):
    """Return the constant IO delay contributed by *block*'s terminator,
    or 0 when the block does not end in a delay."""
    term = block.terminator()
    if not isinstance(term, ir.Delay):
        return 0
    # We should be able to fold everything without free variables.
    folded = term.interval.fold()
    assert iodelay.is_const(folded)
    return folded.value
def is_pure_delay(insn):
    """True iff *insn* is a builtin delay()/delay_mu() call."""
    if not isinstance(insn, ir.Builtin):
        return False
    return insn.op in ("delay", "delay_mu")
def is_impure_delay_block(block):
    """True iff *block* ends in a delay whose decomposition is something
    other than a plain delay()/delay_mu() call (i.e. a function call)."""
    term = block.terminator()
    if not isinstance(term, ir.Delay):
        return False
    return not is_pure_delay(term.decomposition())
class Interleaver:
    """Merge the parallel branches of each `with interleave:` statement
    into one sequential schedule ordered by accumulated IO delay."""
    def __init__(self, engine):
        self.engine = engine
    def process(self, functions):
        for func in functions:
            self.process_function(func)
    def process_function(self, func):
        for insn in func.instructions():
            if isinstance(insn, ir.Delay):
                if any(insn.interval.free_vars()):
                    # If a function has free variables in delay expressions,
                    # that means its IO delay depends on arguments.
                    # Do not change such functions in any way so that it will
                    # be successfully inlined and then removed by DCE.
                    return
        postdom_tree = None
        for insn in func.instructions():
            if not isinstance(insn, ir.Interleave):
                continue
            # Lazily compute dominators.
            if postdom_tree is None:
                postdom_tree = domination.PostDominatorTree(func)
            target_block = insn.basic_block
            target_time = 0
            source_blocks = insn.basic_block.successors()
            source_times = [0 for _ in source_blocks]
            if len(source_blocks) == 1:
                # Immediate dominator for an interleave instruction with one successor
                # is the first instruction in the body of the statement which created
                # it, but below we expect that it would be the first instruction after
                # the statement itself.
                insn.replace_with(ir.Branch(source_blocks[0]))
                continue
            interleave_until = postdom_tree.immediate_dominator(insn.basic_block)
            assert interleave_until is not None # no nonlocal flow in `with interleave`
            assert interleave_until not in source_blocks
            while len(source_blocks) > 0:
                # Pick the branch whose next block finishes earliest.
                def time_after_block(pair):
                    index, block = pair
                    return source_times[index] + iodelay_of_block(block)
                # Always prefer impure blocks (with calls) to pure blocks, because
                # impure blocks may expand with smaller delays appearing, and in
                # case of a tie, if a pure block is preferred, this would violate
                # the timeline monotonicity.
                available_source_blocks = list(filter(is_impure_delay_block, source_blocks))
                if not any(available_source_blocks):
                    available_source_blocks = source_blocks
                # NOTE(review): *index* enumerates the filtered list but is used
                # to index source_blocks/source_times below — verify the two
                # stay aligned when the impure-block filter actually drops
                # entries.
                index, source_block = min(enumerate(available_source_blocks), key=time_after_block)
                source_block_delay = iodelay_of_block(source_block)
                new_target_time = source_times[index] + source_block_delay
                target_time_delta = new_target_time - target_time
                assert target_time_delta >= 0
                # Splice the chosen source block after the current target.
                target_terminator = target_block.terminator()
                if isinstance(target_terminator, ir.Interleave):
                    target_terminator.replace_with(ir.Branch(source_block))
                elif isinstance(target_terminator, (ir.Delay, ir.Branch)):
                    target_terminator.set_target(source_block)
                else:
                    assert False
                source_terminator = source_block.terminator()
                if isinstance(source_terminator, ir.Interleave):
                    source_terminator.replace_with(ir.Branch(source_terminator.target()))
                elif isinstance(source_terminator, ir.Branch):
                    pass
                elif isinstance(source_terminator, ir.BranchIf):
                    # Skip a delay-free loop/conditional
                    source_block = postdom_tree.immediate_dominator(source_block)
                    assert (source_block is not None)
                elif isinstance(source_terminator, ir.Return):
                    break
                elif isinstance(source_terminator, ir.Delay):
                    old_decomp = source_terminator.decomposition()
                    if is_pure_delay(old_decomp):
                        # Retime the pure delay to the schedule gap, or
                        # drop it entirely when the gap is zero.
                        if target_time_delta > 0:
                            new_decomp_expr = ir.Constant(int(target_time_delta), builtins.TInt64())
                            new_decomp = ir.Builtin("delay_mu", [new_decomp_expr], builtins.TNone())
                            new_decomp.loc = old_decomp.loc
                            source_terminator.basic_block.insert(new_decomp, before=source_terminator)
                            source_terminator.interval = iodelay.Const(target_time_delta)
                            source_terminator.set_decomposition(new_decomp)
                        else:
                            source_terminator.replace_with(ir.Branch(source_terminator.target()))
                        old_decomp.erase()
                    else: # It's a call.
                        need_to_inline = len(source_blocks) > 1
                        if need_to_inline:
                            if old_decomp.static_target_function is None:
                                diag = diagnostic.Diagnostic("fatal",
                                    "it is not possible to interleave this function call within "
                                    "a 'with interleave:' statement because the compiler could not "
                                    "prove that the same function would always be called", {},
                                    old_decomp.loc)
                                self.engine.process(diag)
                            # Inlining invalidates the CFG; recompute dominators.
                            inline(old_decomp)
                            postdom_tree = domination.PostDominatorTree(func)
                            continue
                        elif target_time_delta > 0:
                            source_terminator.interval = iodelay.Const(target_time_delta)
                        else:
                            source_terminator.replace_with(ir.Branch(source_terminator.target()))
                elif isinstance(source_terminator, ir.Loop):
                    # Loops with delays must be unrolled before interleaving.
                    unroll(source_terminator)
                    postdom_tree = domination.PostDominatorTree(func)
                    continue
                else:
                    assert False
                target_block = source_block
                target_time = new_target_time
                new_source_block = postdom_tree.immediate_dominator(source_block)
                assert (new_source_block is not None)
                assert delay_free_subgraph(source_block, new_source_block)
                if new_source_block == interleave_until:
                    # We're finished with this branch.
                    del source_blocks[index]
                    del source_times[index]
                else:
                    source_blocks[index] = new_source_block
                    source_times[index] = new_target_time

View File

@ -1,327 +0,0 @@
"""
:class:`IODelayEstimator` calculates the amount of time
elapsed from the point of view of the RTIO core for
every function.
"""
from pythonparser import ast, algorithm, diagnostic
from .. import types, iodelay, builtins, asttyped
class _UnknownDelay(Exception):
    """Raised when a callee's delay type is still an unconstrained
    variable; the caller will be re-analyzed on the next fixpoint pass."""
    pass
class _IndeterminateDelay(Exception):
    """Raised when a delay cannot be computed at all; carries the
    diagnostic explaining why as ``cause``."""
    def __init__(self, cause):
        self.cause = cause
class IODelayEstimator(algorithm.Visitor):
def __init__(self, engine, ref_period):
self.engine = engine
self.ref_period = ref_period
self.changed = False
self.current_delay = iodelay.Const(0)
self.current_args = None
self.current_goto = None
self.current_return = None
def evaluate(self, node, abort, context):
if isinstance(node, asttyped.NumT):
return iodelay.Const(node.n)
elif isinstance(node, asttyped.CoerceT):
return self.evaluate(node.value, abort, context)
elif isinstance(node, asttyped.NameT):
if self.current_args is None:
note = diagnostic.Diagnostic("note",
"this variable is not an argument", {},
node.loc)
abort([note])
elif node.id in [arg.arg for arg in self.current_args.args]:
return iodelay.Var(node.id)
else:
notes = [
diagnostic.Diagnostic("note",
"this variable is not an argument of the innermost function", {},
node.loc),
diagnostic.Diagnostic("note",
"only these arguments are in scope of analysis", {},
self.current_args.loc)
]
abort(notes)
elif isinstance(node, asttyped.BinOpT):
lhs = self.evaluate(node.left, abort, context)
rhs = self.evaluate(node.right, abort, context)
if isinstance(node.op, ast.Add):
return lhs + rhs
elif isinstance(node.op, ast.Sub):
return lhs - rhs
elif isinstance(node.op, ast.Mult):
return lhs * rhs
elif isinstance(node.op, ast.Div):
return lhs / rhs
elif isinstance(node.op, ast.FloorDiv):
return lhs // rhs
else:
note = diagnostic.Diagnostic("note",
"this operator is not supported {context}",
{"context": context},
node.op.loc)
abort([note])
else:
note = diagnostic.Diagnostic("note",
"this expression is not supported {context}",
{"context": context},
node.loc)
abort([note])
def abort(self, message, loc, notes=[]):
diag = diagnostic.Diagnostic("error", message, {}, loc, notes=notes)
raise _IndeterminateDelay(diag)
def visit_fixpoint(self, node):
while True:
self.changed = False
self.visit(node)
if not self.changed:
return
def visit_ModuleT(self, node):
try:
for stmt in node.body:
try:
self.visit(stmt)
except _UnknownDelay:
pass # more luck next time?
except _IndeterminateDelay:
pass # we don't care; module-level code is never interleaved
def visit_function(self, args, body, typ, loc):
old_args, self.current_args = self.current_args, args
old_return, self.current_return = self.current_return, None
old_delay, self.current_delay = self.current_delay, iodelay.Const(0)
try:
self.visit(body)
if not iodelay.is_zero(self.current_delay) and self.current_return is not None:
self.abort("only return statement at the end of the function "
"can be interleaved", self.current_return.loc)
delay = types.TFixedDelay(self.current_delay.fold())
except _IndeterminateDelay as error:
delay = types.TIndeterminateDelay(error.cause)
self.current_delay = old_delay
self.current_return = old_return
self.current_args = old_args
if types.is_indeterminate_delay(delay) and types.is_indeterminate_delay(typ.delay):
# Both delays indeterminate; no point in unifying since that will
# replace the lazy and more specific error with an eager and more generic
# error (unification error of delay(?) with delay(?), which is useless).
return
try:
old_delay = typ.delay.find()
typ.delay.unify(delay)
if typ.delay.find() != old_delay:
self.changed = True
except types.UnificationError as e:
printer = types.TypePrinter()
diag = diagnostic.Diagnostic("fatal",
"delay {delaya} was inferred for this function, but its delay is already "
"constrained externally to {delayb}",
{"delaya": printer.name(delay), "delayb": printer.name(typ.delay)},
loc)
self.engine.process(diag)
def visit_FunctionDefT(self, node):
self.visit(node.args.defaults)
self.visit(node.args.kw_defaults)
# We can only handle return in tail position.
if isinstance(node.body[-1], ast.Return):
body = node.body[:-1]
else:
body = node.body
self.visit_function(node.args, body, node.signature_type.find(), node.loc)
visit_QuotedFunctionDefT = visit_FunctionDefT
def visit_LambdaT(self, node):
self.visit_function(node.args, node.body, node.type.find(), node.loc)
def get_iterable_length(self, node, context):
def abort(notes):
self.abort("for statement cannot be interleaved because "
"iteration count is indeterminate",
node.loc, notes)
def evaluate(node):
return self.evaluate(node, abort, context)
if isinstance(node, asttyped.CallT) and types.is_builtin(node.func.type, "range"):
range_min, range_max, range_step = iodelay.Const(0), None, iodelay.Const(1)
if len(node.args) == 3:
range_min, range_max, range_step = map(evaluate, node.args)
elif len(node.args) == 2:
range_min, range_max = map(evaluate, node.args)
elif len(node.args) == 1:
range_max, = map(evaluate, node.args)
return (range_max - range_min) // range_step
else:
note = diagnostic.Diagnostic("note",
"this value is not a constant range literal", {},
node.loc)
abort([note])
def visit_ForT(self, node):
self.visit(node.iter)
old_goto, self.current_goto = self.current_goto, None
old_delay, self.current_delay = self.current_delay, iodelay.Const(0)
self.visit(node.body)
if iodelay.is_zero(self.current_delay):
self.current_delay = old_delay
else:
if self.current_goto is not None:
self.abort("loop iteration count is indeterminate because of control flow",
self.current_goto.loc)
context = "in an iterable used in a for loop that is being interleaved"
node.trip_count = self.get_iterable_length(node.iter, context).fold()
node.trip_interval = self.current_delay.fold()
self.current_delay = old_delay + node.trip_interval * node.trip_count
self.current_goto = old_goto
self.visit(node.orelse)
def visit_goto(self, node):
self.current_goto = node
visit_Break = visit_goto
visit_Continue = visit_goto
def visit_control_flow(self, kind, node):
old_delay, self.current_delay = self.current_delay, iodelay.Const(0)
self.generic_visit(node)
if not iodelay.is_zero(self.current_delay):
self.abort("{} cannot be interleaved".format(kind), node.loc)
self.current_delay = old_delay
visit_If = lambda self, node: self.visit_control_flow("if statement", node)
visit_IfExpT = lambda self, node: self.visit_control_flow("if expression", node)
visit_Try = lambda self, node: self.visit_control_flow("try statement", node)
def visit_While(self, node):
old_goto, self.current_goto = self.current_goto, None
self.visit_control_flow("while statement", node)
self.current_goto = old_goto
def visit_Return(self, node):
self.current_return = node
def visit_With(self, node):
self.visit(node.items)
context_expr = node.items[0].context_expr
if len(node.items) == 1 and types.is_builtin(context_expr.type, "interleave"):
try:
delays = []
for stmt in node.body:
old_delay, self.current_delay = self.current_delay, iodelay.Const(0)
self.visit(stmt)
delays.append(self.current_delay)
self.current_delay = old_delay
if any(delays):
self.current_delay += iodelay.Max(delays)
except _IndeterminateDelay as error:
# Interleave failures inside `with` statements are hard failures,
# since there's no chance that the code will never actually execute
# inside a `with` statement after all.
note = diagnostic.Diagnostic("note",
"while interleaving this 'with interleave:' statement", {},
node.loc)
error.cause.notes += [note]
self.engine.process(error.cause)
flow_stmt = None
if self.current_goto is not None:
flow_stmt = self.current_goto
elif self.current_return is not None:
flow_stmt = self.current_return
if flow_stmt is not None:
note = diagnostic.Diagnostic("note",
"this '{kind}' statement transfers control out of "
"the 'with interleave:' statement",
{"kind": flow_stmt.keyword_loc.source()},
flow_stmt.loc)
diag = diagnostic.Diagnostic("error",
"cannot interleave this 'with interleave:' statement", {},
node.keyword_loc.join(node.colon_loc), notes=[note])
self.engine.process(diag)
elif len(node.items) == 1 and types.is_builtin(context_expr.type, "sequential"):
self.visit(node.body)
else:
self.abort("with statement cannot be interleaved", node.loc)
def visit_CallT(self, node):
    """Compute the IO delay contributed by one call expression, accumulate
    it into self.current_delay, and record it on the node as ``iodelay``.

    Raises _UnknownDelay when the callee's delay has not been inferred yet,
    and _IndeterminateDelay when it can never be known statically.
    """
    typ = node.func.type.find()

    def abort(notes):
        self.abort("call cannot be interleaved because "
                   "an argument cannot be statically evaluated",
                   node.loc, notes)

    if types.is_builtin(typ, "delay"):
        # delay(t): t is in seconds; convert to machine units.
        value = self.evaluate(node.args[0], abort=abort,
                              context="as an argument for delay()")
        call_delay = iodelay.SToMU(value, ref_period=self.ref_period)
    elif types.is_builtin(typ, "delay_mu"):
        # delay_mu(t): t is already in machine units.
        value = self.evaluate(node.args[0], abort=abort,
                              context="as an argument for delay_mu()")
        call_delay = value
    elif not types.is_builtin(typ):
        if types.is_function(typ) or types.is_rpc(typ):
            offset = 0
        elif types.is_method(typ):
            # Bound method: positional arguments are shifted by one, since
            # 'self' occupies the first slot of the function type.
            offset = 1
            typ = types.get_method_function(typ)
        else:
            assert False

        if types.is_rpc(typ):
            # RPCs contribute no RTIO delay.
            call_delay = iodelay.Const(0)
        else:
            delay = typ.find().delay.find()
            if types.is_var(delay):
                # Callee's delay not inferred yet; the caller retries later.
                raise _UnknownDelay()
            elif delay.is_indeterminate():
                # Re-raise the callee's diagnostic with a note pointing at
                # this call site.
                note = diagnostic.Diagnostic("note",
                    "function called here", {},
                    node.loc)
                cause = delay.cause
                cause = diagnostic.Diagnostic(cause.level, cause.reason, cause.arguments,
                                              cause.location, cause.highlights,
                                              cause.notes + [note])
                raise _IndeterminateDelay(cause)
            elif delay.is_fixed():
                # Map formal argument names to the actual argument nodes.
                args = {}
                for kw_node in node.keywords:
                    args[kw_node.arg] = kw_node.value
                for arg_name, arg_node in zip(list(typ.args)[offset:], node.args):
                    args[arg_name] = arg_node

                # Only the arguments the delay expression actually depends
                # on need to be statically evaluated.
                free_vars = delay.duration.free_vars()
                node.arg_exprs = {
                    arg: self.evaluate(args[arg], abort=abort,
                                       context="in the expression for argument '{}' "
                                               "that affects I/O delay".format(arg))
                    for arg in free_vars
                }
                call_delay = delay.duration.fold(node.arg_exprs)
            else:
                assert False
    else:
        # All other builtins are delay-free.
        call_delay = iodelay.Const(0)

    self.current_delay += call_delay
    node.iodelay = call_delay

File diff suppressed because it is too large Load Diff

View File

@ -1,51 +0,0 @@
"""
:class:`LocalDemoter` is a constant propagation transform:
it replaces reads of any local variable with only one write
in a function without closures with the value that was written.
:class:`LocalAccessValidator` must be run before this transform
to ensure that the transformation it performs is sound.
"""
from collections import defaultdict
from .. import ir
class LocalDemoter:
    """Demote single-store local variables to plain SSA values by replacing
    every read with the stored value (see the module docstring).  Soundness
    relies on LocalAccessValidator having run beforehand."""

    def process(self, functions):
        """Run the transform over every function in *functions*."""
        for function in functions:
            self.process_function(function)

    def process_function(self, func):
        """Demote the eligible locals of a single function *func*."""
        safe_envs = {}
        reads = defaultdict(set)
        writes = defaultdict(set)

        for insn in func.instructions():
            if not isinstance(insn, (ir.GetLocal, ir.SetLocal)):
                continue
            # Compiler-internal slots contain "$"; leave them alone.
            if "$" in insn.var_name:
                continue

            env = insn.environment()

            # An environment is safe only if every use of it is a plain
            # get/set; any other use could observe the variable.
            if env not in safe_envs:
                safe_envs[env] = all(
                    isinstance(use, (ir.GetLocal, ir.SetLocal))
                    for use in env.uses)
            if not safe_envs[env]:
                continue

            slot = (env, insn.var_name)
            if isinstance(insn, ir.SetLocal):
                writes[slot].add(insn)
            else:
                reads[slot].add(insn)

        # A local with exactly one store can be replaced by the stored value.
        for slot, stores in writes.items():
            if len(stores) != 1:
                continue
            (store,) = stores
            for load in reads[slot]:
                load.replace_all_uses_with(store.value())
                load.erase()

View File

@ -1,89 +0,0 @@
"""
:class:`TypedtreePrinter` prints a human-readable representation of typedtrees.
"""
from pythonparser import algorithm, ast
from .. import types, asttyped
class TypedtreePrinter(algorithm.Visitor):
    """Render a typedtree (an AST annotated with types) as an indented,
    human-readable string; the entry point is :meth:`print`."""

    def __init__(self):
        # Rendering state; live only for the duration of a print() call.
        self.str = None
        self.level = None
        self.last_nl = None
        self.type_printer = None

    def print(self, node):
        """Return the pretty-printed representation of *node*."""
        try:
            self.str = ""
            self.level = 0
            self.last_nl = 0
            self.type_printer = types.TypePrinter()
            self.visit(node)
            self._nl()
            return self.str
        finally:
            # Reset instance state so the printer can be reused.
            self.str = None
            self.level = None
            self.last_nl = 0
            self.type_printer = None

    def _nl(self):
        # self.str += "·"
        # Emit a newline plus indentation, unless one was just emitted
        # (last_nl tracks the output length right after the last newline).
        if len(self.str) != self.last_nl:
            self.str += "\n" + ("  " * self.level)
            self.last_nl = len(self.str)

    def _indent(self):
        self.level += 1
        self._nl()

    def _dedent(self):
        # Flush a newline at the old level, then strip the two-space unit
        # of indentation that it just emitted.
        self._nl()
        self.level -= 1
        self.str = self.str[:-2]
        self.last_nl -= 2

    def visit(self, obj):
        """Recursively render an AST node, a type, a list, or any other
        object (which falls back to repr())."""
        if isinstance(obj, ast.AST):
            attrs = set(obj._fields) - {'ctx'}
            if isinstance(obj, asttyped.commontyped):
                attrs.update(set(obj._types))

            # Iterate a copy: attrs is mutated inside the loop.
            for attr in set(attrs):
                if not getattr(obj, attr):
                    attrs.remove(attr) # omit falsey stuff

            self.str += obj.__class__.__name__ + "("
            if len(attrs) > 1:
                self._indent()

            for attr in attrs:
                if len(attrs) > 1:
                    self._nl()
                self.str += attr + "="
                self.visit(getattr(obj, attr))
                if len(attrs) > 1:
                    self._nl()

            if len(attrs) > 1:
                self._dedent()
            self.str += ")"
        elif isinstance(obj, types.Type):
            self.str += self.type_printer.name(obj, max_depth=0)
        elif isinstance(obj, list):
            self.str += "["
            if len(obj) > 1:
                self._indent()

            for elem in obj:
                if len(obj) > 1:
                    self._nl()
                self.visit(elem)
                if len(obj) > 1:
                    self._nl()

            if len(obj) > 1:
                self._dedent()
            self.str += "]"
        else:
            self.str += repr(obj)

View File

@ -1,839 +0,0 @@
"""
The :mod:`types` module contains the classes describing the types
in :mod:`asttyped`.
"""
import builtins
import string
from collections import OrderedDict
from . import iodelay
class UnificationError(Exception):
    """Raised when two types cannot be unified.

    :ivar typea: one of the two conflicting types
    :ivar typeb: the other conflicting type
    """

    def __init__(self, typea, typeb):
        self.typea = typea
        self.typeb = typeb
def genalnum():
    """Yield an infinite stream of short alphabetic identifiers:
    ``a``..``z``, then ``aa``..``az``, ``ba``.. and so on — bijective
    base-26, like lower-case spreadsheet column labels."""
    letters = string.ascii_lowercase
    n = 1
    while True:
        k, name = n, ""
        while k:
            # Bijective base-26: digits run 1..26, hence the k - 1.
            k, digit = divmod(k - 1, 26)
            name = letters[digit] + name
        yield name
        n += 1
def _freeze(dict_):
return tuple((key, dict_[key]) for key in dict_)
def _map_find(elts):
if isinstance(elts, list):
return [x.find() for x in elts]
elif isinstance(elts, dict):
return {k: elts[k].find() for k in elts}
else:
assert False
class Type(object):
    """Abstract base class for every type in the ARTIQ type system."""
    def __str__(self):
        # Render via the human-readable printer rather than repr().
        return TypePrinter().name(self)
class TVar(Type):
    """
    A type variable.

    In effect, the classic union-find data structure is intrusively
    folded into this class.
    """
    def __init__(self):
        # A fresh variable is its own union-find root.
        self.parent = self
        self.rank = 0

    def find(self):
        """Return the representative of this variable's equivalence class
        (the variable itself, or the concrete type it was unified with)."""
        parent = self.parent
        if parent is self:
            return self
        else:
            # The recursive find() invocation is turned into a loop
            # because paths resulting from unification of large arrays
            # can easily cause a stack overflow.
            root = self
            while parent.__class__ == TVar and root is not parent:
                # Path halving: point every visited node at its grandparent.
                _, parent = root, root.parent = parent, parent.parent
            return root.parent

    def unify(self, other):
        """Merge this variable's class with *other*'s (union by rank)."""
        if other is self:
            return
        x = other.find()
        y = self.find()
        if x is y:
            return
        if y.__class__ == TVar:
            if x.__class__ == TVar:
                # Union by rank keeps subsequent find() paths short.
                if x.rank < y.rank:
                    x, y = y, x
                y.parent = x
                if x.rank == y.rank:
                    x.rank += 1
            else:
                y.parent = x
        else:
            # y is already bound to a concrete type; let it do the check.
            y.unify(x)

    def fold(self, accum, fn):
        # Fold over the bound type if there is one, else over the variable.
        if self.parent is self:
            return fn(accum, self)
        else:
            return self.find().fold(accum, fn)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        if self.parent is self:
            return "<artiq.compiler.types.TVar %d>" % id(self)
        else:
            return repr(self.find())

    # __eq__ and __hash__ are not overridden and default to
    # comparison by identity. Use .find() explicitly before
    # any lookups or comparisons.
class TMono(Type):
    """
    A monomorphic type, possibly parametric.

    :class:`TMono` is supposed to be subclassed by builtin types,
    unlike all other :class:`Type` descendants. Similarly,
    instances of :class:`TMono` should never be allocated directly,
    as that will break the type-sniffing code in :mod:`builtins`.

    :ivar name: (str) name of the type constructor
    :ivar params: (:class:`collections.OrderedDict`) type parameters,
        sorted by parameter name
    """
    attributes = OrderedDict()

    def __init__(self, name, params=None):
        # `params=None` replaces the mutable default `params={}`;
        # behavior is unchanged (the dict was always copied below).
        if params is None:
            params = {}
        assert isinstance(params, (dict, OrderedDict))
        self.name, self.params = name, OrderedDict(sorted(params.items()))

    def find(self):
        return self

    def unify(self, other):
        if other is self:
            return
        if isinstance(other, TMono) and self.name == other.name:
            assert self.params.keys() == other.params.keys()
            for param in self.params:
                self.params[param].unify(other.params[param])
        elif isinstance(other, TVar):
            other.unify(self)
        else:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        for param in self.params:
            accum = self.params[param].fold(accum, fn)
        return fn(accum, self)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TMono(%s, %s)" % (repr(self.name), repr(self.params))

    def __getitem__(self, param):
        # Convenience access to one type parameter: typ["elt"], typ["width"].
        return self.params[param]

    def __eq__(self, other):
        return isinstance(other, TMono) and \
                self.name == other.name and \
                _map_find(self.params) == _map_find(other.params)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((self.name, _freeze(self.params)))
class TTuple(Type):
    """
    A tuple type.

    :ivar elts: (list of :class:`Type`) elements
    """
    attributes = OrderedDict()

    def __init__(self, elts=None):
        # Bug fix: the previous default (`elts=[]`) was stored directly on
        # the instance, so every argument-less TTuple() aliased — and could
        # mutate — the same module-level list.
        self.elts = [] if elts is None else elts

    def find(self):
        return self

    def unify(self, other):
        if other is self:
            return
        if isinstance(other, TTuple) and len(self.elts) == len(other.elts):
            for selfelt, otherelt in zip(self.elts, other.elts):
                selfelt.unify(otherelt)
        elif isinstance(other, TVar):
            other.unify(self)
        else:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        for elt in self.elts:
            accum = elt.fold(accum, fn)
        return fn(accum, self)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TTuple(%s)" % repr(self.elts)

    def __eq__(self, other):
        return isinstance(other, TTuple) and \
                _map_find(self.elts) == _map_find(other.elts)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # NOTE(review): __eq__ compares elements after .find(), but the
        # hash uses the raw elements — tuples equal after unification may
        # hash differently; confirm this is intended.
        return hash(tuple(self.elts))
class _TPointer(TMono):
    # A raw pointer type; with no element type given it defaults to a
    # pointer to 8-bit integers.
    def __init__(self, elt=None):
        if elt is None:
            elt = TMono("int", {"width": 8}) # i8*
        super().__init__("pointer", params={"elt": elt})
class TFunction(Type):
    """
    A function type.

    :ivar args: (:class:`collections.OrderedDict` of string to :class:`Type`)
        mandatory arguments
    :ivar optargs: (:class:`collections.OrderedDict` of string to :class:`Type`)
        optional arguments
    :ivar ret: (:class:`Type`)
        return type
    :ivar delay: (:class:`Type`)
        RTIO delay
    """
    attributes = OrderedDict([
        ('__closure__', _TPointer()),
        ('__code__', _TPointer()),
    ])

    def __init__(self, args, optargs, ret):
        assert isinstance(args, OrderedDict)
        assert isinstance(optargs, OrderedDict)
        assert isinstance(ret, Type)
        self.args, self.optargs, self.ret = args, optargs, ret
        # The delay starts unknown and is filled in by inference.
        self.delay = TVar()

    def arity(self):
        """Total number of arguments, mandatory plus optional."""
        return len(self.args) + len(self.optargs)

    def arg_names(self):
        """Argument names in declaration order, mandatory first."""
        return list(self.args.keys()) + list(self.optargs.keys())

    def find(self):
        return self

    def unify(self, other):
        if other is self:
            return
        if isinstance(other, TFunction) and \
                self.args.keys() == other.args.keys() and \
                self.optargs.keys() == other.optargs.keys():
            for selfarg, otherarg in zip(list(self.args.values()) + list(self.optargs.values()),
                                         list(other.args.values()) + list(other.optargs.values())):
                selfarg.unify(otherarg)
            self.ret.unify(other.ret)
            self.delay.unify(other.delay)
        elif isinstance(other, TVar):
            other.unify(self)
        else:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        for arg in self.args:
            accum = self.args[arg].fold(accum, fn)
        for optarg in self.optargs:
            accum = self.optargs[optarg].fold(accum, fn)
        accum = self.ret.fold(accum, fn)
        return fn(accum, self)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TFunction({}, {}, {})".format(
            repr(self.args), repr(self.optargs), repr(self.ret))

    # NOTE(review): __eq__ ignores `ret` (and `delay`) while __hash__
    # incorporates `self.ret` — "equal" functions with different return
    # types will hash differently; confirm this is intended.
    def __eq__(self, other):
        return isinstance(other, TFunction) and \
                _map_find(self.args) == _map_find(other.args) and \
                _map_find(self.optargs) == _map_find(other.optargs)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash((_freeze(self.args), _freeze(self.optargs), self.ret))
class TExternalFunction(TFunction):
    """
    A type of an externally-provided function.

    This can be any function following the C ABI, such as provided by the
    C/Rust runtime, or a compiler backend intrinsic. The mangled name to link
    against is encoded as part of the type.

    :ivar name: (str) external symbol name.
        This will be the symbol linked against (following any extra C name
        mangling rules).
    :ivar flags: (set of str) function flags.
        Flag ``nounwind`` means the function never raises an exception.
        Flag ``nowrite`` means the function never accesses any memory
        that the ARTIQ Python code can observe.
    :ivar broadcast_across_arrays: (bool)
        If True, the function is transparently applied element-wise when called
        with TArray arguments.
    """
    attributes = OrderedDict()

    def __init__(self, args, ret, name, flags=None, broadcast_across_arrays=False):
        # Bug fix: `flags=set()` (a mutable default) was stored directly on
        # the instance, so all flag-less instances shared one set object;
        # a later in-place mutation would have leaked across them.
        if flags is None:
            flags = set()
        assert isinstance(flags, set)
        for flag in flags:
            assert flag in {'nounwind', 'nowrite'}
        super().__init__(args, OrderedDict(), ret)
        self.name = name
        # External functions contribute no RTIO delay.
        self.delay = TFixedDelay(iodelay.Const(0))
        self.flags = flags
        self.broadcast_across_arrays = broadcast_across_arrays

    def unify(self, other):
        if other is self:
            return
        if isinstance(other, TExternalFunction) and \
                self.name == other.name:
            super().unify(other)
        elif isinstance(other, TVar):
            other.unify(self)
        else:
            raise UnificationError(self, other)
class TRPC(Type):
    """
    A type of a remote call.

    :ivar ret: (:class:`Type`)
        return type
    :ivar service: (int) RPC service number
    :ivar is_async: (bool) whether the RPC blocks until return
    """
    attributes = OrderedDict()

    def __init__(self, ret, service, is_async=False):
        assert isinstance(ret, Type)
        self.ret, self.service, self.is_async = ret, service, is_async

    def find(self):
        return self

    def unify(self, other):
        # Two RPC types unify only when service and asynchrony agree;
        # the return types are then unified recursively.
        if other is self:
            return
        if isinstance(other, TRPC) and \
                self.service == other.service and \
                self.is_async == other.is_async:
            self.ret.unify(other.ret)
        elif isinstance(other, TVar):
            other.unify(self)
        else:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        accum = self.ret.fold(accum, fn)
        return fn(accum, self)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TRPC({})".format(repr(self.ret))

    def __eq__(self, other):
        return isinstance(other, TRPC) and \
                self.service == other.service and \
                self.is_async == other.is_async

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Hash only on the service number; consistent with __eq__ (equal
        # objects share a service, hence a hash).
        return hash(self.service)
class TBuiltin(Type):
    """
    An instance of builtin type. Every instance of a builtin
    type is treated specially according to its name.
    """
    def __init__(self, name):
        assert isinstance(name, str)
        self.name = name
        # attributes is per-instance (unlike TMono's class attribute) so
        # each builtin can carry its own attribute table.
        self.attributes = OrderedDict()

    def find(self):
        return self

    def unify(self, other):
        # Builtins only unify with an equal builtin (same subclass + name).
        if other is self:
            return
        if self != other:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        return fn(accum, self)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.{}({})".format(type(self).__name__, repr(self.name))

    def __eq__(self, other):
        return isinstance(other, TBuiltin) and \
                self.name == other.name

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.name)
class TBuiltinFunction(TBuiltin):
    """
    A type of a builtin function.

    Builtin functions are treated specially throughout all stages of the
    compilation process according to their name (e.g. calls may not actually
    lower to a function call). See :class:`TExternalFunction` for externally
    defined functions that are otherwise regular.
    """
class TConstructor(TBuiltin):
    """
    A type of a constructor of a class, e.g. ``list``.
    Note that this is not the same as the type of an instance of
    the class, which is ``TMono("list", ...)`` (or a descendant).

    :ivar instance: (:class:`Type`)
        the type of the instance created by this constructor
    """
    def __init__(self, instance):
        assert isinstance(instance, TMono)
        # The constructor shares its name with the instance type it builds.
        super().__init__(instance.name)
        self.instance = instance
class TExceptionConstructor(TConstructor):
    """
    A type of a constructor of an exception, e.g. ``Exception``.
    Note that this is not the same as the type of an instance of
    the class, which is ``TMono("Exception", ...)``.
    """
class TInstance(TMono):
    """
    A type of an instance of a user-defined class.

    :ivar constructor: (:class:`TConstructor`)
        the type of the constructor with which this instance
        was created
    """
    def __init__(self, name, attributes):
        assert isinstance(attributes, OrderedDict)
        super().__init__(name)
        self.attributes = attributes
        # Names of attributes that must never be assigned to; populated
        # elsewhere (checked by ConstnessValidator).
        self.constant_attributes = set()

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TInstance({}, {})".format(
            repr(self.name), repr(self.attributes))
class TModule(TMono):
    """
    A type of a module.
    """
    def __init__(self, name, attributes):
        assert isinstance(attributes, OrderedDict)
        super().__init__(name)
        self.attributes = attributes
        # Mirrors TInstance: attribute names that must never be assigned to.
        self.constant_attributes = set()

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TModule({}, {})".format(
            repr(self.name), repr(self.attributes))
class TMethod(TMono):
    """
    A type of a method: a function bound to a `self` instance, with the
    pair exposed through the ``__func__``/``__self__`` attributes.
    """
    def __init__(self, self_type, function_type):
        super().__init__("method", {"self": self_type, "fn": function_type})
        self.attributes = OrderedDict([
            ("__func__", function_type),
            ("__self__", self_type),
        ])
class TValue(Type):
    """
    A type-level value (such as the integer denoting width of
    a generic integer type.
    """
    def __init__(self, value):
        self.value = value

    def find(self):
        return self

    def unify(self, other):
        # Values unify only with an equal TValue (or a variable).
        if other is self:
            return
        if isinstance(other, TVar):
            other.unify(self)
        elif self != other:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        return fn(accum, self)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        return "artiq.compiler.types.TValue(%s)" % repr(self.value)

    def __eq__(self, other):
        return isinstance(other, TValue) and \
                self.value == other.value

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.value)
class TDelay(Type):
    """
    The type-level representation of IO delay.

    Exactly one of :ivar:`duration` (a fixed delay expression) and
    :ivar:`cause` (a diagnostic explaining why the delay is unknown)
    is set; construct instances via :func:`TFixedDelay` or
    :func:`TIndeterminateDelay`.
    """
    def __init__(self, duration, cause):
        # Avoid pulling in too many dependencies with `artiq.language`.
        from pythonparser import diagnostic
        assert duration is None or isinstance(duration, iodelay.Expr)
        assert cause is None or isinstance(cause, diagnostic.Diagnostic)
        # Invariant: exactly one of duration/cause is present.
        assert (not (duration and cause)) and (duration or cause)
        self.duration, self.cause = duration, cause

    def is_fixed(self):
        """True when the delay is statically known."""
        return self.duration is not None

    def is_indeterminate(self):
        """True when the delay can never be known statically."""
        return self.cause is not None

    def find(self):
        return self

    def unify(self, other):
        # Two fixed delays unify when their folded durations are equal;
        # indeterminate delays unify only with themselves.
        other = other.find()

        if isinstance(other, TVar):
            other.unify(self)
        elif self.is_fixed() and other.is_fixed() and \
                self.duration.fold() == other.duration.fold():
            pass
        elif self is not other:
            raise UnificationError(self, other)

    def fold(self, accum, fn):
        # delay types do not participate in folding
        pass

    # NOTE(review): __eq__ is defined without __hash__, which makes TDelay
    # unhashable in Python 3 — confirm instances are never used as dict
    # keys or set members.
    def __eq__(self, other):
        return isinstance(other, TDelay) and \
                (self.duration == other.duration and \
                 self.cause == other.cause)

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        if getattr(builtins, "__in_sphinx__", False):
            return str(self)
        if self.duration is None:
            return "<{}.TIndeterminateDelay>".format(__name__)
        elif self.cause is None:
            return "{}.TFixedDelay({})".format(__name__, self.duration)
        else:
            assert False
def TIndeterminateDelay(cause):
    """Construct a :class:`TDelay` of unknown duration; *cause* is the
    diagnostic explaining why it cannot be known."""
    return TDelay(None, cause)
def TFixedDelay(duration):
    """Construct a :class:`TDelay` with the statically-known *duration*."""
    return TDelay(duration, None)
def is_var(typ):
    """True if *typ* resolves to an unbound type variable."""
    return isinstance(typ.find(), TVar)
def is_mono(typ, name=None, **params):
    """True when *typ* resolves to a monomorphic type, optionally with the
    given constructor *name*, and with each keyword-specified type
    parameter present and resolving equal."""
    typ = typ.find()
    if not isinstance(typ, TMono):
        return False
    if name is not None and typ.name != name:
        return False
    return all(param in typ.params and
               typ.params[param].find() == params[param].find()
               for param in params)
def is_polymorphic(typ):
    """True if any part of *typ* is still an unbound type variable."""
    return typ.fold(False, lambda accum, typ: accum or is_var(typ))
def is_tuple(typ, elts=None):
    """True if *typ* resolves to a tuple type; when *elts* is given, its
    element types must match exactly.

    NOTE(review): the guard is `if elts:` — an empty *elts* list behaves
    like None (any tuple matches); confirm this is intended.
    """
    typ = typ.find()
    if elts:
        return isinstance(typ, TTuple) and \
                elts == typ.elts
    else:
        return isinstance(typ, TTuple)
def _is_pointer(typ):
    """True if *typ* resolves to a raw pointer type."""
    return isinstance(typ.find(), _TPointer)
def is_function(typ):
    """True if *typ* resolves to a function type (including external)."""
    return isinstance(typ.find(), TFunction)
def is_rpc(typ):
    """True if *typ* resolves to a remote-call type."""
    return isinstance(typ.find(), TRPC)
def is_external_function(typ, name=None):
    """True when *typ* resolves to an externally-provided function,
    optionally bound to the given symbol *name*."""
    typ = typ.find()
    if not isinstance(typ, TExternalFunction):
        return False
    return name is None or typ.name == name
def is_builtin(typ, name=None):
    """True when *typ* resolves to a builtin type, optionally carrying
    the given *name*."""
    typ = typ.find()
    if not isinstance(typ, TBuiltin):
        return False
    return name is None or typ.name == name
def is_builtin_function(typ, name=None):
    """True when *typ* resolves to a builtin function type, optionally
    carrying the given *name*."""
    typ = typ.find()
    if not isinstance(typ, TBuiltinFunction):
        return False
    return name is None or typ.name == name
def is_broadcast_across_arrays(typ):
    """True if calls to *typ* are applied element-wise to array arguments."""
    # For now, broadcasting is only exposed to predefined external functions, and
    # statically selected. Might be extended to user-defined functions if the design
    # pans out.
    typ = typ.find()
    if not isinstance(typ, TExternalFunction):
        return False
    return typ.broadcast_across_arrays
def is_constructor(typ, name=None):
    """True when *typ* resolves to a class-constructor type, optionally
    carrying the given *name*."""
    typ = typ.find()
    if not isinstance(typ, TConstructor):
        return False
    return name is None or typ.name == name
def is_exn_constructor(typ, name=None):
    """True when *typ* resolves to an exception-constructor type,
    optionally carrying the given *name*."""
    typ = typ.find()
    if not isinstance(typ, TExceptionConstructor):
        return False
    return name is None or typ.name == name
def is_instance(typ, name=None):
    """True when *typ* resolves to a user-defined class instance type,
    optionally carrying the given *name*."""
    typ = typ.find()
    if not isinstance(typ, TInstance):
        return False
    return name is None or typ.name == name
def is_module(typ, name=None):
    """True when *typ* resolves to a module type, optionally carrying the
    given *name*."""
    typ = typ.find()
    if not isinstance(typ, TModule):
        return False
    return name is None or typ.name == name
def is_method(typ):
    """True if *typ* resolves to a bound-method type."""
    return isinstance(typ.find(), TMethod)
def get_method_self(typ):
    """Return the resolved `self` type of a method type; implicitly returns
    None when *typ* is not a method."""
    if is_method(typ):
        return typ.find().params["self"].find()
def get_method_function(typ):
    """Return the resolved underlying function type of a method type;
    implicitly returns None when *typ* is not a method."""
    if is_method(typ):
        return typ.find().params["fn"].find()
def is_value(typ):
    """True if *typ* resolves to a type-level value."""
    return isinstance(typ.find(), TValue)
def get_value(typ):
    """Return the Python value carried by a type-level value, or None for
    an unbound type variable; any other type is a logic error."""
    typ = typ.find()
    if isinstance(typ, TVar):
        return None
    if isinstance(typ, TValue):
        return typ.value
    assert False
def is_delay(typ):
    """True if *typ* resolves to an IO-delay type."""
    return isinstance(typ.find(), TDelay)
def is_fixed_delay(typ):
    """True if *typ* is a delay whose duration is statically known."""
    return is_delay(typ) and typ.find().is_fixed()
def is_indeterminate_delay(typ):
    """True if *typ* is a delay whose duration can never be known."""
    return is_delay(typ) and typ.find().is_indeterminate()
class TypePrinter(object):
    """
    A class that prints types using Python-like syntax and gives
    type variables sequential alphabetic names.
    """

    # Optional per-type-name renderers, keyed by TMono name; shared by all
    # printers (class attribute).
    custom_printers = {}

    def __init__(self):
        self.gen = genalnum()
        self.map = {}            # TVar -> assigned name ('a, 'b, ...)
        self.recurse_guard = set()

    def name(self, typ, depth=0, max_depth=1):
        """Return a human-readable rendering of *typ*.

        :param depth: current recursion depth.
        :param max_depth: beyond this depth (or on re-entry), instances and
            constructors are rendered by name only, keeping output bounded.
        """
        typ = typ.find()
        if isinstance(typ, TVar):
            if typ not in self.map:
                self.map[typ] = "'%s" % next(self.gen)
            return self.map[typ]
        elif isinstance(typ, TInstance):
            if typ in self.recurse_guard or depth >= max_depth:
                return "<instance {}>".format(typ.name)
            elif len(typ.attributes) > 0:
                self.recurse_guard.add(typ)
                attrs = ",\n\t\t".join(["{}: {}".format(attr, self.name(typ.attributes[attr],
                                                                        depth + 1))
                                        for attr in typ.attributes])
                return "<instance {} {{\n\t\t{}\n\t}}>".format(typ.name, attrs)
            else:
                self.recurse_guard.add(typ)
                return "<instance {} {{}}>".format(typ.name)
        elif isinstance(typ, _TPointer):
            # Bug fix: this branch was placed after the TMono check, but
            # _TPointer subclasses TMono, so it was unreachable and pointers
            # printed as "pointer(elt=...)" instead of "elt*".
            return "{}*".format(self.name(typ["elt"], depth + 1))
        elif isinstance(typ, TMono):
            if typ.name in self.custom_printers:
                return self.custom_printers[typ.name](typ, self, depth + 1, max_depth)
            elif typ.params == {}:
                return typ.name
            else:
                return "%s(%s)" % (typ.name, ", ".join(
                    ["%s=%s" % (k, self.name(typ.params[k], depth + 1)) for k in typ.params]))
        elif isinstance(typ, TTuple):
            if len(typ.elts) == 1:
                return "(%s,)" % self.name(typ.elts[0], depth + 1)
            else:
                # Distinct loop variable (the original shadowed `typ`).
                return "(%s)" % ", ".join([self.name(elt, depth + 1) for elt in typ.elts])
        elif isinstance(typ, (TFunction, TExternalFunction)):
            args = []
            args += ["%s:%s" % (arg, self.name(typ.args[arg], depth + 1))
                     for arg in typ.args]
            args += ["?%s:%s" % (arg, self.name(typ.optargs[arg], depth + 1))
                     for arg in typ.optargs]
            signature = "(%s)->%s" % (", ".join(args), self.name(typ.ret, depth + 1))

            # Only mention the delay when it is unknown or nonzero.
            delay = typ.delay.find()
            if isinstance(delay, TVar):
                signature += " delay({})".format(self.name(delay, depth + 1))
            elif not (delay.is_fixed() and iodelay.is_zero(delay.duration)):
                signature += " " + self.name(delay, depth + 1)

            if isinstance(typ, TExternalFunction):
                return "[ffi {}]{}".format(repr(typ.name), signature)
            elif isinstance(typ, TFunction):
                return signature
        elif isinstance(typ, TRPC):
            # Bug fix: the async marker and the service number were passed
            # to format() in the wrong order, printing e.g. "[rpc3 # async]"
            # instead of "[rpc async #3]".
            return "[rpc{} #{}](...)->{}".format(" async" if typ.is_async else "",
                                                 typ.service,
                                                 self.name(typ.ret, depth + 1))
        elif isinstance(typ, TBuiltinFunction):
            return "<function {}>".format(typ.name)
        elif isinstance(typ, (TConstructor, TExceptionConstructor)):
            if typ in self.recurse_guard or depth >= max_depth:
                return "<constructor {}>".format(typ.name)
            elif len(typ.attributes) > 0:
                self.recurse_guard.add(typ)
                attrs = ", ".join(["{}: {}".format(attr, self.name(typ.attributes[attr],
                                                                   depth + 1))
                                   for attr in typ.attributes])
                return "<constructor {} {{{}}}>".format(typ.name, attrs)
            else:
                self.recurse_guard.add(typ)
                return "<constructor {} {{}}>".format(typ.name)
        elif isinstance(typ, TBuiltin):
            return "<builtin {}>".format(typ.name)
        elif isinstance(typ, TValue):
            return repr(typ.value)
        elif isinstance(typ, TDelay):
            if typ.is_fixed():
                return "delay({} mu)".format(typ.duration)
            elif typ.is_indeterminate():
                return "delay(?)"
            else:
                assert False
        else:
            assert False

View File

@ -1,4 +0,0 @@
from .monomorphism import MonomorphismValidator
from .escape import EscapeValidator
from .local_access import LocalAccessValidator
from .constness import ConstnessValidator

View File

@ -1,58 +0,0 @@
"""
:class:`ConstnessValidator` checks that no attribute marked
as constant is ever set.
"""
from pythonparser import algorithm, diagnostic
from .. import types, builtins
class ConstnessValidator(algorithm.Visitor):
    """Emit an error diagnostic whenever a constant attribute, a bytes
    subscript, or an array attribute appears on the left-hand side of an
    assignment (see the module docstring)."""
    def __init__(self, engine):
        self.engine = engine
        # True while visiting the target side of an assignment.
        self.in_assign = False

    def visit_Assign(self, node):
        self.visit(node.value)
        self.in_assign = True
        self.visit(node.targets)
        self.in_assign = False

    def visit_AugAssign(self, node):
        self.visit(node.value)
        self.in_assign = True
        self.visit(node.target)
        self.in_assign = False

    def visit_SubscriptT(self, node):
        # The base expression and the index are themselves reads, even when
        # the subscript as a whole is an assignment target.
        old_in_assign, self.in_assign = self.in_assign, False
        self.visit(node.value)
        self.visit(node.slice)
        self.in_assign = old_in_assign

        if self.in_assign and builtins.is_bytes(node.value.type):
            diag = diagnostic.Diagnostic("error",
                "type {typ} is not mutable",
                {"typ": "bytes"},
                node.loc)
            self.engine.process(diag)

    def visit_AttributeT(self, node):
        # As above: the attribute's base expression is a read.
        old_in_assign, self.in_assign = self.in_assign, False
        self.visit(node.value)
        self.in_assign = old_in_assign

        if self.in_assign:
            typ = node.value.type.find()
            if types.is_instance(typ) and node.attr in typ.constant_attributes:
                diag = diagnostic.Diagnostic("error",
                    "cannot assign to constant attribute '{attr}' of class '{class}'",
                    {"attr": node.attr, "class": typ.name},
                    node.loc)
                self.engine.process(diag)
                return
            if builtins.is_array(typ):
                diag = diagnostic.Diagnostic("error",
                    "array attributes cannot be assigned to",
                    {}, node.loc)
                self.engine.process(diag)
                return

View File

@ -1,360 +0,0 @@
"""
:class:`EscapeValidator` verifies that no mutable data escapes
the region of its allocation.
"""
import functools
from pythonparser import algorithm, diagnostic
from .. import asttyped, types, builtins
def has_region(typ):
    """True if any part of *typ* is allocated, i.e. has a lifetime that
    escape analysis must track."""
    return typ.fold(False, lambda accum, typ: accum or builtins.is_allocated(typ))
class Global:
    # The region of values that live forever (never deallocated).
    def __repr__(self):
        return "Global()"
class Argument:
    # The region of a value received as a formal argument: it outlives the
    # function body. `loc` points at the argument, for diagnostics.
    def __init__(self, loc):
        self.loc = loc

    def __repr__(self):
        return "Argument()"
class Region:
    """
    A last-in-first-out allocation region. Tied to lexical scoping
    and is internally represented simply by a source range.

    :ivar range: (:class:`pythonparser.source.Range` or None)
    """
    def __init__(self, source_range=None):
        self.range = source_range

    def present(self):
        # A region is "present" once a source range has been assigned.
        return bool(self.range)

    def includes(self, other):
        """True when this region's range fully contains *other*'s."""
        assert self.range
        assert self.range.source_buffer == other.range.source_buffer

        return self.range.begin_pos <= other.range.begin_pos and \
            self.range.end_pos >= other.range.end_pos

    def intersects(self, other):
        """True when the ranges overlap without either containing the other."""
        assert self.range
        assert self.range.source_buffer == other.range.source_buffer

        return (self.range.begin_pos <= other.range.begin_pos <= self.range.end_pos and \
                other.range.end_pos > self.range.end_pos) or \
            (other.range.begin_pos <= self.range.begin_pos <= other.range.end_pos and \
                self.range.end_pos > other.range.end_pos)

    def outlives(lhs, rhs):
        # NOTE(review): written as a plain two-argument comparator (no
        # `self`) so it can be fed to functools.cmp_to_key; it returns a
        # bool (0/1), never negative — confirm the resulting sort order is
        # the intended one.
        if not isinstance(lhs, Region): # lhs lives nonlexically
            return True
        elif not isinstance(rhs, Region): # rhs lives nonlexically, lhs does not
            return False
        else:
            assert not lhs.intersects(rhs)
            return lhs.includes(rhs)

    def __repr__(self):
        return "Region({})".format(repr(self.range))
class RegionOf(algorithm.Visitor):
    """
    Visit an expression and return the region that must be alive for the
    expression to execute.

    For expressions involving multiple regions, the shortest-lived one is
    returned.
    """

    def __init__(self, env_stack, youngest_region):
        self.env_stack, self.youngest_region = env_stack, youngest_region

    # Liveness determined by assignments
    def visit_NameT(self, node):
        # First, look at stack regions
        for region in reversed(self.env_stack[1:]):
            if node.id in region:
                return region[node.id]

        # Then, look at the global region of this module
        if node.id in self.env_stack[0]:
            return Global()

        assert False

    # Value lives as long as the current scope, if it's mutable,
    # or else forever
    def visit_sometimes_allocating(self, node):
        if has_region(node.type):
            return self.youngest_region
        else:
            return Global()

    visit_BinOpT = visit_sometimes_allocating

    def visit_CallT(self, node):
        if types.is_external_function(node.func.type, "cache_get"):
            # The cache is borrow checked dynamically
            return Global()
        else:
            # Bug fix: the region was computed but not returned here, so
            # this visitor silently yielded None for ordinary calls.
            return self.visit_sometimes_allocating(node)

    # Value lives as long as the object/container, if it's mutable,
    # or else forever
    def visit_accessor(self, node):
        if has_region(node.type):
            return self.visit(node.value)
        else:
            return Global()

    visit_AttributeT = visit_accessor
    visit_SubscriptT = visit_accessor

    # Value lives as long as the shortest living operand
    def visit_selecting(self, nodes):
        regions = [self.visit(node) for node in nodes]
        regions = list(filter(lambda x: x, regions))

        if any(regions):
            regions.sort(key=functools.cmp_to_key(Region.outlives), reverse=True)
            return regions[0]
        else:
            return Global()

    def visit_BoolOpT(self, node):
        return self.visit_selecting(node.values)

    def visit_IfExpT(self, node):
        return self.visit_selecting([node.body, node.orelse])

    def visit_TupleT(self, node):
        return self.visit_selecting(node.elts)

    # Value lives as long as the current scope
    def visit_allocating(self, node):
        return self.youngest_region

    visit_DictT = visit_allocating
    visit_DictCompT = visit_allocating
    visit_GeneratorExpT = visit_allocating
    visit_LambdaT = visit_allocating
    visit_ListT = visit_allocating
    visit_ListCompT = visit_allocating
    visit_SetT = visit_allocating
    visit_SetCompT = visit_allocating

    # Value lives forever
    def visit_immutable(self, node):
        assert not has_region(node.type)
        return Global()

    visit_NameConstantT = visit_immutable
    visit_NumT = visit_immutable
    visit_EllipsisT = visit_immutable
    visit_UnaryOpT = visit_sometimes_allocating # possibly array op
    visit_CompareT = visit_immutable

    # Value lives forever
    def visit_global(self, node):
        return Global()

    visit_StrT = visit_global
    visit_QuoteT = visit_global

    # Not implemented
    def visit_unimplemented(self, node):
        assert False

    visit_StarredT = visit_unimplemented
    visit_YieldT = visit_unimplemented
    visit_YieldFromT = visit_unimplemented
class AssignedNamesOf(algorithm.Visitor):
    """
    Visit an expression and return the list of names that appear
    on the lhs of assignment, directly or through an accessor.
    """

    def visit_name(self, node):
        return [node]

    visit_NameT = visit_name
    visit_QuoteT = visit_name

    def visit_accessor(self, node):
        # Assigning through a.b or a[i] assigns to the underlying object.
        return self.visit(node.value)

    visit_AttributeT = visit_accessor
    visit_SubscriptT = visit_accessor

    def visit_sequence(self, node):
        # Concatenate the name lists of every element of the target tuple/list.
        return functools.reduce(lambda left, right: left + right,
                                [self.visit(elt) for elt in node.elts])

    visit_TupleT = visit_sequence
    visit_ListT = visit_sequence

    def visit_StarredT(self, node):
        assert False
class EscapeValidator(algorithm.Visitor):
    """Verify that no allocated value escapes the region it was allocated in.

    A region models a lifetime: the span of the program during which a value
    is guaranteed to be alive.  The validator walks the typed AST, infers the
    region of each relevant expression via :class:`RegionOf`, and reports a
    diagnostic whenever an assignment or a ``return`` would let a value
    outlive its region.
    """

    def __init__(self, engine):
        # Diagnostic engine used to report escape errors.
        self.engine = engine
        # Region of the innermost scope currently being visited.
        self.youngest_region = Global()
        # Stack of name -> region environments, one per nested scope.
        self.env_stack = []
        # Environment of the innermost scope (top of env_stack).
        self.youngest_env = None

    def _region_of(self, expr):
        """Infer the region of *expr* in the current context."""
        return RegionOf(self.env_stack, self.youngest_region).visit(expr)

    def _names_of(self, expr):
        """Collect the name nodes assigned to through the target *expr*."""
        return AssignedNamesOf().visit(expr)

    def _diagnostics_for(self, region, loc, descr="the value of the expression"):
        """Build explanatory notes describing the lifetime of *region*."""
        if isinstance(region, Region):
            # A bounded region: show where it begins and where it ends.
            return [
                diagnostic.Diagnostic("note",
                    "{descr} is alive from this point...", {"descr": descr},
                    region.range.begin()),
                diagnostic.Diagnostic("note",
                    "... to this point", {},
                    region.range.end())
            ]
        elif isinstance(region, Global):
            return [
                diagnostic.Diagnostic("note",
                    "{descr} is alive forever", {"descr": descr},
                    loc)
            ]
        elif isinstance(region, Argument):
            return [
                diagnostic.Diagnostic("note",
                    "{descr} is still alive after this function returns", {"descr": descr},
                    loc),
                diagnostic.Diagnostic("note",
                    "{descr} is introduced here as a formal argument", {"descr": descr},
                    region.loc)
            ]
        else:
            assert False

    def visit_in_region(self, node, region, typing_env, args=[]):
        # NOTE(review): `args=[]` is a mutable default, but it is only read
        # (membership test and lookup), never mutated, so it is benign here.
        # NOTE(review): if anything raised before the `env_stack.append`
        # below, the `finally` clause would pop an entry it never pushed --
        # verify this cannot happen in practice.
        try:
            old_youngest_region = self.youngest_region
            self.youngest_region = region

            old_youngest_env = self.youngest_env
            self.youngest_env = {}
            for name in typing_env:
                if has_region(typing_env[name]):
                    if name in args:
                        # Formal arguments carry the caller-provided region.
                        self.youngest_env[name] = args[name]
                    else:
                        self.youngest_env[name] = Region(None) # not yet known
                else:
                    # Region-less values live forever.
                    self.youngest_env[name] = Global()
            self.env_stack.append(self.youngest_env)

            self.generic_visit(node)
        finally:
            # Restore the state of the enclosing scope.
            self.env_stack.pop()
            self.youngest_env = old_youngest_env
            self.youngest_region = old_youngest_region

    def visit_ModuleT(self, node):
        # Module level: no enclosing allocation region.
        self.visit_in_region(node, None, node.typing_env)

    def visit_FunctionDefT(self, node):
        # The function object itself lives in the enclosing region.
        self.youngest_env[node.name] = self.youngest_region
        self.visit_in_region(node, Region(node.loc), node.typing_env,
                             args={ arg.arg: Argument(arg.loc) for arg in node.args.args })
    visit_QuotedFunctionDefT = visit_FunctionDefT

    def visit_ClassDefT(self, node):
        self.youngest_env[node.name] = self.youngest_region
        self.visit_in_region(node, Region(node.loc), node.constructor_type.attributes)

    # Only three ways for a pointer to escape:
    # * Assigning or op-assigning it (we ensure an outlives relationship)
    # * Returning it (we only allow returning values that live forever)
    # * Raising it (we forbid allocating exceptions that refer to mutable data)¹
    #
    # Literals don't count: a constructed object is always
    # outlived by all its constituents.
    # Closures don't count: see above.
    # Calling functions doesn't count: arguments never outlive
    # the function body.
    #
    # ¹Strings are currently never allocated with a limited lifetime,
    # and exceptions can only refer to strings, so we don't actually check
    # this property. But we will need to, if string operations are ever added.

    def visit_assignment(self, target, value):
        value_region = self._region_of(value)

        # If we assign to an attribute of a quoted value, there will be no names
        # in the assignment lhs.
        target_names = self._names_of(target) or []

        # Adopt the value region for any variables declared on the lhs.
        for name in target_names:
            region = self._region_of(name)
            if isinstance(region, Region) and not region.present():
                # Find the name's environment to overwrite the region.
                for env in self.env_stack[::-1]:
                    if name.id in env:
                        env[name.id] = value_region
                        break

        # The assigned value should outlive the assignee
        target_regions = [self._region_of(name) for name in target_names]
        for target_region in target_regions:
            if not Region.outlives(value_region, target_region):
                diag = diagnostic.Diagnostic("error",
                    "the assigned value does not outlive the assignment target", {},
                    value.loc, [target.loc],
                    notes=self._diagnostics_for(target_region, target.loc,
                                                "the assignment target") +
                          self._diagnostics_for(value_region, value.loc,
                                                "the assigned value"))
                self.engine.process(diag)

    def visit_Assign(self, node):
        # Chained assignment: validate each target against the same value.
        for target in node.targets:
            self.visit_assignment(target, node.value)

    def visit_AugAssign(self, node):
        if builtins.is_list(node.target.type):
            # Suggest the equivalent re-binding form instead.  `[:-1]` strips
            # the trailing '=' from the augmented operator token ("+=" -> "+").
            note = diagnostic.Diagnostic("note",
                "try using `{lhs} = {lhs} {op} {rhs}` instead",
                {"lhs": node.target.loc.source(),
                 "rhs": node.value.loc.source(),
                 "op": node.op.loc.source()[:-1]},
                node.loc)
            diag = diagnostic.Diagnostic("error",
                "lists cannot be mutated in-place", {},
                node.op.loc, [node.target.loc],
                notes=[note])
            self.engine.process(diag)

        self.visit_assignment(node.target, node.value)

    def visit_Return(self, node):
        region = self._region_of(node.value)
        if isinstance(region, Region):
            # Returning a region-bound value would let it escape its scope.
            note = diagnostic.Diagnostic("note",
                "this expression has type {type}",
                {"type": types.TypePrinter().name(node.value.type)},
                node.value.loc)
            diag = diagnostic.Diagnostic("error",
                "cannot return an allocated value that does not live forever", {},
                node.value.loc, notes=self._diagnostics_for(region, node.value.loc) + [note])
            self.engine.process(diag)

View File

@ -1,187 +0,0 @@
"""
:class:`LocalAccessValidator` verifies that local variables
are not accessed before being initialized.
"""
from functools import reduce
from pythonparser import diagnostic
from .. import ir, analyses
def is_special_variable(name):
    """Return True for compiler-synthesized variable names.

    Internal temporaries are mangled with a "$" in their name, which cannot
    appear in a user-written identifier.
    """
    return name.count("$") > 0
class LocalAccessValidator:
    """Validate, per function, that no local variable can be read before it
    has been assigned on some control-flow path, and that no closure can
    capture a variable that is not initialized on every path to the capture
    point."""

    def __init__(self, engine):
        # Diagnostic engine used to report access-before-initialization errors.
        self.engine = engine

    def process(self, functions):
        """Validate each function independently."""
        for func in functions:
            self.process_function(func)

    def process_function(self, func):
        """Run a forward must-be-initialized dataflow analysis over *func*."""
        # Find all environments and closures allocated in this func.
        environments, closures = [], []
        for insn in func.instructions():
            if isinstance(insn, ir.Alloc) and ir.is_environment(insn.type):
                environments.append(insn)
            elif isinstance(insn, ir.Closure):
                closures.append(insn)

        # Compute initial state of interesting environments: every tracked
        # variable starts out uninitialized (False).
        # Environments consisting only of internal variables (containing a ".")
        # are ignored.
        # NOTE(review): env params are filtered on ".", while instruction var
        # names are filtered via is_special_variable ("$") -- presumably two
        # distinct mangling schemes; confirm.
        initial_state = {}
        for env in environments:
            env_state = {var: False for var in env.type.params if "." not in var}
            # any(env_state) iterates the keys; variable names are non-empty
            # strings, so this is equivalent to `if env_state:`.
            if any(env_state):
                initial_state[env] = env_state

        # Traverse the acyclic graph made of basic blocks and forward edges only,
        # while updating the environment state.
        domtree = analyses.DominatorTree(func)
        state = {}
        def traverse(block):
            # Have we computed the state of this block already?
            if block in state:
                return state[block]

            # No! Which forward edges lead to this block?
            # If we dominate a predecessor, it's a back edge instead.
            forward_edge_preds = [pred for pred in block.predecessors()
                                  if block not in domtree.dominators(pred)]

            # Figure out what the state is before the leader
            # instruction of this block.
            pred_states = [traverse(pred) for pred in forward_edge_preds]
            block_state = {}
            if len(pred_states) > 1:
                for env in initial_state:
                    # The variable has to be initialized in all predecessors
                    # in order to be initialized in this block.
                    def merge_state(a, b):
                        return {var: a[var] and b[var] for var in a}
                    # Note: `state` in this comprehension shadows the outer
                    # per-block `state` dict; it ranges over pred_states.
                    block_state[env] = reduce(merge_state,
                                              [state[env] for state in pred_states])
            elif len(pred_states) == 1:
                # The state is the same as at the terminator of predecessor.
                # We'll mutate it, so copy.
                pred_state = pred_states[0]
                for env in initial_state:
                    env_state = pred_state[env]
                    block_state[env] = {var: env_state[var] for var in env_state}
            else:
                # This is the entry block.
                for env in initial_state:
                    env_state = initial_state[env]
                    block_state[env] = {var: env_state[var] for var in env_state}

            # Update the state based on block contents, while validating
            # that no access to uninitialized variables will be done.
            for insn in block.instructions:
                def pred_at_fault(env, var_name):
                    # Find out where the uninitialized state comes from.
                    for pred, pred_state in zip(forward_edge_preds, pred_states):
                        if not pred_state[env][var_name]:
                            return pred
                    # It's the entry block and it was never initialized.
                    return None

                set_local_in_this_frame = False
                if (isinstance(insn, (ir.SetLocal, ir.GetLocal)) and
                        not is_special_variable(insn.var_name)):
                    env, var_name = insn.environment(), insn.var_name

                    # Make sure that the variable is defined in the scope of this function.
                    if env in block_state and var_name in block_state[env]:
                        if isinstance(insn, ir.SetLocal):
                            # We've just initialized it.
                            block_state[env][var_name] = True
                            set_local_in_this_frame = True
                        else: # isinstance(insn, ir.GetLocal)
                            if not block_state[env][var_name]:
                                # Oops, accessing it uninitialized.
                                self._uninitialized_access(insn, var_name,
                                                           pred_at_fault(env, var_name))

                closures_to_check = []

                if (isinstance(insn, (ir.SetLocal, ir.SetAttr, ir.SetElem)) and
                        not set_local_in_this_frame):
                    # Closures may escape via these mechanisms and be invoked elsewhere.
                    if isinstance(insn.value(), ir.Closure):
                        closures_to_check.append(insn.value())

                if isinstance(insn, (ir.Call, ir.Invoke)):
                    # We can't always trace the flow of closures from point of
                    # definition to point of call; however, we know that, by transitiveness
                    # of this analysis, only closures defined in this function can contain
                    # uninitialized variables.
                    #
                    # Thus, enumerate the closures, and check all of them during any operation
                    # that may eventually result in the closure being called.
                    closures_to_check = closures

                for closure in closures_to_check:
                    env = closure.environment()
                    # Make sure this environment has any interesting variables.
                    if env in block_state:
                        for var_name in block_state[env]:
                            if not block_state[env][var_name] and not is_special_variable(var_name):
                                # A closure would capture this variable while it is not always
                                # initialized. Note that this check is transitive.
                                self._uninitialized_access(closure, var_name,
                                                           pred_at_fault(env, var_name))

            # Save the state.
            state[block] = block_state

            return block_state

        for block in func.basic_blocks:
            traverse(block)

    def _uninitialized_access(self, insn, var_name, pred_at_fault):
        """Emit a diagnostic for reading (or capturing) *var_name* while it may
        still be uninitialized.  *pred_at_fault* is the predecessor block the
        uninitialized state flows in from, or None for the entry block."""
        if pred_at_fault is not None:
            # Walk backwards through the faulty predecessor (and its own
            # predecessors, breadth-first) to find a located instruction the
            # note can point at.
            visited = set()
            possible_preds = [pred_at_fault]
            uninitialized_loc = None
            while uninitialized_loc is None:
                possible_pred = possible_preds.pop(0)
                visited.add(possible_pred)
                for pred_insn in reversed(possible_pred.instructions):
                    if pred_insn.loc is not None:
                        uninitialized_loc = pred_insn.loc.begin()
                        break
                for block in possible_pred.predecessors():
                    if block not in visited:
                        possible_preds.append(block)
            assert uninitialized_loc is not None

            note = diagnostic.Diagnostic("note",
                "variable is not initialized when control flows from this point", {},
                uninitialized_loc)
        else:
            note = None

        if note is not None:
            notes = [note]
        else:
            notes = []

        if isinstance(insn, ir.Closure):
            diag = diagnostic.Diagnostic("error",
                "variable '{name}' can be captured in a closure uninitialized here",
                {"name": var_name},
                insn.loc, notes=notes)
        else:
            diag = diagnostic.Diagnostic("error",
                "variable '{name}' is not always initialized here",
                {"name": var_name},
                insn.loc, notes=notes)

        self.engine.process(diag)

View File

@ -1,41 +0,0 @@
"""
:class:`MonomorphismValidator` verifies that all type variables have been
elided, which is necessary for code generation.
"""
from pythonparser import algorithm, diagnostic
from .. import asttyped, types, builtins
class MonomorphismValidator(algorithm.Visitor):
    """Check that type inference produced a fully concrete (monomorphic) type
    for every expression, emitting an error diagnostic otherwise."""

    def __init__(self, engine):
        # Diagnostic engine used to report inference failures.
        self.engine = engine

    def visit_FunctionDefT(self, node):
        # Calling super().generic_visit bypasses the overridden generic_visit
        # for the function node itself; its signature is checked via the
        # return type below rather than as a plain typed expression.
        super().generic_visit(node)

        return_type = node.signature_type.find().ret
        if types.is_polymorphic(return_type):
            note = diagnostic.Diagnostic("note",
                "the function has return type {type}",
                {"type": types.TypePrinter().name(return_type)},
                node.name_loc)
            diag = diagnostic.Diagnostic("error",
                "the return type of this function cannot be fully inferred", {},
                node.name_loc, notes=[note])
            self.engine.process(diag)
    visit_QuotedFunctionDefT = visit_FunctionDefT

    def generic_visit(self, node):
        # Visit children first, then check this node for leftover type
        # variables if it is a typed expression node.
        super().generic_visit(node)

        if isinstance(node, asttyped.commontyped):
            if types.is_polymorphic(node.type):
                note = diagnostic.Diagnostic("note",
                    "the expression has type {type}",
                    {"type": types.TypePrinter().name(node.type)},
                    node.loc)
                diag = diagnostic.Diagnostic("error",
                    "the type of this expression cannot be fully inferred", {},
                    node.loc, notes=[note])
                self.engine.process(diag)

View File

@ -1,8 +1,6 @@
import os, sys import os, sys
import numpy import numpy
from pythonparser import diagnostic
from artiq import __artiq_dir__ as artiq_dir from artiq import __artiq_dir__ as artiq_dir
from artiq.language.core import * from artiq.language.core import *

View File

@ -10,8 +10,6 @@ from collections import defaultdict
import h5py import h5py
from llvmlite import binding as llvm
from sipyco import common_args from sipyco import common_args
from artiq import __version__ as artiq_version from artiq import __version__ as artiq_version
@ -70,22 +68,6 @@ class ELFRunner(FileRunner):
return f.read() return f.read()
class LLVMIRRunner(FileRunner):
def compile(self):
with open(self.file, "r") as f:
llmodule = llvm.parse_assembly(f.read())
llmodule.verify()
return self.target.link([self.target.assemble(llmodule)])
class LLVMBitcodeRunner(FileRunner):
def compile(self):
with open(self.file, "rb") as f:
llmodule = llvm.parse_bitcode(f.read())
llmodule.verify()
return self.target.link([self.target.assemble(llmodule)])
class DummyScheduler: class DummyScheduler:
def __init__(self): def __init__(self):
self.rid = 0 self.rid = 0
@ -157,9 +139,7 @@ def _build_experiment(device_mgr, dataset_mgr, args):
managers = (device_mgr, dataset_mgr, argument_mgr, {}) managers = (device_mgr, dataset_mgr, argument_mgr, {})
if hasattr(args, "file"): if hasattr(args, "file"):
is_elf = args.file.endswith(".elf") is_elf = args.file.endswith(".elf")
is_ll = args.file.endswith(".ll") if is_elf:
is_bc = args.file.endswith(".bc")
if is_elf or is_ll or is_bc:
if args.arguments: if args.arguments:
raise ValueError("arguments not supported for precompiled kernels") raise ValueError("arguments not supported for precompiled kernels")
if args.class_name: if args.class_name:
@ -167,10 +147,6 @@ def _build_experiment(device_mgr, dataset_mgr, args):
"for precompiled kernels") "for precompiled kernels")
if is_elf: if is_elf:
return ELFRunner(managers, file=args.file) return ELFRunner(managers, file=args.file)
elif is_ll:
return LLVMIRRunner(managers, file=args.file)
elif is_bc:
return LLVMBitcodeRunner(managers, file=args.file)
else: else:
import_cache.install_hook() import_cache.install_hook()
module = file_import(args.file, prefix="artiq_run_") module = file_import(args.file, prefix="artiq_run_")

View File

@ -1,78 +0,0 @@
// Host-side support library for the compiler test suite: exposes the
// firmware's exception-handling runtime (eh_artiq) plus a minimal CSlice
// ABI shim and the `now` timeline symbol.
#![feature(libc, panic_unwind, unwind_attributes, rustc_private, int_bits_const)]
#![crate_name = "artiq_support"]
#![crate_type = "cdylib"]

// `std` is aliased to `core` -- presumably so the firmware sources pulled in
// below via #[path] (written against `core`) compile unchanged on the host;
// confirm against the firmware tree.
extern crate std as core;
extern crate libc;
extern crate unwind;

// Note: this does *not* match the cslice crate!
// ARTIQ Python has the slice length field fixed at 32 bits, even on 64-bit platforms.
mod cslice {
    use core::marker::PhantomData;
    use core::convert::AsRef;
    use core::slice;

    /// Borrowed, C-layout slice with a fixed 32-bit length field.
    #[repr(C)]
    #[derive(Clone, Copy)]
    pub struct CSlice<'a, T> {
        base: *const T,
        len: u32,
        phantom: PhantomData<&'a ()>
    }

    impl<'a, T> CSlice<'a, T> {
        pub fn len(&self) -> usize {
            self.len as usize
        }
    }

    impl<'a, T> AsRef<[T]> for CSlice<'a, T> {
        fn as_ref(&self) -> &[T] {
            // Safety: `base`/`len` must describe a live allocation for the
            // lifetime 'a; the `str` constructor below guarantees this.
            unsafe {
                slice::from_raw_parts(self.base, self.len as usize)
            }
        }
    }

    pub trait AsCSlice<'a, T> {
        fn as_c_slice(&'a self) -> CSlice<'a, T>;
    }

    impl<'a> AsCSlice<'a, u8> for str {
        fn as_c_slice(&'a self) -> CSlice<'a, u8> {
            CSlice {
                base: self.as_ptr(),
                // The ABI fixes the length at 32 bits; this truncates for
                // strings longer than u32::MAX bytes.
                len: self.len() as u32,
                phantom: PhantomData
            }
        }
    }
}

// Include the unwinding/EH sources directly from the firmware tree so the
// host build exercises the same code as the device.
#[path = "."]
pub mod eh {
    #[path = "../../firmware/libeh/dwarf.rs"]
    pub mod dwarf;
}
#[path = "../../firmware/ksupport/eh_artiq.rs"]
pub mod eh_artiq;

use std::{str, process};

/// Handler for uncaught ARTIQ exceptions: print the exception's name,
/// message, parameters and source location, then exit with status 1.
fn terminate(exception: &eh_artiq::Exception, mut _backtrace: &mut [usize]) -> ! {
    println!("Uncaught {}: {} ({}, {}, {})",
             str::from_utf8(exception.name.as_ref()).unwrap(),
             str::from_utf8(exception.message.as_ref()).unwrap(),
             exception.param[0],
             exception.param[1],
             exception.param[2]);
    println!("at {}:{}:{}",
             str::from_utf8(exception.file.as_ref()).unwrap(),
             exception.line,
             exception.column);
    process::exit(1);
}

// Exported under the unmangled symbol `now`; presumably referenced by
// compiled test kernels -- confirm against the code generator.
#[export_name = "now"]
pub static mut NOW: i64 = 0;

View File

@ -1,6 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.llvmgen %s
def f():
pass
def g():
a = f()

View File

@ -1,13 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
class Foo:
def __init__(self):
pass
a = Foo()
b = Foo()
# CHECK-L: ${LINE:+1}: error: Custom object comparison is not supported
a > b

View File

@ -1,13 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
class Foo:
def __init__(self):
pass
a = Foo()
b = Foo()
# CHECK-L: ${LINE:+1}: error: Custom object inclusion test is not supported
a in b

View File

@ -1,9 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
# CHECK-L: ${LINE:+1}: error: this function must return a value of type numpy.int32 explicitly
def foo(x):
if x:
return 1
foo(True)

View File

@ -1,11 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.llvmgen %s
def make_none():
return None
def take_arg(arg):
pass
def run():
retval = make_none()
take_arg(retval)

View File

@ -1,4 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.llvmgen %s
def f():
return float(1.0)

View File

@ -1,9 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.signature %s
def foo(x):
if x:
return 1
else:
return 2
foo(True)

View File

@ -1,5 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.signature +diag %s >%t
# RUN: OutputCheck %s --file-to-check=%t
# CHECK-L: ${LINE:+1}: warning: this expression, which is always truthful, is coerced to bool
bool(IndexError())

View File

@ -1,8 +0,0 @@
device_db = {
"core": {
"type": "local",
"module": "artiq.coredevice.core",
"class": "Core",
"arguments": {"host": None, "ref_period": 1e-9}
}
}

View File

@ -1,25 +0,0 @@
# RUN: env ARTIQ_DUMP_IR=%t ARTIQ_IR_NO_LOC=1 %python -m artiq.compiler.testbench.embedding +compile %s
# RUN: OutputCheck %s --file-to-check=%t.txt
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: %LOC.self.FLD.foo = numpy.int32 getattr('foo') <instance testbench.c> %ARG.self
# CHECK-L: for.head:
class c:
kernel_invariants = {"foo"}
def __init__(self):
self.foo = 1
@kernel
def run(self):
for _ in range(10):
core_log(1.0 * self.foo)
i = c()
@kernel
def entrypoint():
i.run()

View File

@ -1,8 +0,0 @@
device_db = {
"core": {
"type": "local",
"module": "artiq.coredevice.core",
"class": "Core",
"arguments": {"host": None, "ref_period": 1e-9}
}
}

View File

@ -1,23 +0,0 @@
# RUN: env ARTIQ_DUMP_IR=%t %python -m artiq.compiler.testbench.embedding +compile %s
# RUN: OutputCheck %s --file-to-check=%t.txt
# XFAIL: *
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: call ()->NoneType %local.testbench.entrypoint ; calls testbench.entrypoint
@kernel
def baz():
pass
class foo:
@kernel
def bar(self):
# CHECK-L: call ()->NoneType %local.testbench.baz ; calls testbench.baz
baz()
x = foo()
@kernel
def entrypoint():
x.bar()

View File

@ -1,17 +0,0 @@
# RUN: env ARTIQ_DUMP_IR=%t %python -m artiq.compiler.testbench.embedding +compile %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t.txt
# XFAIL: *
from artiq.language.core import *
from artiq.language.types import *
class foo:
@kernel
def bar(self):
pass
x = foo()
@kernel
def entrypoint():
# CHECK-L: ; calls testbench.foo.bar
x.bar()

View File

@ -1,22 +0,0 @@
# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s
# RUN: OutputCheck %s --file-to-check=%t.ll
from artiq.language.core import *
from artiq.language.types import *
# CHECK: i64 @_Z13testbench.foozz\(i64 %ARG.x, \{ i1, i64 \} %ARG.y\)
@kernel
def foo(x: TInt64, y: TInt64 = 1) -> TInt64:
print(x+y)
return x+y
@kernel
def bar(x: TInt64) -> None:
print(x)
@kernel
def entrypoint():
print(foo(0, 2))
print(foo(1, 3))
bar(3)

View File

@ -1,26 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
import numpy as np
@kernel
def entrypoint():
# Just make sure everything compiles.
# LLVM intrinsic:
a = np.array([1.0, 2.0, 3.0])
b = np.sin(a)
assert b.shape == a.shape
# libm:
c = np.array([1.0, 2.0, 3.0])
d = np.arctan(c)
assert d.shape == c.shape
# libm, binary:
e = np.array([1.0, 2.0, 3.0])
f = np.array([4.0, 5.0, 6.0])
g = np.arctan2(e, f)
# g = np.arctan2(e, 0.0)
# g = np.arctan2(0.0, f)
assert g.shape == e.shape

View File

@ -1,22 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
import numpy as np
@kernel
def entrypoint():
# FIXME: This needs to be a runtime test (but numpy.* integration is
# currently embedding-only).
a = np.array([1, 2, 3])
b = np.transpose(a)
assert a.shape == b.shape
for i in range(len(a)):
assert a[i] == b[i]
c = np.array([[1, 2, 3], [4, 5, 6]])
d = np.transpose(c)
assert c.shape == d.shape
for i in range(2):
for j in range(3):
assert c[i][j] == d[j][i]

View File

@ -1,36 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
from numpy import array
int_vec = array([1, 2, 3])
float_vec = array([1.0, 2.0, 3.0])
int_mat = array([[1, 2], [3, 4]])
float_mat = array([[1.0, 2.0], [3.0, 4.0]])
@kernel
def entrypoint():
# TODO: These need to be runtime tests!
assert int_vec.shape == (3, )
assert int_vec[0] == 1
assert int_vec[1] == 2
assert int_vec[2] == 3
assert float_vec.shape == (3, )
assert float_vec[0] == 1.0
assert float_vec[1] == 2.0
assert float_vec[2] == 3.0
assert int_mat.shape == (2, 2)
assert int_mat[0][0] == 1
assert int_mat[0][1] == 2
assert int_mat[1][0] == 3
assert int_mat[1][1] == 4
assert float_mat.shape == (2, 2)
assert float_mat[0][0] == 1.0
assert float_mat[0][1] == 2.0
assert float_mat[1][0] == 3.0
assert float_mat[1][1] == 4.0

View File

@ -1,15 +0,0 @@
# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s
# RUN: OutputCheck %s --file-to-check=%t.ll
from artiq.language.core import *
from artiq.language.types import *
# CHECK: call void @rpc_send_async
@rpc(flags={"async"})
def foo():
pass
@kernel
def entrypoint():
foo()

View File

@ -1,20 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.experiment import *
class MyClass:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
sl = [MyClass(x=1), MyClass(x=2)]
@kernel
def bug(l):
for c in l:
print(c.x)
@kernel
def entrypoint():
bug(sl)

View File

@ -1,20 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
class C:
@kernel
def f(self):
pass
class D(C):
@kernel
def f(self):
# super().f() # super() not bound
C.f(self) # KeyError in compile
di = D()
@kernel
def entrypoint():
di.f()

View File

@ -1,8 +0,0 @@
device_db = {
"core": {
"type": "local",
"module": "artiq.coredevice.core",
"class": "Core",
"arguments": {"host": None, "ref_period": 1e-9}
}
}

View File

@ -1,16 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class c:
pass
@kernel
def entrypoint():
# CHECK-L: <synthesized>:1: error: host object does not have an attribute 'x'
# CHECK-L: ${LINE:+1}: note: expanded from here
a = c
# CHECK-L: ${LINE:+1}: note: attribute accessed here
a.x

View File

@ -1,16 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class c:
xx = 1
@kernel
def entrypoint():
# CHECK-L: <synthesized>:1: error: host object does not have an attribute 'x'; did you mean 'xx'?
# CHECK-L: ${LINE:+1}: note: expanded from here
a = c
# CHECK-L: ${LINE:+1}: note: attribute accessed here
a.x

View File

@ -1,21 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class c:
pass
i1 = c()
i1.x = 1
i2 = c()
i2.x = 1.0
@kernel
def entrypoint():
# CHECK-L: <synthesized>:1: error: host object has an attribute 'x' of type float, which is different from previously inferred type numpy.int32 for the same attribute
i1.x
# CHECK-L: ${LINE:+1}: note: expanded from here
i2.x

View File

@ -1,18 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class c:
kernel_invariants = {"a"}
def __init__(self):
self.a = 1
i = c()
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: error: cannot assign to constant attribute 'a' of class 'testbench.c'
i.a = 1

View File

@ -1,17 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class c:
x = [1, "x"]
@kernel
def entrypoint():
# CHECK-L: <synthesized>:1: error: cannot unify numpy.int? with str
# CHECK-NEXT-L: [1, 'x']
# CHECK-L: ${LINE:+1}: note: expanded from here
a = c
# CHECK-L: ${LINE:+1}: note: while inferring a type for an attribute 'x' of a host object
a.x

View File

@ -1,19 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class foo:
# CHECK-L: ${LINE:+2}: fatal: this function cannot be called as an RPC
@host_only
def pause(self):
pass
x = foo()
@kernel
def entrypoint():
# CHECK-L: ${LINE:+2}: note: in function called remotely here
# CHECK-L: ${LINE:+1}: note: while inferring a type for an attribute 'pause' of a host object
x.pause()

View File

@ -1,19 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
class c:
pass
@kernel
def f():
pass
c.f = f
x = c().f
@kernel
def entrypoint():
# CHECK-L: <synthesized>:1: error: function 'f()->NoneType delay('a)' of class 'testbench.c' cannot accept a self argument
# CHECK-L: ${LINE:+1}: note: expanded from here
x

View File

@ -1,24 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
@kernel
def f(self):
core_log(self.x)
class c:
a = f
x = 1
class d:
b = f
x = 2
xa = c().a
xb = d().b
@kernel
def entrypoint():
xa()
# CHECK-L: <synthesized>:1: error: cannot unify <instance testbench.d> with <instance testbench.c
# CHECK-L: ${LINE:+1}: note: expanded from here
xb()

View File

@ -1,13 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
def foo():
pass
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: fatal: name 'fo0' is not bound to anything; did you mean 'foo'?
fo0()

View File

@ -1,14 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+1}: error: type annotation for return type, '1', is not an ARTIQ type
def foo() -> 1:
pass
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: note: in function called remotely here
foo()

View File

@ -1,15 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+2}: fatal: functions that return a value cannot be defined as async RPCs
@rpc(flags={"async"})
def foo() -> TInt32:
pass
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: note: function called here
foo()

View File

@ -1,19 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.experiment import *
class c():
# CHECK-L: ${LINE:+2}: error: type annotation for argument 'x', '<class 'float'>', is not an ARTIQ type
@kernel
def hello(self, x: float):
pass
@kernel
def run(self):
self.hello(2)
i = c()
@kernel
def entrypoint():
i.run()

View File

@ -1,15 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+2}: error: type annotation for argument 'x', '1', is not an ARTIQ type
@syscall
def foo(x: 1) -> TNone:
pass
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: note: in system call here
foo()

View File

@ -1,15 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+2}: error: type annotation for return type, '1', is not an ARTIQ type
@syscall
def foo() -> 1:
pass
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: note: in system call here
foo()

View File

@ -1,15 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+2}: error: system call argument 'x' must have a type annotation
@syscall
def foo(x) -> TNone:
pass
@kernel
def entrypoint():
# CHECK-L: ${LINE:+1}: note: in system call here
foo()

View File

@ -1,14 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+2}: error: system call argument 'x' must not have a default value
@syscall
def foo(x=1) -> TNone:
pass
@kernel
def entrypoint():
foo()

View File

@ -1,14 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding +diag %s 2>%t
# RUN: OutputCheck %s --file-to-check=%t
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: ${LINE:+2}: error: system call must have a return type annotation
@syscall
def foo():
pass
@kernel
def entrypoint():
foo()

View File

@ -1,18 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
def make_incrementer(increment):
return kernel_from_string(["a"], "return a + {}".format(increment),
portable)
foo = make_incrementer(1)
bar = make_incrementer(2)
@kernel
def entrypoint():
assert foo(4) == 5
assert bar(4) == 6

View File

@ -1,12 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
from artiq.coredevice.exceptions import RTIOUnderflow
@kernel
def entrypoint():
try:
pass
except RTIOUnderflow:
pass

View File

@ -1,20 +0,0 @@
# RUN: env ARTIQ_DUMP_UNOPT_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s
# RUN: OutputCheck %s --file-to-check=%t_unopt.ll
from artiq.language.core import *
from artiq.language.types import *
# CHECK-L: fmul fast double 1.000000e+00, 0.000000e+00
@kernel(flags=["fast-math"])
def foo():
core_log(1.0 * 0.0)
# CHECK-L: fmul fast double 2.000000e+00, 0.000000e+00
@portable(flags=["fast-math"])
def bar():
core_log(2.0 * 0.0)
@kernel
def entrypoint():
foo()
bar()

View File

@ -1,15 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
@kernel
def a():
pass
fns = [a, a]
@kernel
def entrypoint():
fns[0]()
fns[1]()

View File

@ -1,12 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
def f(x):
print(x)
@kernel
def entrypoint():
f("foo")
f(42)

View File

@ -1,22 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *
class a:
@kernel
def f(self):
print(self.x)
return None
class b(a):
x = 1
class c(a):
x = 2
bi = b()
ci = c()
@kernel
def entrypoint():
bi.f()
ci.f()

View File

@ -1,22 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *

class ClassA:
    def __init__(self):
        # Mutable flag that the kernel writes through an invariant reference.
        self.foo = False

class ClassB:
    # `bar` itself is declared invariant (always the same ClassA instance),
    # but the attributes *inside* it are not -- assigning obj.bar.foo from
    # kernel code must remain legal.
    kernel_invariants = {"bar"}

    def __init__(self):
        self.bar = ClassA()

obj = ClassB()

@kernel
def entrypoint():
    obj.bar.foo = True

View File

@ -1,26 +0,0 @@
# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding +compile %s
# RUN: OutputCheck %s --file-to-check=%t.ll
from artiq.language.core import *
from artiq.language.types import *

# Test: a kernel-invariant attribute whose value is known at embedding time
# lets the compiler fold the branch and drop the dead arm from the IR.
class Class:
    kernel_invariants = {"foo"}

    def __init__(self):
        self.foo = True

    @kernel
    def run(self):
        if self.foo:
            print("bar")
        else:
            # Make sure all the code for this branch will be completely elided:
            # CHECK-NOT: baz
            print("baz")

obj = Class()

@kernel
def entrypoint():
    obj.run()

View File

@ -1,31 +0,0 @@
# RUN: env ARTIQ_DUMP_LLVM=%t %python -m artiq.compiler.testbench.embedding %s
# RUN: OutputCheck %s --file-to-check=%t.ll
from artiq.language.core import *
from artiq.language.types import *

import numpy

# Test: numpy math intrinsics lower to LLVM calls that constant-fold away.
@kernel
def entrypoint():
    # LLVM's constant folding for transcendental functions is good enough that
    # we can do a basic smoke test by just making sure the module compiles and
    # all assertions are statically eliminated.
    # CHECK-NOT: assert
    assert numpy.sin(0.0) == 0.0
    assert numpy.cos(0.0) == 1.0
    assert numpy.exp(0.0) == 1.0
    assert numpy.exp2(1.0) == 2.0
    assert numpy.log(numpy.exp(1.0)) == 1.0
    assert numpy.log10(10.0) == 1.0
    assert numpy.log2(2.0) == 1.0
    assert numpy.fabs(-1.0) == 1.0
    assert numpy.floor(42.5) == 42.0
    assert numpy.ceil(42.5) == 43.0
    assert numpy.trunc(41.5) == 41.0
    assert numpy.rint(41.5) == 42.0
    assert numpy.tan(0.0) == 0.0
    assert numpy.arcsin(0.0) == 0.0
    assert numpy.arccos(1.0) == 0.0
    assert numpy.arctan(0.0) == 0.0
    assert numpy.arctan2(0.0, 1.0) == 0.0

View File

@ -1,22 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *

class a:
    # Host method (not a kernel); the target of the stored bound method below.
    def foo(self, x):
        print(x)

class b:
    def __init__(self):
        self.obj = a()
        # Bound method stored as an instance attribute; kernel code must be
        # able to resolve and call it.
        # NOTE(review): since `foo` is a host function, this call presumably
        # goes through the host-call path -- confirm against the testbench.
        self.meth = self.obj.foo

    @kernel
    def run(self):
        self.meth(1)

bi = b()

@kernel
def entrypoint():
    bi.run()

View File

@ -1,11 +0,0 @@
# RUN: %python -m artiq.compiler.testbench.embedding %s
from artiq.language.core import *
from artiq.language.types import *

import time, os

# Test: functions from host-only stdlib modules referenced inside a kernel.
# NOTE(review): presumably these embed as host calls rather than being
# compiled for the core device -- confirm against the embedding testbench.
@kernel
def entrypoint():
    time.sleep(10)
    os.mkdir("foo")

Some files were not shown because too many files have changed in this diff Show More