Mirror of https://github.com/m-labs/artiq.git (synced 2024-12-28 12:48:26 +08:00)

Commit da622937f6: Merge commit 'd0b5c3ba7fb' into new-py2llvm
@@ -166,9 +166,17 @@ fi
set +e
xc3sprog -c $CABLE -R > /dev/null 2>&1
if [ "$?" != "0" ]
STATUS=$?
set -e
if [ "$STATUS" == "127" ]
then
    echo "Flashing failed. Maybe you do not have permission to access the USB device?"
    echo "xc3sprog not found. Please install it or check your PATH."
    exit
fi
if [ "$STATUS" != "0" ]
then
    echo "Failed to connect to FPGA."
    echo "Maybe you do not have permission to access the USB device?"
    echo "To fix this you might want to add a udev rule by doing:"
    echo "$ sudo cp $ARTIQ_PREFIX/misc/$UDEV_RULES /etc/udev/rules.d"
    echo "Then unplug/replug your device and try flashing again"
@@ -177,7 +185,6 @@ then
    echo "Please make sure you used the correct -t option (currently: $BOARD)"
    exit
fi
set -e

if [ ! -z "$FILENAME" ]
then
@@ -83,7 +83,12 @@ def main():
        "log": log_worker
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    worker_handlers.update({
        "scheduler_submit": scheduler.submit,
        "scheduler_delete": scheduler.delete,
        "scheduler_request_termination": scheduler.request_termination,
        "scheduler_get_status": scheduler.get_status
    })
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))
@@ -38,20 +38,28 @@ class ELFRunner(EnvExperiment):

class DummyScheduler:
    def __init__(self):
        self.next_rid = 0
        self.rid = 0
        self.pipeline_name = "main"
        self.priority = 0
        self.expid = None

        self._next_rid = 1

    def submit(self, pipeline_name, expid, priority, due_date, flush):
        rid = self.next_rid
        self.next_rid += 1
        rid = self._next_rid
        self._next_rid += 1
        logger.info("Submitting: %s, RID=%s", expid, rid)
        return rid

    def delete(self, rid):
        logger.info("Deleting RID %s", rid)

    def request_termination(self, rid):
        logger.info("Requesting termination of RID %s", rid)

    def get_status(self):
        return dict()

    def pause(self):
        pass
@@ -58,7 +58,7 @@ class DatasetsDock(dockarea.Dock):
        self.search = QtGui.QLineEdit()
        self.search.setPlaceholderText("search...")
        self.search.editingFinished.connect(self._search_datasets)
        grid.addWidget(self.search, 0, )
        grid.addWidget(self.search, 0, 0)

        self.table = QtGui.QTableView()
        self.table.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
@@ -368,12 +368,9 @@ class ExplorerDock(dockarea.Dock):
            arguments = self.argeditor_states[key]["argument_values"]
        except KeyError:
            arguments = dict()
        asyncio.ensure_future(self.submit_task(self.pipeline.text(),
                                               expinfo["file"],
        asyncio.ensure_future(self.submit_task(pipeline, expinfo["file"],
                                               expinfo["class_name"],
                                               arguments,
                                               priority,
                                               due_date,
                                               arguments, priority, due_date,
                                               flush))

    def submit_clicked(self):
198 artiq/gui/log.py
@@ -6,7 +6,6 @@ from quamash import QtGui, QtCore
from pyqtgraph import dockarea, LayoutWidget

from artiq.protocols.sync_struct import Subscriber
from artiq.gui.tools import ListSyncModel

try:
    QSortFilterProxyModel = QtCore.QSortFilterProxyModel
@@ -26,11 +25,19 @@ def _level_to_name(level):
    return "DEBUG"


class _LogModel(ListSyncModel):
class _LogModel(QtCore.QAbstractTableModel):
    def __init__(self, parent, init):
        ListSyncModel.__init__(self,
            ["Level", "Source", "Time", "Message"],
            parent, init)
        QtCore.QAbstractTableModel.__init__(self, parent)

        self.headers = ["Level", "Source", "Time", "Message"]

        self.entries = init
        self.pending_entries = []
        self.depth = 1000
        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.timer_tick)
        timer.start(100)

        self.fixed_font = QtGui.QFont()
        self.fixed_font.setFamily("Monospace")
@@ -40,53 +47,110 @@ class _LogModel(ListSyncModel):
        self.warning_bg = QtGui.QBrush(QtGui.QColor(255, 255, 180))
        self.error_bg = QtGui.QBrush(QtGui.QColor(255, 150, 150))

    def headerData(self, col, orientation, role):
        if (orientation == QtCore.Qt.Horizontal
                and role == QtCore.Qt.DisplayRole):
            return self.headers[col]
        return None

    def rowCount(self, parent):
        return len(self.entries)

    def columnCount(self, parent):
        return len(self.headers)

    def __delitem__(self, k):
        pass

    def append(self, v):
        self.pending_entries.append(v)

    def insertRows(self, position, rows=1, index=QtCore.QModelIndex()):
        self.beginInsertRows(QtCore.QModelIndex(), position, position+rows-1)
        self.endInsertRows()

    def removeRows(self, position, rows=1, index=QtCore.QModelIndex()):
        self.beginRemoveRows(QtCore.QModelIndex(), position, position+rows-1)
        self.endRemoveRows()

    def timer_tick(self):
        if not self.pending_entries:
            return
        nrows = len(self.entries)
        records = self.pending_entries
        self.pending_entries = []
        self.entries.extend(records)
        self.insertRows(nrows, len(records))
        if len(self.entries) > self.depth:
            start = len(self.entries) - self.depth
            self.entries = self.entries[start:]
            self.removeRows(0, start)

    def data(self, index, role):
        if (role == QtCore.Qt.FontRole and index.isValid()
                and index.column() == 3):
            return self.fixed_font
        elif role == QtCore.Qt.BackgroundRole and index.isValid():
            level = self.backing_store[index.row()][0]
            if level >= logging.ERROR:
                return self.error_bg
            elif level >= logging.WARNING:
                return self.warning_bg
            else:
                return self.white
        elif role == QtCore.Qt.ForegroundRole and index.isValid():
            level = self.backing_store[index.row()][0]
            if level <= logging.DEBUG:
                return self.debug_fg
            else:
                return self.black
        else:
            return ListSyncModel.data(self, index, role)

    def convert(self, v, column):
        if column == 0:
            return _level_to_name(v[0])
        elif column == 1:
            return v[1]
        elif column == 2:
            return time.strftime("%m/%d %H:%M:%S", time.localtime(v[2]))
        else:
            return v[3]
        if index.isValid():
            if (role == QtCore.Qt.FontRole
                    and index.column() == 3):
                return self.fixed_font
            elif role == QtCore.Qt.BackgroundRole:
                level = self.entries[index.row()][0]
                if level >= logging.ERROR:
                    return self.error_bg
                elif level >= logging.WARNING:
                    return self.warning_bg
                else:
                    return self.white
            elif role == QtCore.Qt.ForegroundRole:
                level = self.entries[index.row()][0]
                if level <= logging.DEBUG:
                    return self.debug_fg
                else:
                    return self.black
            elif role == QtCore.Qt.DisplayRole:
                v = self.entries[index.row()]
                column = index.column()
                if column == 0:
                    return _level_to_name(v[0])
                elif column == 1:
                    return v[1]
                elif column == 2:
                    return time.strftime("%m/%d %H:%M:%S", time.localtime(v[2]))
                else:
                    return v[3]


class _LevelFilterProxyModel(QSortFilterProxyModel):
    def __init__(self, min_level):
class _LogFilterProxyModel(QSortFilterProxyModel):
    def __init__(self, min_level, freetext):
        QSortFilterProxyModel.__init__(self)
        self.min_level = min_level
        self.freetext = freetext

    def filterAcceptsRow(self, sourceRow, sourceParent):
        model = self.sourceModel()

        index = model.index(sourceRow, 0, sourceParent)
        data = model.data(index, QtCore.Qt.DisplayRole)
        return getattr(logging, data) >= self.min_level
        accepted_level = getattr(logging, data) >= self.min_level

        if self.freetext:
            index = model.index(sourceRow, 1, sourceParent)
            data_source = model.data(index, QtCore.Qt.DisplayRole)
            index = model.index(sourceRow, 3, sourceParent)
            data_message = model.data(index, QtCore.Qt.DisplayRole)
            accepted_freetext = (self.freetext in data_source
                                 or self.freetext in data_message)
        else:
            accepted_freetext = True

        return accepted_level and accepted_freetext

    def set_min_level(self, min_level):
        self.min_level = min_level
        self.invalidateFilter()

    def set_freetext(self, freetext):
        self.freetext = freetext
        self.invalidateFilter()


class LogDock(dockarea.Dock):
    def __init__(self):
@@ -96,14 +160,17 @@ class LogDock(dockarea.Dock):
        self.addWidget(grid)

        grid.addWidget(QtGui.QLabel("Minimum level: "), 0, 0)
        grid.layout.setColumnStretch(0, 0)
        grid.layout.setColumnStretch(1, 0)
        grid.layout.setColumnStretch(2, 1)
        self.filterbox = QtGui.QComboBox()
        self.filterbox.addItems(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
        self.filterbox.setToolTip("Display entries at or above this level")
        grid.addWidget(self.filterbox, 0, 1)
        self.filterbox.currentIndexChanged.connect(self.filter_changed)
        self.filter_level = QtGui.QComboBox()
        self.filter_level.addItems(["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"])
        self.filter_level.setToolTip("Display entries at or above this level")
        grid.addWidget(self.filter_level, 0, 1)
        self.filter_level.currentIndexChanged.connect(
            self.filter_level_changed)
        self.filter_freetext = QtGui.QLineEdit()
        self.filter_freetext.setPlaceholderText("freetext filter...")
        self.filter_freetext.editingFinished.connect(
            self.filter_freetext_changed)
        grid.addWidget(self.filter_freetext, 0, 2)

        self.log = QtGui.QTableView()
        self.log.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
@@ -113,7 +180,7 @@ class LogDock(dockarea.Dock):
            QtGui.QAbstractItemView.ScrollPerPixel)
        self.log.setShowGrid(False)
        self.log.setTextElideMode(QtCore.Qt.ElideNone)
        grid.addWidget(self.log, 1, 0, colspan=3)
        grid.addWidget(self.log, 1, 0, colspan=4)
        self.scroll_at_bottom = False

    async def sub_connect(self, host, port):
@@ -123,30 +190,49 @@ class LogDock(dockarea.Dock):
    async def sub_close(self):
        await self.subscriber.close()

    def filter_changed(self):
    def filter_level_changed(self):
        self.table_model_filter.set_min_level(
            getattr(logging, self.filterbox.currentText()))
            getattr(logging, self.filter_level.currentText()))

    def filter_freetext_changed(self):
        self.table_model_filter.set_freetext(self.filter_freetext.text())

    def rows_inserted_before(self):
        scrollbar = self.log.verticalScrollBar()
        self.scroll_at_bottom = scrollbar.value() == scrollbar.maximum()
        self.scroll_value = scrollbar.value()
        self.scroll_at_bottom = self.scroll_value == scrollbar.maximum()

    def rows_inserted_after(self):
        if self.scroll_at_bottom:
            self.log.scrollToBottom()

    # HACK:
    # Qt intermittently likes to scroll back to the top when rows are removed.
    # Work around this by restoring the scrollbar to the previously memorized
    # position, after the removal.
    # Note that this works because _LogModel always does the insertion right
    # before the removal.
    def rows_removed(self):
        if self.scroll_at_bottom:
            self.log.scrollToBottom()
        else:
            scrollbar = self.log.verticalScrollBar()
            scrollbar.setValue(self.scroll_value)

    def init_log_model(self, init):
        table_model = _LogModel(self.log, init)
        self.table_model_filter = _LevelFilterProxyModel(
            getattr(logging, self.filterbox.currentText()))
        self.table_model_filter.setSourceModel(table_model)
        self.table_model = _LogModel(self.log, init)
        self.table_model_filter = _LogFilterProxyModel(
            getattr(logging, self.filter_level.currentText()),
            self.filter_freetext.text())
        self.table_model_filter.setSourceModel(self.table_model)
        self.log.setModel(self.table_model_filter)
        self.table_model_filter.rowsAboutToBeInserted.connect(self.rows_inserted_before)
        self.table_model_filter.rowsInserted.connect(self.rows_inserted_after)
        return table_model
        self.table_model_filter.rowsRemoved.connect(self.rows_removed)
        return self.table_model

    def save_state(self):
        return {"min_level_idx": self.filterbox.currentIndex()}
        return {"min_level_idx": self.filter_level.currentIndex()}

    def restore_state(self, state):
        try:
@@ -154,4 +240,4 @@ class LogDock(dockarea.Dock):
        except KeyError:
            pass
        else:
            self.filterbox.setCurrentIndex(idx)
            self.filter_level.setCurrentIndex(idx)
@@ -412,6 +412,7 @@ class Scheduler:
        logger.warning("some pipelines were not garbage-collected")

    def submit(self, pipeline_name, expid, priority, due_date, flush):
        """Submits a new run."""
        # mutates expid to insert head repository revision if None
        if self._terminated:
            return
@@ -427,9 +428,11 @@
        return pipeline.pool.submit(expid, priority, due_date, flush, pipeline_name)

    def delete(self, rid):
        """Kills the run with the specified RID."""
        self._deleter.delete(rid)

    def request_termination(self, rid):
        """Requests graceful termination of the run with the specified RID."""
        for pipeline in self._pipelines.values():
            if rid in pipeline.pool.runs:
                run = pipeline.pool.runs[rid]
@@ -438,3 +441,8 @@
            else:
                self.delete(rid)
            break

    def get_status(self):
        """Returns a dictionary containing information about the runs currently
        tracked by the scheduler."""
        return self.notifier.read
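
The hunk above documents the master-side Scheduler API (submit, delete, request_termination, get_status). As a rough illustration only, not part of the commit, the submit() signature together with the expid fields read elsewhere in this diff ("file", "class_name", "arguments") suggests a submission looks approximately like the sketch below; the pipeline name and file path are assumptions, and the real master may expect additional expid fields (e.g. the repository revision mentioned in the comment above).

def submit_example(scheduler):
    # "scheduler" is assumed to be an artiq.master.scheduler.Scheduler instance.
    expid = {
        "file": "repository/blink_forever.py",  # hypothetical path
        "class_name": "BlinkForever",
        "arguments": {}
    }
    # Signature shown in this diff:
    # submit(pipeline_name, expid, priority, due_date, flush)
    return scheduler.submit("main", expid, 0, None, False)
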
@@ -95,9 +95,13 @@ class Scheduler:
        raise TerminationRequested

    submit = staticmethod(make_parent_action("scheduler_submit"))
    cancel = staticmethod(make_parent_action("scheduler_cancel"))
    delete = staticmethod(make_parent_action("scheduler_delete"))
    request_termination = staticmethod(
        make_parent_action("scheduler_request_termination"))
    get_status = staticmethod(make_parent_action("scheduler_get_status"))

    def set_run_info(self, pipeline_name, expid, priority):
    def set_run_info(self, rid, pipeline_name, expid, priority):
        self.rid = rid
        self.pipeline_name = pipeline_name
        self.expid = expid
        self.priority = priority
@@ -182,7 +186,7 @@ def main():
        expf = expid["file"]
        exp = get_exp(expf, expid["class_name"])
        device_mgr.virtual_devices["scheduler"].set_run_info(
            obj["pipeline_name"], expid, obj["priority"])
            rid, obj["pipeline_name"], expid, obj["priority"])
        exp_inst = exp(device_mgr, dataset_mgr,
            **expid["arguments"])
        put_object({"action": "completed"})
@@ -50,9 +50,14 @@ Then you can install the ARTIQ package, it will pull all the necessary dependenc
    $ ENV=$(date +artiq-%Y-%m-%d); conda create -n $ENV artiq-pipistrello-nist_qc1; \
        echo "Created environment $ENV for ARTIQ"

* For the KC705 board::
* For the KC705 board with SCSI cables and AD9858 DDS chips::

    $ ENV=$(date +artiq-%Y-%m-%d); conda create -n $ENV artiq-kc705-nist_qc1 artiq-kc705-nist_qc2; \
    $ ENV=$(date +artiq-%Y-%m-%d); conda create -n $ENV artiq-kc705-nist_qc1; \
        echo "Created environment $ENV for ARTIQ"

* For the KC705 board with the FMC backplane and AD9914 DDS chips::

    $ ENV=$(date +artiq-%Y-%m-%d); conda create -n $ENV artiq-kc705-nist_qc2; \
        echo "Created environment $ENV for ARTIQ"

This creates a new Conda "environment" (i.e. an isolated installation) and prints its name.
@@ -111,8 +111,18 @@ Push commits containing experiments to the bare repository using e.g. Git over S

The GUI always runs experiments from the repository. The command-line client, by default, runs experiments from the raw filesystem (which is useful for iterating rapidly without creating many disorganized commits). If you want to use the repository instead, simply pass the ``-R`` option.

Reference
*********
Scheduler API reference
***********************

The scheduler is exposed to the experiments via a virtual device called ``scheduler``. It can be requested like any regular device, and then the methods below can be called on the returned object.

The scheduler virtual device also contains the attributes ``rid``, ``pipeline_name``, ``priority`` and ``expid`` that contain the corresponding information about the current run.
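
To make the two paragraphs above concrete, a minimal experiment using the scheduler virtual device could look like the sketch below. It only relies on names stated in this documentation and in the terminate_all.py example added by this commit; the class name and printed labels are illustrative.

from artiq import *


class SchedulerInfo(EnvExperiment):
    def build(self):
        # The scheduler is requested like any regular device.
        self.setattr_device("scheduler")

    def run(self):
        # Attributes describing the current run.
        print("RID:", self.scheduler.rid)
        print("pipeline:", self.scheduler.pipeline_name)
        print("priority:", self.scheduler.priority)
        print("expid:", self.scheduler.expid)

        # get_status() returns a dictionary keyed by RID.
        for rid in self.scheduler.get_status().keys():
            print("known run:", rid)
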
.. autoclass:: artiq.master.scheduler.Scheduler
    :members:

Front-end tool reference
************************

.. argparse::
    :ref: artiq.frontend.artiq_master.get_argparser
examples/master/repository/blink_forever.py (new file, 13 lines)
@@ -0,0 +1,13 @@
from artiq import *


class BlinkForever(EnvExperiment):
    def build(self):
        self.setattr_device("core")
        self.setattr_device("led")

    @kernel
    def run(self):
        while True:
            self.led.pulse(100*ms)
            delay(100*ms)
examples/master/repository/terminate_all.py (new file, 17 lines)
@@ -0,0 +1,17 @@
from artiq import *


class TerminateAll(EnvExperiment):
    def build(self):
        self.setattr_device("scheduler")
        self.setattr_argument("graceful_termination", BooleanValue(True))

    def run(self):
        if self.graceful_termination:
            terminate = self.scheduler.request_termination
        else:
            terminate = self.scheduler.delete

        for rid in self.scheduler.get_status().keys():
            if rid != self.scheduler.rid:
                terminate(rid)
@@ -66,10 +66,6 @@ void bridge_main(void)
            mailbox_acknowledge();
            break;
        }
        case MESSAGE_TYPE_BRG_DDS_INITALL:
            dds_init_all();
            mailbox_acknowledge();
            break;
        case MESSAGE_TYPE_BRG_DDS_SEL: {
            struct msg_brg_dds_sel *msg;

@@ -23,11 +23,6 @@ void brg_start(void)
    }
}

void brg_stop(void)
{
    kloader_stop();
}

void brg_ttloe(int n, int value)
{
    struct msg_brg_ttl_out msg;
@@ -48,14 +43,6 @@ void brg_ttlo(int n, int value)
    mailbox_send_and_wait(&msg);
}

void brg_ddsinitall(void)
{
    struct msg_base msg;

    msg.type = MESSAGE_TYPE_BRG_DDS_INITALL;
    mailbox_send_and_wait(&msg);
}

void brg_ddssel(int channel)
{
    struct msg_brg_dds_sel msg;
@@ -2,12 +2,10 @@
#define __BRIDGE_CTL_H

void brg_start(void);
void brg_stop(void);

void brg_ttloe(int n, int value);
void brg_ttlo(int n, int value);

void brg_ddsinitall(void);
void brg_ddssel(int channel);
void brg_ddsreset(void);
unsigned int brg_ddsread(unsigned int address);
@@ -62,7 +62,7 @@ int watchdog_set(int ms)
        break;
    }
    if(id < 0) {
        log("Failed to add watchdog");
        log("WARNING: Failed to add watchdog");
        return id;
    }

@@ -33,19 +33,6 @@
    now += DURATION_WRITE; \
} while(0)

void dds_init_all(void)
{
    int i;
    long long int now;

    now = rtio_get_counter() + 10000;
    for(i=0;i<DDS_CHANNEL_COUNT;i++) {
        dds_init(now, i);
        now += DURATION_INIT + DURATION_WRITE; /* + FUD time */
    }
    while(rtio_get_counter() < now);
}

void dds_init(long long int timestamp, int channel)
{
    long long int now;
@@ -54,7 +54,6 @@ enum {
    PHASE_MODE_TRACKING = 2
};

void dds_init_all(void);
void dds_init(long long int timestamp, int channel);
void dds_batch_enter(long long int timestamp);
void dds_batch_exit(void);
@@ -5,6 +5,7 @@

#include "kloader.h"
#include "log.h"
#include "clock.h"
#include "flash_storage.h"
#include "mailbox.h"
#include "messages.h"
@@ -87,22 +88,37 @@ void kloader_start_kernel()
    load_or_start_kernel(NULL, 1);
}

int kloader_start_idle_kernel(void)
static int kloader_start_flash_kernel(char *key)
{
#if (defined CSR_SPIFLASH_BASE && defined SPIFLASH_PAGE_SIZE)
    char buffer[32*1024];
    int length;
    int length, remain;

    length = fs_read("idle_kernel", buffer, sizeof(buffer), NULL);
    length = fs_read(key, buffer, sizeof(buffer), &remain);
    if(length <= 0)
        return 0;

    if(remain) {
        log("ERROR: kernel %s is too large", key);
        return 0;
    }

    return load_or_start_kernel(buffer, 1);
#else
    return 0;
#endif
}

int kloader_start_startup_kernel(void)
{
    return kloader_start_flash_kernel("startup_kernel");
}

int kloader_start_idle_kernel(void)
{
    return kloader_start_flash_kernel("idle_kernel");
}

void kloader_stop(void)
{
    kernel_cpu_reset_write(1);
|
||||
case MESSAGE_TYPE_NOW_INIT_REQUEST:
|
||||
case MESSAGE_TYPE_NOW_SAVE:
|
||||
case MESSAGE_TYPE_LOG:
|
||||
case MESSAGE_TYPE_WATCHDOG_SET_REQUEST:
|
||||
case MESSAGE_TYPE_WATCHDOG_CLEAR:
|
||||
return 1;
|
||||
default:
|
||||
return 0;
|
||||
@@ -168,6 +186,22 @@ void kloader_service_essential_kmsg(void)
            mailbox_acknowledge();
            break;
        }
        case MESSAGE_TYPE_WATCHDOG_SET_REQUEST: {
            struct msg_watchdog_set_request *msg = (struct msg_watchdog_set_request *)umsg;
            struct msg_watchdog_set_reply reply;

            reply.type = MESSAGE_TYPE_WATCHDOG_SET_REPLY;
            reply.id = watchdog_set(msg->ms);
            mailbox_send_and_wait(&reply);
            break;
        }
        case MESSAGE_TYPE_WATCHDOG_CLEAR: {
            struct msg_watchdog_clear *msg = (struct msg_watchdog_clear *)umsg;

            watchdog_clear(msg->id);
            mailbox_acknowledge();
            break;
        }
        default:
            /* handled elsewhere */
            break;
@@ -15,6 +15,7 @@ void kloader_filter_backtrace(struct artiq_backtrace_item *backtrace,
                              size_t *backtrace_size);

void kloader_start_bridge(void);
int kloader_start_startup_kernel(void);
int kloader_start_idle_kernel(void);
void kloader_start_kernel(void);
void kloader_stop(void);
@@ -30,13 +30,6 @@
#include "session.h"
#include "moninj.h"

static void common_init(void)
{
    brg_start();
    brg_ddsinitall();
    kloader_stop();
}

#ifdef CSR_ETHMAC_BASE

u32_t sys_now(void)
@@ -261,7 +254,7 @@ int main(void)
        test_main();
    } else {
        puts("Entering regular mode.");
        common_init();
        session_startup_kernel();
        regular_main();
    }
    return 0;
@@ -22,7 +22,6 @@ enum {
    MESSAGE_TYPE_BRG_READY,
    MESSAGE_TYPE_BRG_TTL_O,
    MESSAGE_TYPE_BRG_TTL_OE,
    MESSAGE_TYPE_BRG_DDS_INITALL,
    MESSAGE_TYPE_BRG_DDS_SEL,
    MESSAGE_TYPE_BRG_DDS_RESET,
    MESSAGE_TYPE_BRG_DDS_READ_REQUEST,
@@ -1,6 +1,6 @@
#include <stdio.h>
#include <generated/csr.h>

#include "log.h"
#include "clock.h"
#include "flash_storage.h"
#include "rtiocrg.h"
@@ -17,17 +17,17 @@ void rtiocrg_init(void)
    clk = 0;
    fs_read("startup_clock", &b, 1, NULL);
    if(b == 'i')
        printf("Startup RTIO clock: internal\n");
        log("Startup RTIO clock: internal");
    else if(b == 'e') {
        printf("Startup RTIO clock: external\n");
        log("Startup RTIO clock: external");
        clk = 1;
    } else
        printf("WARNING: unknown startup_clock entry in flash storage\n");
        log("ERROR: unrecognized startup_clock entry in flash storage");

    if(!rtiocrg_switch_clock(clk)) {
        printf("WARNING: startup RTIO clock failed\n");
        printf("WARNING: this may cause the system initialization to fail\n");
        printf("WARNING: fix clocking and reset the device\n");
        log("ERROR: startup RTIO clock failed");
        log("WARNING: this may cause the system initialization to fail");
        log("WARNING: fix clocking and reset the device");
    }
}

@@ -290,6 +290,44 @@ enum {
    USER_KERNEL_WAIT_RPC /* < must come after _RUNNING */
};

void session_startup_kernel(void)
{
    struct msg_base *umsg;

    now = -1;
    watchdog_init();
    if(!kloader_start_startup_kernel())
        return;

    while(1) {
        kloader_service_essential_kmsg();

        umsg = mailbox_receive();
        if(umsg) {
            if(!kloader_validate_kpointer(umsg))
                break;
            if(kloader_is_essential_kmsg(umsg->type))
                continue;
            if(umsg->type == MESSAGE_TYPE_FINISHED)
                break;
            else if(umsg->type == MESSAGE_TYPE_EXCEPTION) {
                log("WARNING: startup kernel ended with exception");
                break;
            } else {
                log("ERROR: received invalid message type from kernel CPU");
                break;
            }
        }

        if(watchdog_expired()) {
            log("WARNING: watchdog expired in startup kernel");
            break;
        }
    }
    kloader_stop();
    log("Startup kernel terminated");
}

void session_start(void)
{
    in_packet_reset();
@@ -304,6 +342,7 @@
{
    kloader_stop();
    now = -1;
    watchdog_init();
    kloader_start_idle_kernel();
}

@@ -914,24 +953,6 @@ static int process_kmsg(struct msg_base *umsg)
            break;
        }

        case MESSAGE_TYPE_WATCHDOG_SET_REQUEST: {
            struct msg_watchdog_set_request *msg = (struct msg_watchdog_set_request *)umsg;
            struct msg_watchdog_set_reply reply;

            reply.type = MESSAGE_TYPE_WATCHDOG_SET_REPLY;
            reply.id = watchdog_set(msg->ms);
            mailbox_send_and_wait(&reply);
            break;
        }

        case MESSAGE_TYPE_WATCHDOG_CLEAR: {
            struct msg_watchdog_clear *msg = (struct msg_watchdog_clear *)umsg;

            watchdog_clear(msg->id);
            mailbox_acknowledge();
            break;
        }

        case MESSAGE_TYPE_RPC_SEND: {
            struct msg_rpc_send *msg = (struct msg_rpc_send *)umsg;

@@ -946,10 +967,11 @@
            break;
        }

        default:
        default: {
            log("Received invalid message type %d from kernel CPU",
                umsg->type);
            return 0;
        }
    }

    return 1;
@@ -1,6 +1,7 @@
#ifndef __SESSION_H
#define __SESSION_H

void session_startup_kernel(void);
void session_start(void);
void session_end(void);
